diff --git a/docs/cce/umn/.placeholder b/docs/cce/umn/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/docs/cce/umn/ALL_META.TXT.json b/docs/cce/umn/ALL_META.TXT.json index 45ca625e..bebc082e 100644 --- a/docs/cce/umn/ALL_META.TXT.json +++ b/docs/cce/umn/ALL_META.TXT.json @@ -20,7 +20,7 @@ "githuburl":"" }, { - "uri":"cce_01_0236.html", + "uri":"cce_bulletin_0000.html", "product_code":"cce", "code":"3", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -43,7 +43,7 @@ "uri":"cce_bulletin_0003.html", "product_code":"cce", "code":"5", - "des":"This section describes the Kubernetes version support mechanism of CCE.Version number: The format is x.y.z-r{n}, where x.y is the major version and z is the minor version", + "des":"This section explains versioning in CCE, and the policies for Kubernetes version support.Version number: The format is x.y.z, where x.y is the major version and z is the ", "doc_type":"usermanual2", "kw":"Kubernetes Version Support Mechanism,Product Bulletin,User Guide", "title":"Kubernetes Version Support Mechanism", @@ -63,7 +63,7 @@ "uri":"cce_bulletin_0301.html", "product_code":"cce", "code":"7", - "des":"CCE nodes in Hybrid clusters can run on EulerOS 2.2, EulerOS 2.5, EulerOS 2.9 and CentOS 7.7. The following table lists the supported patches for these OSs.The OS patches", + "des":"CCE nodes in Hybrid clusters can run on EulerOS 2.5, EulerOS 2.9 and CentOS 7.7. The following table lists the supported patches for these OSs.The OS patches and verifica", "doc_type":"usermanual2", "kw":"OS Patch Notes for Cluster Nodes,Product Bulletin,User Guide", "title":"OS Patch Notes for Cluster Nodes", @@ -80,9 +80,19 @@ "githuburl":"" }, { - "uri":"CVE-2021-4034.html", + "uri":"cce_bulletin_0011.html", "product_code":"cce", "code":"9", + "des":"High-risk vulnerabilities:CCE fixes vulnerabilities as soon as possible after the Kubernetes community detects them and releases fixing solutions. The fixing policies are", + "doc_type":"usermanual2", + "kw":"Vulnerability Fixing Policies,Security Vulnerability Responses,User Guide", + "title":"Vulnerability Fixing Policies", + "githuburl":"" + }, + { + "uri":"CVE-2021-4034.html", + "product_code":"cce", + "code":"10", "des":"Recently, a security research team disclosed a privilege escalation vulnerability (CVE-2021-4034, also dubbed PwnKit) in PolKit's pkexec. Unprivileged users can gain full", "doc_type":"usermanual2", "kw":"Linux Polkit Privilege Escalation Vulnerability (CVE-2021-4034),Security Vulnerability Responses,Use", @@ -92,17 +102,27 @@ { "uri":"cce_bulletin_0206.html", "product_code":"cce", - "code":"10", + "code":"11", "des":"The Linux Kernel SACK vulnerabilities have been fixed. This section describes the solution to these vulnerabilities.On June 18, 2019, Red Hat released a security notice, ", "doc_type":"usermanual2", "kw":"Notice on Fixing Linux Kernel SACK Vulnerabilities,Security Vulnerability Responses,User Guide", "title":"Notice on Fixing Linux Kernel SACK Vulnerabilities", "githuburl":"" }, + { + "uri":"cce_10_0477.html", + "product_code":"cce", + "code":"12", + "des":"In clusters earlier than v1.21, a token is obtained by mounting the secret of the service account to a pod. Tokens obtained this way are permanent. 
This approach is no lo", + "doc_type":"usermanual2", + "kw":"Service Account Token Security Improvement,Product Bulletin,User Guide", + "title":"Service Account Token Security Improvement", + "githuburl":"" + }, { "uri":"cce_01_9994.html", "product_code":"cce", - "code":"11", + "code":"13", "des":"CCE works closely with multiple cloud services to support computing, storage, networking, and monitoring functions. When you log in to the CCE console for the first time,", "doc_type":"usermanual2", "kw":"Obtaining Resource Permissions,User Guide", @@ -110,9 +130,9 @@ "githuburl":"" }, { - "uri":"cce_01_0027.html", + "uri":"cce_10_0091.html", "product_code":"cce", - "code":"12", + "code":"14", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Clusters", @@ -120,29 +140,109 @@ "githuburl":"" }, { - "uri":"cce_01_0002.html", + "uri":"cce_10_0002.html", "product_code":"cce", - "code":"13", - "des":"Kubernetes is a containerized application software system that can be easily deployed and managed. It facilitates container scheduling and orchestration.For application d", + "code":"15", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", - "kw":"Cluster Overview,Clusters,User Guide", + "kw":"Cluster Overview", "title":"Cluster Overview", "githuburl":"" }, { - "uri":"cce_01_0342.html", + "uri":"cce_10_0430.html", "product_code":"cce", - "code":"14", + "code":"16", + "des":"Kubernetes allows you to easily deploy and manage containerized application and facilitates container scheduling and orchestration.For developers, Kubernetes is a cluster", + "doc_type":"usermanual2", + "kw":"Basic Cluster Information,Cluster Overview,User Guide", + "title":"Basic Cluster Information", + "githuburl":"" + }, + { + "uri":"cce_10_0342.html", + "product_code":"cce", + "code":"17", "des":"The following table lists the differences between CCE Turbo clusters and CCE clusters:The QingTian architecture consists of data plane (software-hardware synergy) and man", "doc_type":"usermanual2", - "kw":"CCE Turbo Clusters and CCE Clusters,Clusters,User Guide", + "kw":"CCE Turbo Clusters and CCE Clusters,Cluster Overview,User Guide", "title":"CCE Turbo Clusters and CCE Clusters", "githuburl":"" }, { - "uri":"cce_01_0298.html", + "uri":"cce_10_0349.html", "product_code":"cce", - "code":"15", + "code":"18", + "des":"kube-proxy is a key component of a Kubernetes cluster. It is responsible for load balancing and forwarding between a Service and its backend pod.CCE supports two forwardi", + "doc_type":"usermanual2", + "kw":"Comparing iptables and IPVS,Cluster Overview,User Guide", + "title":"Comparing iptables and IPVS", + "githuburl":"" + }, + { + "uri":"cce_10_0068.html", + "product_code":"cce", + "code":"19", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Release Notes", + "title":"Release Notes", + "githuburl":"" + }, + { + "uri":"cce_10_0467.html", + "product_code":"cce", + "code":"20", + "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.25.Kubernetes 1", + "doc_type":"usermanual2", + "kw":"CCE Kubernetes 1.25 Release Notes,Release Notes,User Guide", + "title":"CCE Kubernetes 1.25 Release Notes", + "githuburl":"" + }, + { + "uri":"cce_10_0468.html", + "product_code":"cce", + "code":"21", + "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.23.Changes in C", + "doc_type":"usermanual2", + "kw":"CCE Kubernetes 1.23 Release Notes,Release Notes,User Guide", + "title":"CCE Kubernetes 1.23 Release Notes", + "githuburl":"" + }, + { + "uri":"cce_10_0469.html", + "product_code":"cce", + "code":"22", + "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.21.Kubernetes 1", + "doc_type":"usermanual2", + "kw":"CCE Kubernetes 1.21 Release Notes,Release Notes,User Guide", + "title":"CCE Kubernetes 1.21 Release Notes", + "githuburl":"" + }, + { + "uri":"cce_10_0470.html", + "product_code":"cce", + "code":"23", + "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.19.Kubernetes 1", + "doc_type":"usermanual2", + "kw":"CCE Kubernetes 1.19 Release Notes,Release Notes,User Guide", + "title":"CCE Kubernetes 1.19 Release Notes", + "githuburl":"" + }, + { + "uri":"cce_10_0471.html", + "product_code":"cce", + "code":"24", + "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.17.All resource", + "doc_type":"usermanual2", + "kw":"CCE Kubernetes 1.17 Release Notes,Release Notes,User Guide", + "title":"CCE Kubernetes 1.17 Release Notes", + "githuburl":"" + }, + { + "uri":"cce_10_0298.html", + "product_code":"cce", + "code":"25", "des":"CCE Turbo clusters run on a cloud native infrastructure that features software-hardware synergy to support passthrough networking, high security and reliability, and inte", "doc_type":"usermanual2", "kw":"Creating a CCE Turbo Cluster,Clusters,User Guide", @@ -150,9 +250,9 @@ "githuburl":"" }, { - "uri":"cce_01_0028.html", + "uri":"cce_10_0028.html", "product_code":"cce", - "code":"16", + "code":"26", "des":"On the CCE console, you can easily create Kubernetes clusters. Kubernetes can manage container clusters at scale. A cluster manages a group of node resources.In CCE, you ", "doc_type":"usermanual2", "kw":"Creating a CCE Cluster,Clusters,User Guide", @@ -160,9 +260,9 @@ "githuburl":"" }, { - "uri":"cce_01_0140.html", + "uri":"cce_10_0140.html", "product_code":"cce", - "code":"17", + "code":"27", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Using kubectl to Run a Cluster", @@ -170,19 +270,29 @@ "githuburl":"" }, { - "uri":"cce_01_0107.html", + "uri":"cce_10_0107.html", "product_code":"cce", - "code":"18", + "code":"28", "des":"This section uses a CCE cluster as an example to describe how to connect to a CCE cluster using kubectl.When you access a cluster using kubectl, CCE uses thekubeconfig.js", "doc_type":"usermanual2", - "kw":"Public network access,Connecting to a Cluster Using kubectl,Using kubectl to Run a Cluster,User Guid", + "kw":"Connecting to a Cluster Using kubectl,Using kubectl to Run a Cluster,User Guide", "title":"Connecting to a Cluster Using kubectl", "githuburl":"" }, { - "uri":"cce_01_0139.html", + "uri":"cce_10_0367.html", "product_code":"cce", - "code":"19", + "code":"29", + "des":"A Subject Alternative Name (SAN) can be signed in to a cluster server certificate. A SAN is usually used by the client to verify the server validity in TLS handshakes. Sp", + "doc_type":"usermanual2", + "kw":"Customizing a Cluster Certificate SAN,Using kubectl to Run a Cluster,User Guide", + "title":"Customizing a Cluster Certificate SAN", + "githuburl":"" + }, + { + "uri":"cce_10_0139.html", + "product_code":"cce", + "code":"30", "des":"getThe get command displays one or many resources of a cluster.This command prints a table of the most important information about all resources, including cluster nodes,", "doc_type":"usermanual2", "kw":"Common kubectl Commands,Using kubectl to Run a Cluster,User Guide", @@ -190,29 +300,9 @@ "githuburl":"" }, { - "uri":"cce_01_0023.html", + "uri":"cce_10_0215.html", "product_code":"cce", - "code":"20", - "des":"Before running kubectl commands, you should have the kubectl development skills and understand the kubectl operations. For details, see Kubernetes API and kubectl CLI.Go ", - "doc_type":"usermanual2", - "kw":"kubectl,Affinity,Anti-affinity,Workload Access Mode,Advanced Workload Settings,Configuration Center,", - "title":"kubectl Usage Guide", - "githuburl":"" - }, - { - "uri":"cce_01_0157.html", - "product_code":"cce", - "code":"21", - "des":"The Cluster Auto Scaling feature allows CCE to automatically scale out a cluster (adding worker nodes to a cluster) according to custom policies when workloads cannot be ", - "doc_type":"usermanual2", - "kw":"scale out,Cooldown Period,Metric-based policy,Scheduled policy,Periodic policy,Setting Cluster Auto ", - "title":"Setting Cluster Auto Scaling", - "githuburl":"" - }, - { - "uri":"cce_01_0215.html", - "product_code":"cce", - "code":"22", + "code":"31", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Upgrading a Cluster", @@ -220,49 +310,49 @@ "githuburl":"" }, { - "uri":"cce_01_0197.html", + "uri":"cce_10_0197.html", "product_code":"cce", - "code":"23", + "code":"32", "des":"To enable interoperability from one Kubernetes installation to the next, you must upgrade your Kubernetes clusters before the maintenance period ends.After the latest Kub", "doc_type":"usermanual2", - "kw":"Overview,Upgrading a Cluster,User Guide", - "title":"Overview", + "kw":"Upgrade Overview,Upgrading a Cluster,User Guide", + "title":"Upgrade Overview", "githuburl":"" }, { - "uri":"cce_01_0302.html", + "uri":"cce_10_0302.html", "product_code":"cce", - "code":"24", - "des":"Before the upgrade, you can check whether your cluster can be upgraded and which versions are available on the CCE console. For details, see Overview.Upgraded clusters ca", + "code":"33", + "des":"Before the upgrade, you can check whether your cluster can be upgraded and which versions are available on the CCE console. For details, see Upgrade Overview.Upgraded clu", "doc_type":"usermanual2", "kw":"Before You Start,Upgrading a Cluster,User Guide", "title":"Before You Start", "githuburl":"" }, { - "uri":"cce_01_0120.html", + "uri":"cce_10_0120.html", "product_code":"cce", - "code":"25", + "code":"34", "des":"You can upgrade your clusters to a newer Kubernetes version on the CCE console.Before the upgrade, learn about the target version to which each CCE cluster can be upgrade", "doc_type":"usermanual2", - "kw":"Performing Replace/Rolling Upgrade (v1.13 and Earlier),Upgrading a Cluster,User Guide", - "title":"Performing Replace/Rolling Upgrade (v1.13 and Earlier)", + "kw":"Performing Replace/Rolling Upgrade,Upgrading a Cluster,User Guide", + "title":"Performing Replace/Rolling Upgrade", "githuburl":"" }, { - "uri":"cce_01_0301.html", + "uri":"cce_10_0301.html", "product_code":"cce", - "code":"26", - "des":"On the CCE console, You can perform an in-place cluster upgrade to use new cluster features.Before the upgrade, learn about the target version to which each CCE cluster c", + "code":"35", + "des":"You can upgrade your clusters to a newer version on the CCE console.Before the upgrade, learn about the target version to which each CCE cluster can be upgraded in what w", "doc_type":"usermanual2", - "kw":"Performing In-place Upgrade (v1.15 and Later),Upgrading a Cluster,User Guide", - "title":"Performing In-place Upgrade (v1.15 and Later)", + "kw":"Performing In-place Upgrade,Upgrading a Cluster,User Guide", + "title":"Performing In-place Upgrade", "githuburl":"" }, { - "uri":"cce_01_0210.html", + "uri":"cce_10_0210.html", "product_code":"cce", - "code":"27", + "code":"36", "des":"This section describes how to migrate services from a cluster of an earlier version to a cluster of a later version in CCE.This operation is applicable when a cross-versi", "doc_type":"usermanual2", "kw":"Migrating Services Across Clusters of Different Versions,Upgrading a Cluster,User Guide", @@ -270,19 +360,9 @@ "githuburl":"" }, { - "uri":"cce_01_0068.html", + "uri":"cce_10_0031.html", "product_code":"cce", - "code":"28", - "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. 
To enable interoperability from one Kubernetes installation to the nex", - "doc_type":"usermanual2", - "kw":"CCE Kubernetes Release Notes,Upgrading a Cluster,User Guide", - "title":"CCE Kubernetes Release Notes", - "githuburl":"" - }, - { - "uri":"cce_01_0031.html", - "product_code":"cce", - "code":"29", + "code":"37", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Managing a Cluster", @@ -290,9 +370,19 @@ "githuburl":"" }, { - "uri":"cce_01_0212.html", + "uri":"cce_10_0213.html", "product_code":"cce", - "code":"30", + "code":"38", + "des":"CCE allows you to manage cluster parameters, through which you can let core components work under your very requirements.This function is supported only in clusters of v1", + "doc_type":"usermanual2", + "kw":"Managing Cluster Components,Managing a Cluster,User Guide", + "title":"Managing Cluster Components", + "githuburl":"" + }, + { + "uri":"cce_10_0212.html", + "product_code":"cce", + "code":"39", "des":"This section describes how to delete a cluster.Deleting a cluster will delete the nodes in the cluster (excluding accepted nodes), data disks attached to the nodes, workl", "doc_type":"usermanual2", "kw":"Deleting a Cluster,Managing a Cluster,User Guide", @@ -300,69 +390,49 @@ "githuburl":"" }, { - "uri":"cce_01_0214.html", + "uri":"cce_10_0214.html", "product_code":"cce", - "code":"31", - "des":"If you do not need to use a cluster temporarily, you are advised to hibernate the cluster to save cluster management costs.After a cluster is hibernated, resources such a", + "code":"40", + "des":"If you do not need to use a cluster temporarily, you are advised to hibernate the cluster.After a cluster is hibernated, resources such as workloads cannot be created or ", "doc_type":"usermanual2", "kw":"Hibernating and Waking Up a Cluster,Managing a Cluster,User Guide", "title":"Hibernating and Waking Up a Cluster", "githuburl":"" }, { - "uri":"cce_01_0213.html", + "uri":"cce_10_0602.html", "product_code":"cce", - "code":"32", - "des":"CCE clusters allow you to manage Kubernetes parameters, through which you can let core components work under your very requirements.This function is supported only in clu", + "code":"41", + "des":"If overload control is enabled, concurrent requests are dynamically controlled based on the resource pressure of master nodes to keep them and the cluster available.The c", "doc_type":"usermanual2", - "kw":"Configuring Kubernetes Parameters,Managing a Cluster,User Guide", - "title":"Configuring Kubernetes Parameters", + "kw":"Cluster Overload Control,Managing a Cluster,User Guide", + "title":"Cluster Overload Control", "githuburl":"" }, { - "uri":"cce_01_0175.html", + "uri":"cce_10_0175.html", "product_code":"cce", - "code":"33", - "des":"Before accessing cluster resources through open-source Kubernetes APIs, obtain the cluster's certificate.The downloaded certificate contains three files: client.key, clie", + "code":"42", + "des":"This section describes how to obtain the cluster certificate from the console and use it to access Kubernetes clusters.The downloaded certificate contains three files: cl", "doc_type":"usermanual2", "kw":"Obtaining a Cluster Certificate,Clusters,User Guide", "title":"Obtaining a Cluster Certificate", "githuburl":"" }, { - "uri":"cce_01_0085.html", + 
"uri":"cce_10_0403.html", "product_code":"cce", - "code":"34", - "des":"This section describes how to control permissions on resources in a cluster, for example, allow user A to read and write application data in a namespace, and user B to on", + "code":"43", + "des":"CCE allows you to change the number of nodes managed in a cluster.This function is supported for clusters of v1.15 and later versions.Starting from v1.15.11, the number o", "doc_type":"usermanual2", - "kw":"Controlling Cluster Permissions,Clusters,User Guide", - "title":"Controlling Cluster Permissions", + "kw":"Changing Cluster Scale,Clusters,User Guide", + "title":"Changing Cluster Scale", "githuburl":"" }, { - "uri":"cce_01_0347.html", + "uri":"cce_10_0183.html", "product_code":"cce", - "code":"35", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Cluster Parameters", - "title":"Cluster Parameters", - "githuburl":"" - }, - { - "uri":"cce_01_0348.html", - "product_code":"cce", - "code":"36", - "des":"The maximum number of pods that can be created on a node is determined by the following parameters:Number of container IP addresses that can be allocated on a node (alpha", - "doc_type":"usermanual2", - "kw":"Maximum Number of Pods That Can Be Created on a Node,Cluster Parameters,User Guide", - "title":"Maximum Number of Pods That Can Be Created on a Node", - "githuburl":"" - }, - { - "uri":"cce_01_0183.html", - "product_code":"cce", - "code":"37", + "code":"44", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"node labels", @@ -370,89 +440,109 @@ "githuburl":"" }, { - "uri":"cce_01_0180.html", + "uri":"cce_10_0180.html", "product_code":"cce", - "code":"38", - "des":"A container cluster consists of a set of worker machines, called nodes, that run containerized applications. A node can be a virtual machine (VM) or a physical machine (P", + "code":"45", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", - "kw":"Overview,Nodes,User Guide", - "title":"Overview", + "kw":"Node Overview", + "title":"Node Overview", "githuburl":"" }, { - "uri":"cce_01_0033.html", + "uri":"cce_10_0461.html", "product_code":"cce", - "code":"39", - "des":"A node is a virtual or physical machine that provides computing resources. Sufficient nodes must be available in your project to ensure that operations, such as creating ", + "code":"46", + "des":"A container cluster consists of a set of worker machines, called nodes, that run containerized applications. 
A node can be a virtual machine (VM) or a physical machine (P", + "doc_type":"usermanual2", + "kw":"Precautions for Using a Node,Node Overview,User Guide", + "title":"Precautions for Using a Node", + "githuburl":"" + }, + { + "uri":"cce_10_0462.html", + "product_code":"cce", + "code":"47", + "des":"Container engines, one of the most important components of Kubernetes, manage the lifecycle of images and containers. The kubelet interacts with a container runtime throu", + "doc_type":"usermanual2", + "kw":"Container Engine,Node Overview,User Guide", + "title":"Container Engine", + "githuburl":"" + }, + { + "uri":"cce_10_0463.html", + "product_code":"cce", + "code":"48", + "des":"The most significant difference is that each Kata container (pod) runs on an independent micro-VM, has an independent OS kernel, and is securely isolated at the virtualiz", + "doc_type":"usermanual2", + "kw":"Kata Containers and Common Containers,Node Overview,User Guide", + "title":"Kata Containers and Common Containers", + "githuburl":"" + }, + { + "uri":"cce_10_0348.html", + "product_code":"cce", + "code":"49", + "des":"The maximum number of pods that can be created on a node is determined by the following parameters:Number of container IP addresses that can be allocated on a node (alpha", + "doc_type":"usermanual2", + "kw":"Maximum Number of Pods That Can Be Created on a Node,Node Overview,User Guide", + "title":"Maximum Number of Pods That Can Be Created on a Node", + "githuburl":"" + }, + { + "uri":"cce_10_0178.html", + "product_code":"cce", + "code":"50", + "des":"Some of the resources on the node need to run some necessary Kubernetes system components and resources to make the node as part of your cluster. Therefore, the total num", + "doc_type":"usermanual2", + "kw":"node,Kubernetes,Formula for Calculating the Reserved Resources of a Node,Node Overview,User Guide", + "title":"Formula for Calculating the Reserved Resources of a Node", + "githuburl":"" + }, + { + "uri":"cce_10_0341.html", + "product_code":"cce", + "code":"51", + "des":"This section describes how to allocate data disk space.When creating a node, you need to configure a data disk whose capacity is greater than or equal to 100GB for the no", + "doc_type":"usermanual2", + "kw":"Data Disk Space Allocation,Node Overview,User Guide", + "title":"Data Disk Space Allocation", + "githuburl":"" + }, + { + "uri":"cce_10_0363.html", + "product_code":"cce", + "code":"52", + "des":"At least one cluster has been created.A key pair has been created for identity authentication upon remote node login.The node has 2-core or higher CPU, 4 GB or larger mem", "doc_type":"usermanual2", "kw":"Creating a Node,Nodes,User Guide", "title":"Creating a Node", "githuburl":"" }, { - "uri":"cce_01_0363.html", + "uri":"cce_10_0198.html", "product_code":"cce", - "code":"40", - "des":"At least one CCE Turbo cluster is available. 
For details on how to create a cluster, see Creating a CCE Turbo Cluster.A key pair has been created for identity authenticat", + "code":"53", + "des":"In CCE, you can Creating a Node or add existing nodes (ECSs) into your cluster.While an ECS is being accepted into a cluster, the operating system of the ECS will be rese", "doc_type":"usermanual2", - "kw":"Creating a Node in a CCE Turbo Cluster,Nodes,User Guide", - "title":"Creating a Node in a CCE Turbo Cluster", + "kw":"Adding Nodes for Management,Nodes,User Guide", + "title":"Adding Nodes for Management", "githuburl":"" }, { - "uri":"cce_01_0338.html", + "uri":"cce_10_0338.html", "product_code":"cce", - "code":"41", - "des":"Removing a node from a cluster in CCE will re-install the node OS and clear CCE components on the node.Removing a node will not delete the server (ECS) corresponding to t", + "code":"54", + "des":"Removing a node from a cluster will re-install the node OS and clear CCE components on the node.Removing a node will not delete the server corresponding to the node. You ", "doc_type":"usermanual2", "kw":"Removing a Node,Nodes,User Guide", "title":"Removing a Node", "githuburl":"" }, { - "uri":"cce_01_0185.html", + "uri":"cce_10_0003.html", "product_code":"cce", - "code":"42", - "des":"If you use SSH to log in to a node (an ECS), ensure that the ECS already has an EIP (a public IP address).Only login to a running ECS is allowed.Only the user linux can l", - "doc_type":"usermanual2", - "kw":"Logging In to a Node,Nodes,User Guide", - "title":"Logging In to a Node", - "githuburl":"" - }, - { - "uri":"cce_01_0004.html", - "product_code":"cce", - "code":"43", - "des":"You can add different labels to nodes and define different attributes for labels. By using these node labels, you can quickly understand the characteristics of each node.", - "doc_type":"usermanual2", - "kw":"node labels,Inherent Label of a Node,Deleting a Node Label,Managing Node Labels,Nodes,User Guide", - "title":"Managing Node Labels", - "githuburl":"" - }, - { - "uri":"cce_01_0184.html", - "product_code":"cce", - "code":"44", - "des":"Each node in a cluster is a cloud server or physical machine. After a cluster node is created, you can change the cloud server name or specifications as required.Some inf", - "doc_type":"usermanual2", - "kw":"Synchronizing Node Data,Nodes,User Guide", - "title":"Synchronizing Node Data", - "githuburl":"" - }, - { - "uri":"cce_01_0352.html", - "product_code":"cce", - "code":"45", - "des":"Taints enable a node to repel specific pods to prevent these pods from being scheduled to the node.A taint is a key-value pair associated with an effect. 
The following ef", - "doc_type":"usermanual2", - "kw":"Configuring Node Scheduling (Tainting),Nodes,User Guide", - "title":"Configuring Node Scheduling (Tainting)", - "githuburl":"" - }, - { - "uri":"cce_01_0003.html", - "product_code":"cce", - "code":"46", + "code":"55", "des":"You can reset a node to modify the node configuration, such as the node OS and login mode.Resetting a node will reinstall the node OS and the Kubernetes software on the n", "doc_type":"usermanual2", "kw":"Resetting a Node,Nodes,User Guide", @@ -460,9 +550,49 @@ "githuburl":"" }, { - "uri":"cce_01_0186.html", + "uri":"cce_10_0185.html", "product_code":"cce", - "code":"47", + "code":"56", + "des":"If you use SSH to log in to a node (an ECS), ensure that the ECS already has an EIP (a public IP address).Only login to a running ECS is allowed.Only the user linux can l", + "doc_type":"usermanual2", + "kw":"Logging In to a Node,Nodes,User Guide", + "title":"Logging In to a Node", + "githuburl":"" + }, + { + "uri":"cce_10_0004.html", + "product_code":"cce", + "code":"57", + "des":"You can add different labels to nodes and define different attributes for labels. By using these node labels, you can quickly understand the characteristics of each node.", + "doc_type":"usermanual2", + "kw":"node labels,Inherent Label of a Node,Managing Node Labels,Nodes,User Guide", + "title":"Managing Node Labels", + "githuburl":"" + }, + { + "uri":"cce_10_0352.html", + "product_code":"cce", + "code":"58", + "des":"Taints enable a node to repel specific pods to prevent these pods from being scheduled to the node.A taint is a key-value pair associated with an effect. The following ef", + "doc_type":"usermanual2", + "kw":"Managing Node Taints,Nodes,User Guide", + "title":"Managing Node Taints", + "githuburl":"" + }, + { + "uri":"cce_10_0184.html", + "product_code":"cce", + "code":"59", + "des":"Each node in a cluster is a cloud server or physical machine. After a cluster node is created, you can change the cloud server name or specifications as required.Some inf", + "doc_type":"usermanual2", + "kw":"Synchronizing Data with Cloud Servers,Nodes,User Guide", + "title":"Synchronizing Data with Cloud Servers", + "githuburl":"" + }, + { + "uri":"cce_10_0186.html", + "product_code":"cce", + "code":"60", "des":"When a node in a CCE cluster is deleted, services running on the node will also be deleted. Exercise caution when performing this operation.After a CCE cluster is deleted", "doc_type":"usermanual2", "kw":"Deleting a Node,Nodes,User Guide", @@ -470,9 +600,9 @@ "githuburl":"" }, { - "uri":"cce_01_0036.html", + "uri":"cce_10_0036.html", "product_code":"cce", - "code":"48", + "code":"61", "des":"After a node in the cluster is stopped, services on the node are also stopped. Before stopping a node, ensure that discontinuity of the services on the node will not resu", "doc_type":"usermanual2", "kw":"Stopping a Node,Nodes,User Guide", @@ -480,9 +610,9 @@ "githuburl":"" }, { - "uri":"cce_01_0276.html", + "uri":"cce_10_0276.html", "product_code":"cce", - "code":"49", + "code":"62", "des":"In a rolling upgrade, a new node is created, existing workloads are migrated to the new node, and then the old node is deleted. 
Figure 1 shows the migration process.The o", "doc_type":"usermanual2", "kw":"Performing Rolling Upgrade for Nodes,Nodes,User Guide", @@ -490,49 +620,9 @@ "githuburl":"" }, { - "uri":"cce_01_0178.html", + "uri":"cce_10_0035.html", "product_code":"cce", - "code":"50", - "des":"Some of the resources on the node need to run some necessary Kubernetes system components and resources to make the node as part of your cluster. Therefore, the total num", - "doc_type":"usermanual2", - "kw":"node,Kubernetes,Formula for Calculating the Reserved Resources of a Node,Nodes,User Guide", - "title":"Formula for Calculating the Reserved Resources of a Node", - "githuburl":"" - }, - { - "uri":"cce_01_0200.html", - "product_code":"cce", - "code":"51", - "des":"This section describes how to check whether there are available raw disks and Linux LVM disk partitions and how to create Linux LVM disk partitions.To improve the system ", - "doc_type":"usermanual2", - "kw":"direct-lvm,raw disk,Creating a Linux LVM Disk Partition for Docker,Nodes,User Guide", - "title":"Creating a Linux LVM Disk Partition for Docker", - "githuburl":"" - }, - { - "uri":"cce_01_0341.html", - "product_code":"cce", - "code":"52", - "des":"When creating a node, you need to configure data disks for the node.The data disk is divided into Kubernetes space and user space. The user space defines the space that i", - "doc_type":"usermanual2", - "kw":"Data Disk Space Allocation,Nodes,User Guide", - "title":"Data Disk Space Allocation", - "githuburl":"" - }, - { - "uri":"cce_01_0344.html", - "product_code":"cce", - "code":"53", - "des":"You can use the pre-installation script feature to configure CCE cluster nodes (ECSs).When creating a node in a cluster of v1.13.10 or later, if a data disk is not manage", - "doc_type":"usermanual2", - "kw":"Adding a Second Data Disk to a Node in a CCE Cluster,Nodes,User Guide", - "title":"Adding a Second Data Disk to a Node in a CCE Cluster", - "githuburl":"" - }, - { - "uri":"cce_01_0035.html", - "product_code":"cce", - "code":"54", + "code":"63", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Node Pools", @@ -540,9 +630,9 @@ "githuburl":"" }, { - "uri":"cce_01_0081.html", + "uri":"cce_10_0081.html", "product_code":"cce", - "code":"55", + "code":"64", "des":"CCE introduces node pools to help you better manage nodes in Kubernetes clusters. A node pool contains one node or a group of nodes with identical configuration in a clus", "doc_type":"usermanual2", "kw":"Deploying a Workload in a Specified Node Pool,Node Pool Overview,Node Pools,User Guide", @@ -550,19 +640,19 @@ "githuburl":"" }, { - "uri":"cce_01_0012.html", + "uri":"cce_10_0012.html", "product_code":"cce", - "code":"56", - "des":"This section describes how to create a node pool and perform operations on the node pool. For details about how a node pool works, see Node Pool Overview.For details abou", + "code":"65", + "des":"This section describes how to create a node pool and perform operations on the node pool. 
For details about how a node pool works, see Node Pool Overview.The autoscaler a", "doc_type":"usermanual2", "kw":"Creating a Node Pool,Node Pools,User Guide", "title":"Creating a Node Pool", "githuburl":"" }, { - "uri":"cce_01_0222.html", + "uri":"cce_10_0222.html", "product_code":"cce", - "code":"57", + "code":"66", "des":"The default node pool DefaultPool does not support the following management operations.CCE allows you to highly customize Kubernetes parameter settings on core components", "doc_type":"usermanual2", "kw":"Managing a Node Pool,Node Pools,User Guide", @@ -570,19 +660,19 @@ "githuburl":"" }, { - "uri":"cce_01_0046.html", + "uri":"cce_10_0046.html", "product_code":"cce", - "code":"58", + "code":"67", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", - "kw":"scaling policies", + "kw":"Workloads", "title":"Workloads", "githuburl":"" }, { - "uri":"cce_01_0006.html", + "uri":"cce_10_0006.html", "product_code":"cce", - "code":"59", + "code":"68", "des":"A workload is an application running on Kubernetes. No matter how many components are there in your workload, you can run it in a group of Kubernetes pods. A workload is ", "doc_type":"usermanual2", "kw":"Overview,Workloads,User Guide", @@ -590,89 +680,69 @@ "githuburl":"" }, { - "uri":"cce_01_0047.html", + "uri":"cce_10_0047.html", "product_code":"cce", - "code":"60", + "code":"69", "des":"Deployments are workloads (for example, Nginx) that do not store any data or status. You can create Deployments on the CCE console or by running kubectl commands.Before c", "doc_type":"usermanual2", - "kw":"Create YAML,create a workload using kubectl,Creating a Deployment,Workloads,User Guide", + "kw":"create a workload using kubectl,Creating a Deployment,Workloads,User Guide", "title":"Creating a Deployment", "githuburl":"" }, { - "uri":"cce_01_0048.html", + "uri":"cce_10_0048.html", "product_code":"cce", - "code":"61", + "code":"70", "des":"StatefulSets are a type of workloads whose data or status is stored while they are running. For example, MySQL is a StatefulSet because it needs to store new data.A conta", "doc_type":"usermanual2", - "kw":"Create YAML,Using kubectl,Creating a StatefulSet,Workloads,User Guide", + "kw":"Using kubectl,Creating a StatefulSet,Workloads,User Guide", "title":"Creating a StatefulSet", "githuburl":"" }, { - "uri":"cce_01_0216.html", + "uri":"cce_10_0216.html", "product_code":"cce", - "code":"62", + "code":"71", "des":"CCE provides deployment and management capabilities for multiple types of containers and supports features of container workloads, including creation, configuration, moni", "doc_type":"usermanual2", - "kw":"Creating a DaemonSet,Workloads,User Guide", + "kw":"create a workload using kubectl,Creating a DaemonSet,Workloads,User Guide", "title":"Creating a DaemonSet", "githuburl":"" }, { - "uri":"cce_01_0150.html", + "uri":"cce_10_0150.html", "product_code":"cce", - "code":"63", + "code":"72", "des":"Jobs are short-lived and run for a certain time to completion. They can be executed immediately after being deployed. 
It is completed after it exits normally (exit 0).A j", "doc_type":"usermanual2", - "kw":"private container image,My Images,authenticated,Shared Images,Creating a Job,Workloads,User Guide", + "kw":"Creating a Job,Workloads,User Guide", "title":"Creating a Job", "githuburl":"" }, { - "uri":"cce_01_0151.html", + "uri":"cce_10_0151.html", "product_code":"cce", - "code":"64", + "code":"73", "des":"A cron job runs on a repeating schedule. You can perform time synchronization for all active nodes at a fixed time point.A cron job runs periodically at the specified tim", "doc_type":"usermanual2", - "kw":"time synchronization,private container image,Concurrency Policy,Forbid,Allow,Replace,Schedule,My Ima", + "kw":"time synchronization,Creating a Cron Job,Workloads,User Guide", "title":"Creating a Cron Job", "githuburl":"" }, { - "uri":"cce_01_0013.html", + "uri":"cce_10_0007.html", "product_code":"cce", - "code":"65", - "des":"A pod is the smallest and simplest unit in the Kubernetes object model that you create or deploy. A pod encapsulates an application's container (or, in some cases, multip", + "code":"74", + "des":"After a workload is created, you can upgrade, monitor, roll back, or delete the workload, as well as edit its YAML file.Workload/Job managementOperationDescriptionMonitor", "doc_type":"usermanual2", - "kw":"Deleting a Pod,Managing Pods,Workloads,User Guide", - "title":"Managing Pods", - "githuburl":"" - }, - { - "uri":"cce_01_0007.html", - "product_code":"cce", - "code":"66", - "des":"After a workload is created, you can scale, upgrade, monitor, roll back, or delete the workload, as well as edit its YAML file.Workload/Job managementOperationDescription", - "doc_type":"usermanual2", - "kw":"Add Label,Managing Workloads and Jobs,Workloads,User Guide", + "kw":"Managing Workloads and Jobs,Workloads,User Guide", "title":"Managing Workloads and Jobs", "githuburl":"" }, { - "uri":"cce_01_0057.html", + "uri":"cce_10_0130.html", "product_code":"cce", - "code":"67", - "des":"After scaling policies are defined, pods can be automatically added or deleted based on resource changes, fixed time, and fixed periods. You do not need to manually adjus", - "doc_type":"usermanual2", - "kw":"scaling policies,Metric-based policy,Scheduled policy,Periodic policy,Scaling a Workload,Workloads,U", - "title":"Scaling a Workload", - "githuburl":"" - }, - { - "uri":"cce_01_0130.html", - "product_code":"cce", - "code":"68", + "code":"75", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Configuring a Container", @@ -680,9 +750,19 @@ "githuburl":"" }, { - "uri":"cce_01_0009.html", + "uri":"cce_10_0396.html", "product_code":"cce", - "code":"69", + "code":"76", + "des":"A workload is an abstract model of a group of pods. One pod can encapsulate one or more containers. 
You can click Add Container in the upper right corner to add multiple ", + "doc_type":"usermanual2", + "kw":"Setting Basic Container Information,Configuring a Container,User Guide", + "title":"Setting Basic Container Information", + "githuburl":"" + }, + { + "uri":"cce_10_0009.html", + "product_code":"cce", + "code":"77", "des":"CCE allows you to create workloads using images pulled from third-party image repositories.Generally, a third-party image repository can be accessed only after authentica", "doc_type":"usermanual2", "kw":"Using a Third-Party Image,Configuring a Container,User Guide", @@ -690,189 +770,159 @@ "githuburl":"" }, { - "uri":"cce_01_0163.html", + "uri":"cce_10_0163.html", "product_code":"cce", - "code":"70", - "des":"CCE allows you to set resource limits for added containers during workload creation. You can request and limit the CPU and memory quotas used by each pod in the workload.", + "code":"78", + "des":"CCE allows you to set resource limits for added containers during workload creation. You can apply for and limit the CPU and memory quotas used by each pod in a workload.", "doc_type":"usermanual2", "kw":"Setting Container Specifications,Configuring a Container,User Guide", "title":"Setting Container Specifications", "githuburl":"" }, { - "uri":"cce_01_0105.html", + "uri":"cce_10_0105.html", "product_code":"cce", - "code":"71", + "code":"79", "des":"CCE provides callback functions for the lifecycle management of containerized applications. For example, if you want a container to perform a certain operation before sto", "doc_type":"usermanual2", - "kw":"Start Command,Post-Start,Pre-Stop,CLI,CLI,Setting Container Lifecycle Parameters,Configuring a Conta", + "kw":"Startup Command,Post-Start,Pre-Stop,Setting Container Lifecycle Parameters,Configuring a Container,U", "title":"Setting Container Lifecycle Parameters", "githuburl":"" }, { - "uri":"cce_01_0008.html", + "uri":"cce_10_0112.html", "product_code":"cce", - "code":"72", - "des":"When creating a workload or job, you can use an image to specify the processes running in the container.By default, the image runs the default command. To run a specific ", + "code":"80", + "des":"Health check regularly checks the health status of containers during container running. If the health check function is not configured, a pod cannot detect application ex", "doc_type":"usermanual2", - "kw":"Commands and parameters used to run a container,Setting the Startup Command,Setting Container Startu", - "title":"Setting Container Startup Commands", - "githuburl":"" - }, - { - "uri":"cce_01_0112.html", - "product_code":"cce", - "code":"73", - "des":"Health check regularly checks the health status of containers during container running. If the health check function is not configured, a pod cannot detect service except", - "doc_type":"usermanual2", - "kw":"Health check,Health Check Methods,HTTP request,TCP port,CLI,Setting Health Check for a Container,Con", + "kw":"Health check,HTTP request,TCP port,CLI,Setting Health Check for a Container,Configuring a Container,", "title":"Setting Health Check for a Container", "githuburl":"" }, { - "uri":"cce_01_0113.html", + "uri":"cce_10_0113.html", "product_code":"cce", - "code":"74", + "code":"81", "des":"An environment variable is a variable whose value can affect the way a running container will behave. 
You can modify environment variables even after workloads are deploy", "doc_type":"usermanual2", - "kw":"Manually add environment variables,import environment variables from a secret,import environment var", + "kw":"Setting an Environment Variable,Configuring a Container,User Guide", "title":"Setting an Environment Variable", "githuburl":"" }, { - "uri":"cce_01_0149.html", - "product_code":"cce", - "code":"75", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Affinity and Anti-Affinity Scheduling", - "title":"Affinity and Anti-Affinity Scheduling", - "githuburl":"" - }, - { - "uri":"cce_01_0051.html", - "product_code":"cce", - "code":"76", - "des":"CCE supports custom and simple scheduling policies. A custom scheduling policy allows you to customize node affinity, workload affinity, and workload anti-affinity to mee", - "doc_type":"usermanual2", - "kw":"Simple Scheduling Policies,Workload-AZ affinity,Workload-node affinity,Workload-workload affinity,Sc", - "title":"Scheduling Policy Overview", - "githuburl":"" - }, - { - "uri":"cce_01_0231.html", - "product_code":"cce", - "code":"77", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Custom Scheduling Policies", - "title":"Custom Scheduling Policies", - "githuburl":"" - }, - { - "uri":"cce_01_0232.html", - "product_code":"cce", - "code":"78", - "des":"This section uses Nginx as an example to describe how to configure node affinity.PrerequisitesA workload that uses the nginx container image has been deployed on a node.P", - "doc_type":"usermanual2", - "kw":"Node Affinity,Custom Scheduling Policies,User Guide", - "title":"Node Affinity", - "githuburl":"" - }, - { - "uri":"cce_01_0233.html", - "product_code":"cce", - "code":"79", - "des":"Workload affinity determines the pods as which the target workload will be deployed in the same topology domain.There are two types of pod affinity rules: Required (hard ", - "doc_type":"usermanual2", - "kw":"Workload Affinity,Custom Scheduling Policies,User Guide", - "title":"Workload Affinity", - "githuburl":"" - }, - { - "uri":"cce_01_0234.html", - "product_code":"cce", - "code":"80", - "des":"Workload anti-affinity determines the pods from which the target workload will be deployed in a different topology domain.There are two types of pod anti-affinity rules: ", - "doc_type":"usermanual2", - "kw":"Workload Anti-Affinity,Custom Scheduling Policies,User Guide", - "title":"Workload Anti-Affinity", - "githuburl":"" - }, - { - "uri":"cce_01_0230.html", - "product_code":"cce", - "code":"81", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Simple Scheduling Policies", - "title":"Simple Scheduling Policies", - "githuburl":"" - }, - { - "uri":"cce_01_0228.html", + "uri":"cce_10_0353.html", "product_code":"cce", "code":"82", - "des":"The created workload will be deployed in the selected AZ.This section uses an Nginx workload as an example to describe how to create a workload using kubectl.Prerequisite", + "des":"When a workload is created, the container image is pulled from the image repository to the node. The image is also pulled when the workload is restarted or upgraded.By de", "doc_type":"usermanual2", - "kw":"Workload-AZ Affinity,Simple Scheduling Policies,User Guide", - "title":"Workload-AZ Affinity", + "kw":"Configuring an Image Pull Policy,Configuring a Container,User Guide", + "title":"Configuring an Image Pull Policy", "githuburl":"" }, { - "uri":"cce_01_0229.html", + "uri":"cce_10_0354.html", "product_code":"cce", "code":"83", - "des":"The created workload is not deployed on the selected AZ.This section uses Nginx as an example to describe how to create a workload using kubectl.PrerequisitesThe ECS wher", + "des":"When creating a workload, you can configure containers to use the same time zone as the node. You can enable time zone synchronization when creating a workload.The time z", "doc_type":"usermanual2", - "kw":"Workload-AZ Anti-Affinity,Simple Scheduling Policies,User Guide", - "title":"Workload-AZ Anti-Affinity", + "kw":"Configuring Time Zone Synchronization,Configuring a Container,User Guide", + "title":"Configuring Time Zone Synchronization", "githuburl":"" }, { - "uri":"cce_01_0225.html", + "uri":"cce_10_0397.html", "product_code":"cce", "code":"84", - "des":"If you select multiple nodes, the system automatically chooses one of them during workload deployment.This section uses an Nginx workload as an example to describe how to", + "des":"In actual applications, upgrade is a common operation. A Deployment, StatefulSet, or DaemonSet can easily support application upgrade.You can set different upgrade polici", "doc_type":"usermanual2", - "kw":"Workload-Node Affinity,Simple Scheduling Policies,User Guide", - "title":"Workload-Node Affinity", + "kw":"Configuring the Workload Upgrade Policy,Configuring a Container,User Guide", + "title":"Configuring the Workload Upgrade Policy", "githuburl":"" }, { - "uri":"cce_01_0226.html", + "uri":"cce_10_0232.html", "product_code":"cce", "code":"85", - "des":"If you select multiple nodes, the workload will not be deployed on these nodes.This section uses Nginx as an example to describe how to create a workload using kubectl.Pr", + "des":"A nodeSelector provides a very simple way to constrain pods to nodes with particular labels, as mentioned in Creating a DaemonSet. 
The affinity and anti-affinity feature ", "doc_type":"usermanual2", - "kw":"Workload-Node Anti-Affinity,Simple Scheduling Policies,User Guide", - "title":"Workload-Node Anti-Affinity", + "kw":"Scheduling Policy (Affinity/Anti-affinity),Configuring a Container,User Guide", + "title":"Scheduling Policy (Affinity/Anti-affinity)", "githuburl":"" }, { - "uri":"cce_01_0220.html", + "uri":"cce_10_0345.html", "product_code":"cce", "code":"86", - "des":"The workload to be created will be deployed on the same node as the selected affinity workloads.This section uses Nginx as an example to describe how to create a workload", + "des":"You can use GPUs in CCE containers.A GPU node has been created. For details, see Creating a Node.The gpu-beta add-on has been installed. During the installation, select t", "doc_type":"usermanual2", - "kw":"Workload-Workload Affinity,Simple Scheduling Policies,User Guide", - "title":"Workload-Workload Affinity", + "kw":"GPU Scheduling,Workloads,User Guide", + "title":"GPU Scheduling", "githuburl":"" }, { - "uri":"cce_01_0227.html", + "uri":"cce_10_0551.html", "product_code":"cce", "code":"87", - "des":"The workload to be created and the selected workloads will be deployed on different nodes.This section uses Nginx as an example to describe how to create a workload using", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", - "kw":"Workload-Workload Anti-Affinity,Simple Scheduling Policies,User Guide", - "title":"Workload-Workload Anti-Affinity", + "kw":"CPU Core Binding", + "title":"CPU Core Binding", "githuburl":"" }, { - "uri":"cce_01_0020.html", + "uri":"cce_10_0351.html", "product_code":"cce", "code":"88", + "des":"By default, kubelet uses CFS quotas to enforce pod CPU limits. When the node runs many CPU-bound pods, the workload can move to different CPU cores depending on whether t", + "doc_type":"usermanual2", + "kw":"Binding CPU Cores,CPU Core Binding,User Guide", + "title":"Binding CPU Cores", + "githuburl":"" + }, + { + "uri":"cce_10_0386.html", + "product_code":"cce", + "code":"89", + "des":"CCE allows you to add annotations to a YAML file to realize some advanced pod functions. The following table describes the annotations you can add.When you create a workl", + "doc_type":"usermanual2", + "kw":"Pod Labels and Annotations,Workloads,User Guide", + "title":"Pod Labels and Annotations", + "githuburl":"" + }, + { + "uri":"cce_10_0423.html", + "product_code":"cce", + "code":"90", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Volcano Scheduling", + "title":"Volcano Scheduling", + "githuburl":"" + }, + { + "uri":"cce_10_0384.html", + "product_code":"cce", + "code":"91", + "des":"Jobs can be classified into online jobs and offline jobs based on whether services are always online.Online job: Such jobs run for a long time, with regular traffic surge", + "doc_type":"usermanual2", + "kw":"Hybrid Deployment of Online and Offline Jobs,Volcano Scheduling,User Guide", + "title":"Hybrid Deployment of Online and Offline Jobs", + "githuburl":"" + }, + { + "uri":"cce_10_0288.html", + "product_code":"cce", + "code":"92", + "des":"When the Cloud Native Network 2.0 model is used, pods use VPC ENIs or sub-ENIs for networking. You can directly bind security groups and EIPs to pods. CCE provides a cust", + "doc_type":"usermanual2", + "kw":"Security Group Policies,Workloads,User Guide", + "title":"Security Group Policies", + "githuburl":"" + }, + { + "uri":"cce_10_0020.html", + "product_code":"cce", + "code":"93", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Networking", @@ -880,9 +930,9 @@ "githuburl":"" }, { - "uri":"cce_01_0010.html", + "uri":"cce_10_0010.html", "product_code":"cce", - "code":"89", + "code":"94", "des":"You can learn about a cluster network from the following two aspects:What is a cluster network like? A cluster consists of multiple nodes, and pods (or containers) are ru", "doc_type":"usermanual2", "kw":"Overview,Networking,User Guide", @@ -890,9 +940,9 @@ "githuburl":"" }, { - "uri":"cce_01_0280.html", + "uri":"cce_10_0280.html", "product_code":"cce", - "code":"90", + "code":"95", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Container Network Models", @@ -900,19 +950,19 @@ "githuburl":"" }, { - "uri":"cce_01_0281.html", + "uri":"cce_10_0281.html", "product_code":"cce", - "code":"91", - "des":"The container network assigns IP addresses to pods in a cluster and provides networking services. In CCE, you can select the following network models for your cluster:Con", + "code":"96", + "des":"The container network assigns IP addresses to pods in a cluster and provides networking services. In CCE, you can select the following network models for your cluster:Tun", "doc_type":"usermanual2", "kw":"Overview,Container Network Models,User Guide", "title":"Overview", "githuburl":"" }, { - "uri":"cce_01_0282.html", + "uri":"cce_10_0282.html", "product_code":"cce", - "code":"92", + "code":"97", "des":"The container tunnel network is constructed on but independent of the node network through tunnel encapsulation. 
This network model uses VXLAN to encapsulate Ethernet pac", "doc_type":"usermanual2", "kw":"Container Tunnel Network,Container Network Models,User Guide", @@ -920,9 +970,9 @@ "githuburl":"" }, { - "uri":"cce_01_0283.html", + "uri":"cce_10_0283.html", "product_code":"cce", - "code":"93", + "code":"98", "des":"The VPC network uses VPC routing to integrate with the underlying network. This network model is suitable for performance-intensive scenarios. The maximum number of nodes", "doc_type":"usermanual2", "kw":"VPC Network,Container Network Models,User Guide", @@ -930,9 +980,9 @@ "githuburl":"" }, { - "uri":"cce_01_0284.html", + "uri":"cce_10_0284.html", "product_code":"cce", - "code":"94", + "code":"99", "des":"Developed by CCE, Cloud Native Network 2.0 deeply integrates Elastic Network Interfaces (ENIs) and sub-ENIs of Virtual Private Cloud (VPC). Container IP addresses are all", "doc_type":"usermanual2", "kw":"Cloud Native Network 2.0,Container Network Models,User Guide", @@ -940,9 +990,9 @@ "githuburl":"" }, { - "uri":"cce_01_0247.html", + "uri":"cce_10_0247.html", "product_code":"cce", - "code":"95", + "code":"100", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Services", @@ -950,19 +1000,19 @@ "githuburl":"" }, { - "uri":"cce_01_0249.html", + "uri":"cce_10_0249.html", "product_code":"cce", - "code":"96", + "code":"101", "des":"After a pod is created, the following problems may occur if you directly access the pod:The pod can be deleted and recreated at any time by a controller such as a Deploym", "doc_type":"usermanual2", - "kw":"Overview,Services,User Guide", - "title":"Overview", + "kw":"Service Overview,Services,User Guide", + "title":"Service Overview", "githuburl":"" }, { - "uri":"cce_01_0011.html", + "uri":"cce_10_0011.html", "product_code":"cce", - "code":"97", + "code":"102", "des":"ClusterIP Services allow workloads in the same cluster to use their cluster-internal domain names to access each other.The cluster-internal domain name format is Uploaded Charts for subsequent workload creation.When you upload a chart, the naming rule of the OBS bucket is changed from cce-charts-{region}", - "doc_type":"usermanual2", - "kw":"Installing a chart,Updating a chart,Downloading a chart,Deleting a chart,Uploading a Chart,My Charts", - "title":"Uploading a Chart", - "githuburl":"" - }, - { - "uri":"cce_01_0146.html", - "product_code":"cce", - "code":"180", - "des":"In the workload list, if the status is Rollback successful, the workload is rolled back successfully.", - "doc_type":"usermanual2", - "kw":"Creating a Chart-based Workload,Cluster,Upgrading a Chart-based Workload,Rolling Back a Chart-based ", - "title":"Creating a Workload from a Chart", - "githuburl":"" - }, - { - "uri":"cce_01_0064.html", - "product_code":"cce", - "code":"181", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Add-ons", - "title":"Add-ons", - "githuburl":"" - }, - { - "uri":"cce_01_0277.html", - "product_code":"cce", - "code":"182", - "des":"CCE provides multiple types of add-ons to extend cluster functions and meet feature requirements. You can install add-ons as required.", - "doc_type":"usermanual2", - "kw":"Overview,Add-ons,User Guide", - "title":"Overview", - "githuburl":"" - }, - { - "uri":"cce_01_0129.html", - "product_code":"cce", - "code":"183", - "des":"The coredns add-on is a DNS server that provides domain name resolution services for Kubernetes clusters. coredns chains plug-ins to provide additional features.coredns i", - "doc_type":"usermanual2", - "kw":"coredns add-on,DNS server,domain name resolution services,Kubernetes,coredns (System Resource Add-on", - "title":"coredns (System Resource Add-on, Mandatory)", - "githuburl":"" - }, - { - "uri":"cce_01_0127.html", - "product_code":"cce", - "code":"184", - "des":"storage-driver functions as a standard Kubernetes FlexVolume plug-in to allow containers to use IaaS storage resources. By installing and upgrading storage-driver, you ca", - "doc_type":"usermanual2", - "kw":"storage-driver (System Resource Add-on, Mandatory),Add-ons,User Guide", - "title":"storage-driver (System Resource Add-on, Mandatory)", - "githuburl":"" - }, - { - "uri":"cce_01_0066.html", - "product_code":"cce", - "code":"185", - "des":"Everest is a cloud-native container storage system. Based on Container Storage Interface (CSI), clusters of Kubernetes v1.15 or later can interconnect with cloud storage ", - "doc_type":"usermanual2", - "kw":"everest (System Resource Add-on, Mandatory),Add-ons,User Guide", - "title":"everest (System Resource Add-on, Mandatory)", - "githuburl":"" - }, - { - "uri":"cce_01_0154.html", - "product_code":"cce", - "code":"186", - "des":"Autoscaler is an important Kubernetes controller. It supports microservice scaling and is key to serverless design.When the CPU or memory usage of a microservice is too h", - "doc_type":"usermanual2", - "kw":"Auto Scale-In,autoscaler,Add-ons,User Guide", - "title":"autoscaler", - "githuburl":"" - }, - { - "uri":"cce_01_0205.html", - "product_code":"cce", - "code":"187", - "des":"From version 1.8 onwards, Kubernetes provides resource usage metrics, such as the container CPU and memory usage, through the Metrics API. These metrics can be directly a", - "doc_type":"usermanual2", - "kw":"metrics-server,Add-ons,User Guide", - "title":"metrics-server", - "githuburl":"" - }, - { - "uri":"cce_01_0141.html", - "product_code":"cce", - "code":"188", - "des":"gpu-beta is a device management add-on that supports GPUs in containers. It supports only NVIDIA Tesla drivers.This add-on is available only in certain regions.This add-o", - "doc_type":"usermanual2", - "kw":"gpu-beta,Add-ons,User Guide", - "title":"gpu-beta", - "githuburl":"" - }, - { - "uri":"cce_01_0207.html", - "product_code":"cce", - "code":"189", + "code":"150", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Auto Scaling", @@ -1890,9 +1500,9 @@ "githuburl":"" }, { - "uri":"cce_01_0279.html", + "uri":"cce_10_0279.html", "product_code":"cce", - "code":"190", + "code":"151", "des":"Auto scaling is a service that automatically and economically adjusts service resources based on your service requirements and configured policies.More and more applicati", "doc_type":"usermanual2", "kw":"Overview,Auto Scaling,User Guide", @@ -1900,9 +1510,9 @@ "githuburl":"" }, { - "uri":"cce_01_0293.html", + "uri":"cce_10_0293.html", "product_code":"cce", - "code":"191", + "code":"152", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Scaling a Workload", @@ -1910,19 +1520,19 @@ "githuburl":"" }, { - "uri":"cce_01_0290.html", + "uri":"cce_10_0290.html", "product_code":"cce", - "code":"192", - "des":"Scaling policy priority: If you do not manually adjust the number of pods, auto scaling policies will take effect for resource scheduling. If manual scaling is triggered,", + "code":"153", + "des":"HPA is a controller that controls horizontal pod scaling. HPA periodically checks the pod metrics, calculates the number of replicas required to meet the target values co", "doc_type":"usermanual2", "kw":"Workload Scaling Mechanisms,Scaling a Workload,User Guide", "title":"Workload Scaling Mechanisms", "githuburl":"" }, { - "uri":"cce_01_0208.html", + "uri":"cce_10_0208.html", "product_code":"cce", - "code":"193", + "code":"154", "des":"Horizontal Pod Autoscaling (HPA) in Kubernetes implements horizontal scaling of pods. In a CCE HPA policy, you can configure different cooldown time windows and scaling t", "doc_type":"usermanual2", "kw":"Creating an HPA Policy for Workload Auto Scaling,Scaling a Workload,User Guide", @@ -1930,9 +1540,9 @@ "githuburl":"" }, { - "uri":"cce_01_0083.html", + "uri":"cce_10_0083.html", "product_code":"cce", - "code":"194", + "code":"155", "des":"After an HPA policy is created, you can update, clone, edit, and delete the policy, as well as edit the YAML file.You can view the rules, status, and events of an HPA pol", "doc_type":"usermanual2", "kw":"Managing Workload Scaling Policies,Scaling a Workload,User Guide", @@ -1940,59 +1550,189 @@ "githuburl":"" }, { - "uri":"cce_01_0395.html", + "uri":"cce_10_0291.html", "product_code":"cce", - "code":"195", - "des":"CCE clusters of v1.15 or earlier support workload scaling based on AOM monitoring data. This function is no longer supported in CCE clusters of v1.17 or later.If you have", - "doc_type":"usermanual2", - "kw":"Switching from AOM to HPA for Auto Scaling,Scaling a Workload,User Guide", - "title":"Switching from AOM to HPA for Auto Scaling", - "githuburl":"" - }, - { - "uri":"cce_01_0291.html", - "product_code":"cce", - "code":"196", + "code":"156", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", - "kw":"Scaling a Cluster/Node", - "title":"Scaling a Cluster/Node", + "kw":"Scaling a Node", + "title":"Scaling a Node", "githuburl":"" }, { - "uri":"cce_01_0296.html", + "uri":"cce_10_0296.html", "product_code":"cce", - "code":"197", + "code":"157", "des":"Kubernetes HPA is designed for pods. However, if the cluster resources are insufficient, you can only add nodes. Scaling of cluster nodes could be laborious. Now with clo", "doc_type":"usermanual2", - "kw":"Node Scaling Mechanisms,Scaling a Cluster/Node,User Guide", + "kw":"Node Scaling Mechanisms,Scaling a Node,User Guide", "title":"Node Scaling Mechanisms", "githuburl":"" }, { - "uri":"cce_01_0209.html", + "uri":"cce_10_0209.html", "product_code":"cce", - "code":"198", + "code":"158", "des":"CCE provides auto scaling through the autoscaler add-on. Nodes with different specifications can be automatically added across AZs on demand.If a node scaling policy and ", "doc_type":"usermanual2", - "kw":"Creating a Node Scaling Policy,Scaling a Cluster/Node,User Guide", + "kw":"Creating a Node Scaling Policy,Scaling a Node,User Guide", "title":"Creating a Node Scaling Policy", "githuburl":"" }, { - "uri":"cce_01_0063.html", + "uri":"cce_10_0063.html", "product_code":"cce", - "code":"199", + "code":"159", "des":"After a node scaling policy is created, you can delete, edit, disable, enable, or clone the policy.You can view the associated node pool, rules, and scaling history of a ", "doc_type":"usermanual2", - "kw":"Managing Node Scaling Policies,Scaling a Cluster/Node,User Guide", + "kw":"Managing Node Scaling Policies,Scaling a Node,User Guide", "title":"Managing Node Scaling Policies", "githuburl":"" }, { - "uri":"cce_01_0164.html", + "uri":"cce_10_0300.html", "product_code":"cce", - "code":"200", + "code":"160", + "des":"The best way to handle surging traffic is to automatically adjust the number of machines based on the traffic volume or resource usage, which is called scaling.In CCE, th", + "doc_type":"usermanual2", + "kw":"Using HPA and CA for Auto Scaling of Workloads and Nodes,Auto Scaling,User Guide", + "title":"Using HPA and CA for Auto Scaling of Workloads and Nodes", + "githuburl":"" + }, + { + "uri":"cce_10_0064.html", + "product_code":"cce", + "code":"161", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Add-ons", + "title":"Add-ons", + "githuburl":"" + }, + { + "uri":"cce_10_0277.html", + "product_code":"cce", + "code":"162", + "des":"CCE provides multiple types of add-ons to extend cluster functions and meet feature requirements. You can install add-ons as required.", + "doc_type":"usermanual2", + "kw":"Overview,Add-ons,User Guide", + "title":"Overview", + "githuburl":"" + }, + { + "uri":"cce_10_0129.html", + "product_code":"cce", + "code":"163", + "des":"The coredns add-on is a DNS server that provides domain name resolution services for Kubernetes clusters. 
coredns chains plug-ins to provide additional features.coredns i", + "doc_type":"usermanual2", + "kw":"coredns (System Resource Add-On, Mandatory),Add-ons,User Guide", + "title":"coredns (System Resource Add-On, Mandatory)", + "githuburl":"" + }, + { + "uri":"cce_10_0127.html", + "product_code":"cce", + "code":"164", + "des":"storage-driver functions as a standard Kubernetes FlexVolume plug-in to allow containers to use EVS, SFS, OBS, and SFS Turbo storage resources. By installing and upgradin", + "doc_type":"usermanual2", + "kw":"storage-driver (System Resource Add-On, Discarded),Add-ons,User Guide", + "title":"storage-driver (System Resource Add-On, Discarded)", + "githuburl":"" + }, + { + "uri":"cce_10_0066.html", + "product_code":"cce", + "code":"165", + "des":"Everest is a cloud native container storage system. Based on the Container Storage Interface (CSI), clusters of Kubernetes v1.15.6 or later obtain access to cloud storage", + "doc_type":"usermanual2", + "kw":"everest (System Resource Add-On, Mandatory),Add-ons,User Guide", + "title":"everest (System Resource Add-On, Mandatory)", + "githuburl":"" + }, + { + "uri":"cce_10_0132.html", + "product_code":"cce", + "code":"166", + "des":"node-problem-detector (npd for short) is an add-on that monitors abnormal events of cluster nodes and connects to a third-party monitoring platform. It is a daemon runnin", + "doc_type":"usermanual2", + "kw":"npd,Add-ons,User Guide", + "title":"npd", + "githuburl":"" + }, + { + "uri":"cce_10_0154.html", + "product_code":"cce", + "code":"167", + "des":"Autoscaler is an important Kubernetes controller. It supports microservice scaling and is key to serverless design.When the CPU or memory usage of a microservice is too h", + "doc_type":"usermanual2", + "kw":"autoscaler,Add-ons,User Guide", + "title":"autoscaler", + "githuburl":"" + }, + { + "uri":"cce_10_0205.html", + "product_code":"cce", + "code":"168", + "des":"From version 1.8 onwards, Kubernetes provides resource usage metrics, such as the container CPU and memory usage, through the Metrics API. These metrics can be directly a", + "doc_type":"usermanual2", + "kw":"metrics-server,Add-ons,User Guide", + "title":"metrics-server", + "githuburl":"" + }, + { + "uri":"cce_10_0141.html", + "product_code":"cce", + "code":"169", + "des":"gpu-beta is a device management add-on that supports GPUs in containers. If GPU nodes are used in the cluster, the gpu-beta add-on must be installed.The driver to be down", + "doc_type":"usermanual2", + "kw":"gpu-beta,Add-ons,User Guide", + "title":"gpu-beta", + "githuburl":"" + }, + { + "uri":"cce_10_0193.html", + "product_code":"cce", + "code":"170", + "des":"Volcano is a batch processing platform based on Kubernetes. It provides a series of features required by machine learning, deep learning, bioinformatics, genomics, and ot", + "doc_type":"usermanual2", + "kw":"volcano,Add-ons,User Guide", + "title":"volcano", + "githuburl":"" + }, + { + "uri":"cce_10_0019.html", + "product_code":"cce", + "code":"171", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Charts", + "title":"Charts", + "githuburl":"" + }, + { + "uri":"cce_10_0191.html", + "product_code":"cce", + "code":"172", + "des":"CCE provides a console for managing Helm charts, helping you easily deploy applications using the charts and manage applications on the console.Helm is a package manager ", + "doc_type":"usermanual2", + "kw":"Overview,Charts,User Guide", + "title":"Overview", + "githuburl":"" + }, + { + "uri":"cce_10_0146.html", + "product_code":"cce", + "code":"173", + "des":"On the CCE console, you can upload a Helm chart package, deploy it, and manage the deployed pods.The number of charts that can be uploaded by a single user is limited. Th", + "doc_type":"usermanual2", + "kw":"Deploying an Application from a Chart,Charts,User Guide", + "title":"Deploying an Application from a Chart", + "githuburl":"" + }, + { + "uri":"cce_10_0164.html", + "product_code":"cce", + "code":"174", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Permissions Management", @@ -2000,9 +1740,9 @@ "githuburl":"" }, { - "uri":"cce_01_0187.html", + "uri":"cce_10_0187.html", "product_code":"cce", - "code":"201", + "code":"175", "des":"CCE permissions management allows you to assign permissions to IAM users and user groups under your tenant accounts. CCE combines the advantages of Identity and Access Ma", "doc_type":"usermanual2", "kw":"Permissions Overview,Permissions Management,User Guide", @@ -2010,19 +1750,19 @@ "githuburl":"" }, { - "uri":"cce_01_0188.html", + "uri":"cce_10_0188.html", "product_code":"cce", - "code":"202", - "des":"CCE cluster permissions are assigned based on IAM system policies and custom policies. You can use user groups to assign permissions to IAM users.Cluster permissions are ", + "code":"176", + "des":"CCE cluster-level permissions are assigned based on IAM system policies and custom policies. You can use user groups to assign permissions to IAM users.Cluster permission", "doc_type":"usermanual2", "kw":"Cluster Permissions (IAM-based),Permissions Management,User Guide", "title":"Cluster Permissions (IAM-based)", "githuburl":"" }, { - "uri":"cce_01_0189.html", + "uri":"cce_10_0189.html", "product_code":"cce", - "code":"203", + "code":"177", "des":"You can regulate users' or user groups' access to Kubernetes resources in a single namespace based on their Kubernetes RBAC roles. The RBAC API declares four kinds of Kub", "doc_type":"usermanual2", "kw":"Namespace Permissions (Kubernetes RBAC-based),Permissions Management,User Guide", @@ -2030,19 +1770,69 @@ "githuburl":"" }, { - "uri":"cce_01_0275.html", + "uri":"cce_10_0245.html", "product_code":"cce", - "code":"204", - "des":"A pod security policy (PSP) is a cluster-level resource that controls sensitive security aspects of the pod specification. The PodSecurityPolicy object in Kubernetes defi", + "code":"178", + "des":"The conventional distributed task scheduling mode is being replaced by Kubernetes. 
CCE allows you to easily deploy, manage, and scale containerized applications in the cl", "doc_type":"usermanual2", - "kw":"Pod Security Policies,Permissions Management,User Guide", - "title":"Pod Security Policies", + "kw":"Example: Designing and Configuring Permissions for Users in a Department,Permissions Management,User", + "title":"Example: Designing and Configuring Permissions for Users in a Department", "githuburl":"" }, { - "uri":"cce_01_0024.html", + "uri":"cce_10_0190.html", "product_code":"cce", - "code":"205", + "code":"179", + "des":"Some CCE permissions policies depend on the policies of other cloud services. To view or use other cloud resources on the CCE console, you need to enable the system polic", + "doc_type":"usermanual2", + "kw":"Permission Dependency of the CCE Console,Permissions Management,User Guide", + "title":"Permission Dependency of the CCE Console", + "githuburl":"" + }, + { + "uri":"cce_10_0465.html", + "product_code":"cce", + "code":"180", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Pod Security", + "title":"Pod Security", + "githuburl":"" + }, + { + "uri":"cce_10_0275.html", + "product_code":"cce", + "code":"181", + "des":"A pod security policy (PSP) is a cluster-level resource that controls sensitive security aspects of the pod specification. The PodSecurityPolicy object in Kubernetes defi", + "doc_type":"usermanual2", + "kw":"Configuring a Pod Security Policy,Pod Security,User Guide", + "title":"Configuring a Pod Security Policy", + "githuburl":"" + }, + { + "uri":"cce_10_0466.html", + "product_code":"cce", + "code":"182", + "des":"Before using Pod Security Admission, you need to understand Kubernetes Pod Security Standards. These standards define different isolation levels for pods. They let you de", + "doc_type":"usermanual2", + "kw":"Configuring Pod Security Admission,Pod Security,User Guide", + "title":"Configuring Pod Security Admission", + "githuburl":"" + }, + { + "uri":"cce_10_0477_0.html", + "product_code":"cce", + "code":"183", + "des":"In clusters earlier than v1.21, a token is obtained by mounting the secret of the service account to a pod. Tokens obtained this way are permanent. This approach is no lo", + "doc_type":"usermanual2", + "kw":"Service Account Token Security Improvement,Permissions Management,User Guide", + "title":"Service Account Token Security Improvement", + "githuburl":"" + }, + { + "uri":"cce_10_0024.html", + "product_code":"cce", + "code":"184", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Cloud Trace Service (CTS)", @@ -2050,9 +1840,9 @@ "githuburl":"" }, { - "uri":"cce_01_0025.html", + "uri":"cce_10_0025.html", "product_code":"cce", - "code":"206", + "code":"185", "des":"Cloud Trace Service (CTS) records operations on cloud service resources, allowing users to query, audit, and backtrack the resource operation requests initiated from the ", "doc_type":"usermanual2", "kw":"CCE Operations Supported by CTS,Cloud Trace Service (CTS),User Guide", @@ -2060,9 +1850,9 @@ "githuburl":"" }, { - "uri":"cce_01_0026.html", + "uri":"cce_10_0026.html", "product_code":"cce", - "code":"207", + "code":"186", "des":"After you enable CTS, the system starts recording operations on CCE resources. Operation records of the last 7 days can be viewed on the CTS management console.Trace Sour", "doc_type":"usermanual2", "kw":"Querying CTS Logs,Cloud Trace Service (CTS),User Guide", @@ -2070,29 +1860,269 @@ "githuburl":"" }, { - "uri":"cce_faq_0083.html", + "uri":"cce_10_0305.html", + "product_code":"cce", + "code":"187", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Storage (FlexVolume)", + "title":"Storage (FlexVolume)", + "githuburl":"" + }, + { + "uri":"cce_10_0306.html", + "product_code":"cce", + "code":"188", + "des":"In container storage, you can use different types of volumes and mount them to containers in pods as many as you want.In CCE, container storage is backed both by Kubernet", + "doc_type":"usermanual2", + "kw":"FlexVolume Overview,Storage (FlexVolume),User Guide", + "title":"FlexVolume Overview", + "githuburl":"" + }, + { + "uri":"cce_10_0343.html", + "product_code":"cce", + "code":"189", + "des":"In clusters later than v1.15.11-r1, CSI (the everest add-on) has taken over all functions of fuxi FlexVolume (the storage-driver add-on) for managing container storage. Y", + "doc_type":"usermanual2", + "kw":"How Do I Change the Storage Class Used by a Cluster of v1.15 from FlexVolume to CSI Everest?,Storage", + "title":"How Do I Change the Storage Class Used by a Cluster of v1.15 from FlexVolume to CSI Everest?", + "githuburl":"" + }, + { + "uri":"cce_10_0309.html", + "product_code":"cce", + "code":"190", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Using EVS Disks as Storage Volumes", + "title":"Using EVS Disks as Storage Volumes", + "githuburl":"" + }, + { + "uri":"cce_10_0310.html", + "product_code":"cce", + "code":"191", + "des":"To achieve persistent storage, CCE allows you to mount the storage volumes created from Elastic Volume Service (EVS) disks to a path of a container. 
When the container is", + "doc_type":"usermanual2", + "kw":"Overview,Using EVS Disks as Storage Volumes,User Guide", + "title":"Overview", + "githuburl":"" + }, + { + "uri":"cce_10_0312.html", + "product_code":"cce", + "code":"192", + "des":"The following configuration example applies to clusters of Kubernetes 1.13 or earlier.touch pvc-evs-auto-example.yamlvi pvc-evs-auto-example.yamlExample YAML file for clu", + "doc_type":"usermanual2", + "kw":"(kubectl) Automatically Creating an EVS Disk,Using EVS Disks as Storage Volumes,User Guide", + "title":"(kubectl) Automatically Creating an EVS Disk", + "githuburl":"" + }, + { + "uri":"cce_10_0313.html", + "product_code":"cce", + "code":"193", + "des":"The following configuration example applies to clusters of Kubernetes 1.13 or earlier.touch pv-evs-example.yaml pvc-evs-example.yamlClusters from v1.11.7 to v1.13Example ", + "doc_type":"usermanual2", + "kw":"(kubectl) Creating a PV from an Existing EVS Disk,Using EVS Disks as Storage Volumes,User Guide", + "title":"(kubectl) Creating a PV from an Existing EVS Disk", + "githuburl":"" + }, + { + "uri":"cce_10_0314.html", + "product_code":"cce", + "code":"194", + "des":"After an EVS volume is created or imported to CCE, you can mount it to a workload.EVS disks cannot be attached across AZs. Before mounting a volume, you can run the kubec", + "doc_type":"usermanual2", + "kw":"(kubectl) Creating a Pod Mounted with an EVS Volume,Using EVS Disks as Storage Volumes,User Guide", + "title":"(kubectl) Creating a Pod Mounted with an EVS Volume", + "githuburl":"" + }, + { + "uri":"cce_10_0329.html", + "product_code":"cce", + "code":"195", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Using SFS Turbo File Systems as Storage Volumes", + "title":"Using SFS Turbo File Systems as Storage Volumes", + "githuburl":"" + }, + { + "uri":"cce_10_0330.html", + "product_code":"cce", + "code":"196", + "des":"CCE allows you to mount a volume created from an SFS Turbo file system to a container to store data persistently. Provisioned on demand and fast, SFS Turbo is suitable fo", + "doc_type":"usermanual2", + "kw":"Overview,Using SFS Turbo File Systems as Storage Volumes,User Guide", + "title":"Overview", + "githuburl":"" + }, + { + "uri":"cce_10_0332.html", + "product_code":"cce", + "code":"197", + "des":"CCE allows you to use an existing SFS Turbo file system to create a PersistentVolume (PV). 
After the creation is successful, you can create a PersistentVolumeClaim (PVC) ", + "doc_type":"usermanual2", + "kw":"(kubectl) Creating a PV from an Existing SFS Turbo File System,Using SFS Turbo File Systems as Stora", + "title":"(kubectl) Creating a PV from an Existing SFS Turbo File System", + "githuburl":"" + }, + { + "uri":"cce_10_0333.html", + "product_code":"cce", + "code":"198", + "des":"After an SFS Turbo volume is created or imported to CCE, you can mount the volume to a workload.The following configuration example applies to clusters of Kubernetes 1.13", + "doc_type":"usermanual2", + "kw":"(kubectl) Creating a Deployment Mounted with an SFS Turbo Volume,Using SFS Turbo File Systems as Sto", + "title":"(kubectl) Creating a Deployment Mounted with an SFS Turbo Volume", + "githuburl":"" + }, + { + "uri":"cce_10_0334.html", + "product_code":"cce", + "code":"199", + "des":"CCE allows you to use an existing SFS Turbo volume to create a StatefulSet.The following configuration example applies to clusters of Kubernetes 1.13 or earlier.touch efs", + "doc_type":"usermanual2", + "kw":"(kubectl) Creating a StatefulSet Mounted with an SFS Turbo Volume,Using SFS Turbo File Systems as St", + "title":"(kubectl) Creating a StatefulSet Mounted with an SFS Turbo Volume", + "githuburl":"" + }, + { + "uri":"cce_10_0322.html", + "product_code":"cce", + "code":"200", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Using OBS Buckets as Storage Volumes", + "title":"Using OBS Buckets as Storage Volumes", + "githuburl":"" + }, + { + "uri":"cce_10_0323.html", + "product_code":"cce", + "code":"201", + "des":"CCE allows you to mount a volume created from an Object Storage Service (OBS) bucket to a container to store data persistently. Object storage is commonly used in cloud w", + "doc_type":"usermanual2", + "kw":"Overview,Using OBS Buckets as Storage Volumes,User Guide", + "title":"Overview", + "githuburl":"" + }, + { + "uri":"cce_10_0325.html", + "product_code":"cce", + "code":"202", + "des":"During the use of OBS, expected OBS buckets can be automatically created and mounted as volumes. Currently, standard and infrequent access OBS buckets are supported, whic", + "doc_type":"usermanual2", + "kw":"(kubectl) Automatically Creating an OBS Volume,Using OBS Buckets as Storage Volumes,User Guide", + "title":"(kubectl) Automatically Creating an OBS Volume", + "githuburl":"" + }, + { + "uri":"cce_10_0326.html", + "product_code":"cce", + "code":"203", + "des":"CCE allows you to use an existing OBS bucket to create a PersistentVolume (PV). 
You can create a PersistentVolumeClaim (PVC) and bind it to the PV.The following configura", + "doc_type":"usermanual2", + "kw":"(kubectl) Creating a PV from an Existing OBS Bucket,Using OBS Buckets as Storage Volumes,User Guide", + "title":"(kubectl) Creating a PV from an Existing OBS Bucket", + "githuburl":"" + }, + { + "uri":"cce_10_0327.html", + "product_code":"cce", + "code":"204", + "des":"After an OBS volume is created or imported to CCE, you can mount the volume to a workload.The following configuration example applies to clusters of Kubernetes 1.13 or ea", + "doc_type":"usermanual2", + "kw":"(kubectl) Creating a Deployment Mounted with an OBS Volume,Using OBS Buckets as Storage Volumes,User", + "title":"(kubectl) Creating a Deployment Mounted with an OBS Volume", + "githuburl":"" + }, + { + "uri":"cce_10_0328.html", + "product_code":"cce", + "code":"205", + "des":"CCE allows you to use an existing OBS volume to create a StatefulSet through a PersistentVolumeClaim (PVC).The following configuration example applies to clusters of Kube", + "doc_type":"usermanual2", + "kw":"(kubectl) Creating a StatefulSet Mounted with an OBS Volume,Using OBS Buckets as Storage Volumes,Use", + "title":"(kubectl) Creating a StatefulSet Mounted with an OBS Volume", + "githuburl":"" + }, + { + "uri":"cce_10_0315.html", + "product_code":"cce", + "code":"206", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Using SFS File Systems as Storage Volumes", + "title":"Using SFS File Systems as Storage Volumes", + "githuburl":"" + }, + { + "uri":"cce_10_0316.html", + "product_code":"cce", + "code":"207", + "des":"CCE allows you to mount a volume created from a Scalable File Service (SFS) file system to a container to store data persistently. 
SFS volumes are commonly used in ReadWr", + "doc_type":"usermanual2", + "kw":"Overview,Using SFS File Systems as Storage Volumes,User Guide", + "title":"Overview", + "githuburl":"" + }, + { + "uri":"cce_10_0318.html", "product_code":"cce", "code":"208", + "des":"The following configuration example applies to clusters of Kubernetes 1.13 or earlier.touch pvc-sfs-auto-example.yamlvi pvc-sfs-auto-example.yamlExample YAML file:apiVers", + "doc_type":"usermanual2", + "kw":"(kubectl) Automatically Creating an SFS Volume,Using SFS File Systems as Storage Volumes,User Guide", + "title":"(kubectl) Automatically Creating an SFS Volume", + "githuburl":"" + }, + { + "uri":"cce_10_0319.html", + "product_code":"cce", + "code":"209", + "des":"The following configuration example applies to clusters of Kubernetes 1.13 or earlier.touch pv-sfs-example.yaml pvc-sfs-example.yamlClusters from v1.11 to v1.13Example YA", + "doc_type":"usermanual2", + "kw":"(kubectl) Creating a PV from an Existing SFS File System,Using SFS File Systems as Storage Volumes,U", + "title":"(kubectl) Creating a PV from an Existing SFS File System", + "githuburl":"" + }, + { + "uri":"cce_10_0320.html", + "product_code":"cce", + "code":"210", + "des":"After an SFS volume is created or imported to CCE, you can mount the volume to a workload.The following configuration example applies to clusters of Kubernetes 1.13 or ea", + "doc_type":"usermanual2", + "kw":"(kubectl) Creating a Deployment Mounted with an SFS Volume,Using SFS File Systems as Storage Volumes", + "title":"(kubectl) Creating a Deployment Mounted with an SFS Volume", + "githuburl":"" + }, + { + "uri":"cce_10_0321.html", + "product_code":"cce", + "code":"211", + "des":"CCE allows you to use an existing SFS volume to create a StatefulSet through a PersistentVolumeClaim (PVC).The following configuration example applies to clusters of Kube", + "doc_type":"usermanual2", + "kw":"(kubectl) Creating a StatefulSet Mounted with an SFS Volume,Using SFS File Systems as Storage Volume", + "title":"(kubectl) Creating a StatefulSet Mounted with an SFS Volume", + "githuburl":"" + }, + { + "uri":"cce_faq_0083.html", + "product_code":"cce", + "code":"212", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Reference", "title":"Reference", "githuburl":"" }, - { - "uri":"cce_faq_00006.html", - "product_code":"cce", - "code":"209", - "des":"Cloud Container Engine (CCE) provides highly scalable, high-performance, enterprise-class Kubernetes clusters and supports Docker containers. With CCE, you can easily dep", - "doc_type":"usermanual2", - "kw":"Checklist for Migrating Containerized Applications to the Cloud,Reference,User Guide", - "title":"Checklist for Migrating Containerized Applications to the Cloud", - "githuburl":"" - }, { "uri":"cce_01_0203.html", "product_code":"cce", - "code":"210", + "code":"213", "des":"When a node is added, EIP is set to Automatically assign. 
The node cannot be created, and a message indicating that EIPs are insufficient is displayed.Two methods are ava", "doc_type":"usermanual2", "kw":"How Do I Troubleshoot Insufficient EIPs When a Node Is Added?,Reference,User Guide", @@ -2102,7 +2132,7 @@ { "uri":"cce_01_0204.html", "product_code":"cce", - "code":"211", + "code":"214", "des":"Before using command line injection, write a script that can format data disks and save it to your OBS bucket. Then, inject a command line that will automatically execute", "doc_type":"usermanual2", "kw":"How Do I Format a Data Disk Using Command Line Injection?,Reference,User Guide", @@ -2112,7 +2142,7 @@ { "uri":"cce_01_0999.html", "product_code":"cce", - "code":"212", + "code":"215", "des":"After a cluster of v1.13.10 is created, you can use heapster only after rbac is enabled.kubectl delete clusterrole system:heapsterCopy the following file to a server on w", "doc_type":"usermanual2", "kw":"How Do I Use heapster in Clusters of v1.13.10?,Reference,User Guide", @@ -2122,7 +2152,7 @@ { "uri":"cce_faq_00096.html", "product_code":"cce", - "code":"213", + "code":"216", "des":"Currently, private CCE clusters use Device Mapper as the Docker storage driver.Device Mapper is developed based on the kernel framework and supports many advanced volume ", "doc_type":"usermanual2", "kw":"How Do I Change the Mode of the Docker Device Mapper?,Reference,User Guide", @@ -2132,7 +2162,7 @@ { "uri":"cce_faq_00120.html", "product_code":"cce", - "code":"214", + "code":"217", "des":"If the cluster status is available but some nodes in the cluster are unavailable, perform the following operations to rectify the fault:Check Item 1: Whether the Node Is ", "doc_type":"usermanual2", "kw":"monitrc,What Can I Do If My Cluster Status Is Available but the Node Status Is Unavailable?,Referenc", @@ -2142,7 +2172,7 @@ { "uri":"cce_faq_00039.html", "product_code":"cce", - "code":"215", + "code":"218", "des":"If the cluster is Unavailable, perform the following operations to rectify the fault:Check Item 1: Whether the Security Group Is ModifiedCheck Item 2: Whether the DHCP Fu", "doc_type":"usermanual2", "kw":"How Do I Rectify the Fault When the Cluster Status Is Unavailable?,Reference,User Guide", @@ -2152,7 +2182,7 @@ { "uri":"cce_faq_00099.html", "product_code":"cce", - "code":"216", + "code":"219", "des":"This section uses the Nginx workload as an example to describe how to set the workload access type to LoadBalancer (ELB).An ELB has been created.You have connected an Ela", "doc_type":"usermanual2", "kw":"ECS,kubectl,How Do I Use kubectl to Set the Workload Access Type to LoadBalancer (ELB)?,Reference,Us", @@ -2162,7 +2192,7 @@ { "uri":"cce_faq_00190.html", "product_code":"cce", - "code":"217", + "code":"220", "des":"You can use the pre-installation script feature to configure CCE cluster nodes (ECSs).Before using this feature, write a script that can format data disks and save it to ", "doc_type":"usermanual2", "kw":"How Do I Add a Second Data Disk to a Node in a CCE Cluster?,Reference,User Guide", @@ -2172,7 +2202,7 @@ { "uri":"cce_faq_00029.html", "product_code":"cce", - "code":"218", + "code":"221", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Workload Abnormalities", @@ -2182,7 +2212,7 @@ { "uri":"cce_faq_00134.html", "product_code":"cce", - "code":"219", + "code":"222", "des":"If a workload is running improperly, you can view events to determine the cause.On the CCE console, choose Workloads > Deployments or StatefulSets in the navigation pane ", "doc_type":"usermanual2", "kw":"Fault Locating and Troubleshooting for Abnormal Workloads,Workload Abnormalities,User Guide", @@ -2192,7 +2222,7 @@ { "uri":"cce_faq_00098.html", "product_code":"cce", - "code":"220", + "code":"223", "des":"Viewing K8s Event InformationCheck Item 1: Checking Whether a Node Is Available in the ClusterCheck Item 2: Checking Whether Node Resources (CPU and Memory) Are Sufficien", "doc_type":"usermanual2", "kw":"workload,InstanceSchedulingFailed,Failed to Schedule an Instance,Workload Abnormalities,User Guide", @@ -2202,7 +2232,7 @@ { "uri":"cce_faq_00015.html", "product_code":"cce", - "code":"221", + "code":"224", "des":"If the workload details page displays an event indicating that image pulling fails, perform the following operations to locate the fault:Check Item 1: Checking Whether im", "doc_type":"usermanual2", "kw":"workload,Failed to Pull an Image,Workload Abnormalities,User Guide", @@ -2212,7 +2242,7 @@ { "uri":"cce_faq_00018.html", "product_code":"cce", - "code":"222", + "code":"225", "des":"On the details page of a workload, if an event is displayed indicating that the container fails to be restarted, perform the following operations to locate the fault:Rect", "doc_type":"usermanual2", "kw":"Failed to Restart a Container,Workload Abnormalities,User Guide", @@ -2222,7 +2252,7 @@ { "uri":"cce_faq_00209.html", "product_code":"cce", - "code":"223", + "code":"226", "des":"Pod actions are classified into the following two types:kube-controller-manager periodically checks the status of all nodes. If a node is in the NotReady state for a peri", "doc_type":"usermanual2", "kw":"What Should I Do If An Evicted Pod Exists?,Workload Abnormalities,User Guide", @@ -2232,7 +2262,7 @@ { "uri":"cce_faq_00140.html", "product_code":"cce", - "code":"224", + "code":"227", "des":"When a node is faulty, pods on the node are evicted to ensure workload availability. If the pods are not evicted when the node is faulty, perform the following steps:Use ", "doc_type":"usermanual2", "kw":"Instance Eviction Exception,Workload Abnormalities,User Guide", @@ -2242,7 +2272,7 @@ { "uri":"cce_faq_00210.html", "product_code":"cce", - "code":"225", + "code":"228", "des":"When a node is in the Unavailable state, CCE migrates container pods on the node and sets the pods running on the node to the Terminating state.After the node is restored", "doc_type":"usermanual2", "kw":"What Should I Do If Pods in the Terminating State Cannot Be Deleted?,Workload Abnormalities,User Gui", @@ -2252,7 +2282,7 @@ { "uri":"cce_faq_00012.html", "product_code":"cce", - "code":"226", + "code":"229", "des":"The metadata.enable field in the YAML file of the workload is false. 
As a result, the pod of the workload is deleted and the workload is in the stopped status.The workloa", "doc_type":"usermanual2", "kw":"What Should I Do If a Workload Is Stopped Caused by Pod Deletion?,Workload Abnormalities,User Guide", @@ -2262,7 +2292,7 @@ { "uri":"cce_faq_00005.html", "product_code":"cce", - "code":"227", + "code":"230", "des":"The pod remains in the creating state for a long time, and the sandbox-related errors are reported.Select a troubleshooting method for your cluster:Clusters of V1.13 or l", "doc_type":"usermanual2", "kw":"What Should I Do If Sandbox-Related Errors Are Reported When the Pod Remains in the Creating State?,", @@ -2272,7 +2302,7 @@ { "uri":"cce_faq_00199.html", "product_code":"cce", - "code":"228", + "code":"231", "des":"Workload pods in the cluster fail and are being redeployed constantly.After the following command is run, the command output shows that many pods are in the evicted state", "doc_type":"usermanual2", "kw":"What Should I Do If a Pod Is in the Evicted State?,Workload Abnormalities,User Guide", @@ -2282,7 +2312,7 @@ { "uri":"cce_faq_00002.html", "product_code":"cce", - "code":"229", + "code":"232", "des":"If a node has sufficient memory resources, a container on this node can use more memory resources than requested, but no more than limited. If the memory allocated to a c", "doc_type":"usermanual2", "kw":"What Should I Do If the OOM Killer Is Triggered When a Container Uses Memory Resources More Than Lim", @@ -2292,53 +2322,23 @@ { "uri":"cce_faq_00202.html", "product_code":"cce", - "code":"230", + "code":"233", "des":"A workload can be accessed from public networks through a load balancer. LoadBalancer provides higher reliability than EIP-based NodePort because an EIP is no longer boun", "doc_type":"usermanual2", "kw":"What Should I Do If a Service Released in a Workload Cannot Be Accessed from Public Networks?,Refere", "title":"What Should I Do If a Service Released in a Workload Cannot Be Accessed from Public Networks?", "githuburl":"" }, - { - "uri":"cce_bestpractice_00162.html", - "product_code":"cce", - "code":"231", - "des":"CCE uses high-performance container networking add-ons, which support the tunnel network and VPC network models.After a cluster is created, the network model cannot be ch", - "doc_type":"usermanual2", - "kw":"Selecting a Network Model When Creating a Cluster on CCE,Reference,User Guide", - "title":"Selecting a Network Model When Creating a Cluster on CCE", - "githuburl":"" - }, - { - "uri":"cce_bestpractice_00004.html", - "product_code":"cce", - "code":"232", - "des":"Before creating a cluster on CCE, determine the number of VPCs, number of subnets, container CIDR blocks, and Services for access based on service requirements.This secti", - "doc_type":"usermanual2", - "kw":"Planning CIDR Blocks for a CCE Cluster,Reference,User Guide", - "title":"Planning CIDR Blocks for a CCE Cluster", - "githuburl":"" - }, { "uri":"cce_faq_00266.html", "product_code":"cce", - "code":"233", + "code":"234", "des":"A VPC is similar to a private local area network (LAN) managed by a home gateway whose IP address is 192.168.0.0/16. 
A VPC is a private network built on the cloud and pro", "doc_type":"usermanual2", "kw":"VPC,cluster,nodes,What Is the Relationship Between Clusters, VPCs, and Subnets?,Reference,User Guide", "title":"What Is the Relationship Between Clusters, VPCs, and Subnets?", "githuburl":"" }, - { - "uri":"cce_bestpractice_0107.html", - "product_code":"cce", - "code":"234", - "des":"For clusters of v1.15.11-r1 and later, the CSI everest add-on has taken over all functions of the fuxi FlexVolume driver (the storage-driver add-on) for container storage", - "doc_type":"usermanual2", - "kw":"How Do I Change the Storage Class Used by a Cluster of v1.15 from FlexVolume to CSI Everest?,Referen", - "title":"How Do I Change the Storage Class Used by a Cluster of v1.15 from FlexVolume to CSI Everest?", - "githuburl":"" - }, { "uri":"cce_faq_00265.html", "product_code":"cce", @@ -2350,11 +2350,451 @@ "githuburl":"" }, { - "uri":"cce_01_9999.html", + "uri":"cce_bestpractice.html", "product_code":"cce", "code":"236", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", + "kw":"Best Practice", + "title":"Best Practice", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00006.html", + "product_code":"cce", + "code":"237", + "des":"Security, efficiency, stability, and availability are common requirements on all cloud services. To meet these requirements, the system availability, data reliability, an", + "doc_type":"usermanual2", + "kw":"Checklist for Deploying Containerized Applications in the Cloud,Best Practice,User Guide", + "title":"Checklist for Deploying Containerized Applications in the Cloud", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00237.html", + "product_code":"cce", + "code":"238", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Migration", + "title":"Migration", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0306.html", + "product_code":"cce", + "code":"239", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Migrating On-premises Kubernetes Clusters to CCE", + "title":"Migrating On-premises Kubernetes Clusters to CCE", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0307.html", + "product_code":"cce", + "code":"240", + "des":"Containers are growing in popularity and Kubernetes simplifies containerized deployment. Many companies choose to build their own Kubernetes clusters. However, the O&M wo", + "doc_type":"usermanual2", + "kw":"Solution Overview,Migrating On-premises Kubernetes Clusters to CCE,User Guide", + "title":"Solution Overview", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0308.html", + "product_code":"cce", + "code":"241", + "des":"CCE allows you to customize cluster resources to meet various service requirements. 
Table 1 lists the key performance parameters of a cluster and provides the planned val", + "doc_type":"usermanual2", + "kw":"Planning Resources for the Target Cluster,Migrating On-premises Kubernetes Clusters to CCE,User Guid", + "title":"Planning Resources for the Target Cluster", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0309.html", + "product_code":"cce", + "code":"242", + "des":"If your migration does not involve resources outside a cluster listed in Table 1 or you do not need to use other services to update resources after the migration, skip th", + "doc_type":"usermanual2", + "kw":"Migrating Resources Outside a Cluster,Migrating On-premises Kubernetes Clusters to CCE,User Guide", + "title":"Migrating Resources Outside a Cluster", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0310.html", + "product_code":"cce", + "code":"243", + "des":"Velero is an open-source backup and migration tool for Kubernetes clusters. It integrates the persistent volume (PV) data backup capability of the Restic tool and can be ", + "doc_type":"usermanual2", + "kw":"Installing the Migration Tool,Migrating On-premises Kubernetes Clusters to CCE,User Guide", + "title":"Installing the Migration Tool", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0311.html", + "product_code":"cce", + "code":"244", + "des":"WordPress is used as an example to describe how to migrate an application from an on-premises Kubernetes cluster to a CCE cluster. The WordPress application consists of t", + "doc_type":"usermanual2", + "kw":"Migrating Resources in a Cluster,Migrating On-premises Kubernetes Clusters to CCE,User Guide", + "title":"Migrating Resources in a Cluster", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0312.html", + "product_code":"cce", + "code":"245", + "des":"The WordPress and MySQL images used in this example can be pulled from SWR. Therefore, the image pull failure (ErrImagePull) will not occur. If the application to be migr", + "doc_type":"usermanual2", + "kw":"Updating Resources Accordingly,Migrating On-premises Kubernetes Clusters to CCE,User Guide", + "title":"Updating Resources Accordingly", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0313.html", + "product_code":"cce", + "code":"246", + "des":"Cluster migration involves full migration of application data, which may cause intra-application adaptation problems. In this example, after the cluster is migrated, the ", + "doc_type":"usermanual2", + "kw":"Performing Additional Tasks,Migrating On-premises Kubernetes Clusters to CCE,User Guide", + "title":"Performing Additional Tasks", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0314.html", + "product_code":"cce", + "code":"247", + "des":"Both HostPath and Local volumes are local storage volumes. However, the Restic tool integrated in Velero cannot back up the PVs of the HostPath type and supports only the", + "doc_type":"usermanual2", + "kw":"Troubleshooting,Migrating On-premises Kubernetes Clusters to CCE,User Guide", + "title":"Troubleshooting", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0322.html", + "product_code":"cce", + "code":"248", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"DevOps", + "title":"DevOps", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0324.html", + "product_code":"cce", + "code":"249", + "des":"GitLab is an open-source version management system developed with Ruby on Rails for Git project repository management. It supports web-based access to public and private ", + "doc_type":"usermanual2", + "kw":"Interconnecting GitLab with SWR and CCE for CI/CD,DevOps,User Guide", + "title":"Interconnecting GitLab with SWR and CCE for CI/CD", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0323.html", + "product_code":"cce", + "code":"250", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Disaster Recovery", + "title":"Disaster Recovery", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00220.html", + "product_code":"cce", + "code":"251", + "des":"To achieve high availability for your CCE containers, you can do as follows:Deploy three master nodes for the cluster.When nodes are deployed across AZs, set custom sched", + "doc_type":"usermanual2", + "kw":"Implementing High Availability for Containers in CCE,Disaster Recovery,User Guide", + "title":"Implementing High Availability for Containers in CCE", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0315.html", + "product_code":"cce", + "code":"252", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Security", + "title":"Security", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0317.html", + "product_code":"cce", + "code":"253", + "des":"For security purposes, you are advised to configure a cluster as follows.Kubernetes releases a major version in about four months. CCE follows the same frequency as Kuber", + "doc_type":"usermanual2", + "kw":"Cluster Security,Security,User Guide", + "title":"Cluster Security", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0318.html", + "product_code":"cce", + "code":"254", + "des":"Do not bind an EIP to a node unless necessary to reduce the attack surface.If an EIP must be used, properly configure the firewall or security group rules to restrict acc", + "doc_type":"usermanual2", + "kw":"Node Security,Security,User Guide", + "title":"Node Security", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0319.html", + "product_code":"cce", + "code":"255", + "des":"The nodeSelector or nodeAffinity is used to limit the range of nodes to which applications can be scheduled, preventing the entire cluster from being threatened due to th", + "doc_type":"usermanual2", + "kw":"Container Security,Security,User Guide", + "title":"Container Security", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0320.html", + "product_code":"cce", + "code":"256", + "des":"Currently, CCE has configured static encryption for secret resources. The secrets created by users will be encrypted and stored in etcd of the CCE cluster. 
Secrets can be", + "doc_type":"usermanual2", + "kw":"Secret Security,Security,User Guide", + "title":"Secret Security", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0090.html", + "product_code":"cce", + "code":"257", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Auto Scaling", + "title":"Auto Scaling", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00282.html", + "product_code":"cce", + "code":"258", + "des":"The best way to handle surging traffic is to automatically adjust the number of machines based on the traffic volume or resource usage, which is called scaling.In CCE, th", + "doc_type":"usermanual2", + "kw":"Using HPA and CA for Auto Scaling of Workloads and Nodes,Auto Scaling,User Guide", + "title":"Using HPA and CA for Auto Scaling of Workloads and Nodes", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0050.html", + "product_code":"cce", + "code":"259", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Cluster", + "title":"Cluster", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00254.html", + "product_code":"cce", + "code":"260", + "des":"When you have multiple CCE clusters, you may find it difficult to efficiently connect to all of them.This section describes how to configure access to multiple clusters b", + "doc_type":"usermanual2", + "kw":"Connecting to Multiple Clusters Using kubectl,Cluster,User Guide", + "title":"Connecting to Multiple Clusters Using kubectl", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00190.html", + "product_code":"cce", + "code":"261", + "des":"You can use the pre-installation script feature to configure CCE cluster nodes (ECSs).When creating a node in a cluster of v1.13.10 or later, if a data disk is not manage", + "doc_type":"usermanual2", + "kw":"Adding a Second Data Disk to a Node in a CCE Cluster,Cluster,User Guide", + "title":"Adding a Second Data Disk to a Node in a CCE Cluster", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0052.html", + "product_code":"cce", + "code":"262", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Networking", + "title":"Networking", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00004.html", + "product_code":"cce", + "code":"263", + "des":"Before creating a cluster on CCE, determine the number of VPCs, number of subnets, container CIDR blocks, and Services for access based on service requirements.This topic", + "doc_type":"usermanual2", + "kw":"Planning CIDR Blocks for a Cluster,Networking,User Guide", + "title":"Planning CIDR Blocks for a Cluster", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00162.html", + "product_code":"cce", + "code":"264", + "des":"CCE uses self-proprietary, high-performance container networking add-ons to support the tunnel network, Cloud Native Network 2.0, and VPC network models.After a cluster i", + "doc_type":"usermanual2", + "kw":"Selecting a Network Model,Networking,User Guide", + "title":"Selecting a Network Model", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00231.html", + "product_code":"cce", + "code":"265", + "des":"Session persistence is one of the most common while complex problems in load balancing.Session persistence is also called sticky sessions. After the sticky session functi", + "doc_type":"usermanual2", + "kw":"Implementing Sticky Session Through Load Balancing,Networking,User Guide", + "title":"Implementing Sticky Session Through Load Balancing", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00035.html", + "product_code":"cce", + "code":"266", + "des":"There may be different types of proxy servers between a client and a container server. How can a container obtain the real source IP address of the client? This section d", + "doc_type":"usermanual2", + "kw":"Obtaining the Client Source IP Address for a Container,Networking,User Guide", + "title":"Obtaining the Client Source IP Address for a Container", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0053.html", + "product_code":"cce", + "code":"267", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Storage", + "title":"Storage", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00198.html", + "product_code":"cce", + "code":"268", + "des":"A data disk is divided depending on the container storage Rootfs:Overlayfs: No independent thin pool is allocated. Image data is stored in the dockersys disk.# lsblk\nNAME", + "doc_type":"usermanual2", + "kw":"Expanding Node Disk Capacity,Storage,User Guide", + "title":"Expanding Node Disk Capacity", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00199.html", + "product_code":"cce", + "code":"269", + "des":"This section describes how to mount OBS buckets and OBS parallel file systems (preferred) of third-party tenants.The CCE cluster of a SaaS service provider needs to be mo", + "doc_type":"usermanual2", + "kw":"Mounting an Object Storage Bucket of a Third-Party Tenant,Storage,User Guide", + "title":"Mounting an Object Storage Bucket of a Third-Party Tenant", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00253_0.html", + "product_code":"cce", + "code":"270", + "des":"The minimum capacity of an SFS Turbo file system is 500 GB, and the SFS Turbo file system cannot be billed by usage. 
By default, the root directory of an SFS Turbo file s", + "doc_type":"usermanual2", + "kw":"Dynamically Creating and Mounting Subdirectories of an SFS Turbo File System,Storage,User Guide", + "title":"Dynamically Creating and Mounting Subdirectories of an SFS Turbo File System", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0107.html", + "product_code":"cce", + "code":"271", + "des":"In clusters later than v1.15.11-r1, CSI (the everest add-on) has taken over all functions of fuxi FlexVolume (the storage-driver add-on) for managing container storage. Y", + "doc_type":"usermanual2", + "kw":"How Do I Change the Storage Class Used by a Cluster of v1.15 from FlexVolume to CSI Everest?,Storage", + "title":"How Do I Change the Storage Class Used by a Cluster of v1.15 from FlexVolume to CSI Everest?", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00281_0.html", + "product_code":"cce", + "code":"272", + "des":"When using storage resources in CCE, the most common method is to specify storageClassName to define the type of storage resources to be created when creating a PVC. The ", + "doc_type":"usermanual2", + "kw":"Custom Storage Classes,Storage,User Guide", + "title":"Custom Storage Classes", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00284.html", + "product_code":"cce", + "code":"273", + "des":"EVS disks cannot be attached across AZs. For example, EVS disks in AZ 1 cannot be attached to nodes in AZ 2.If the storage class csi-disk is used for StatefulSets, when a", + "doc_type":"usermanual2", + "kw":"Realizing Automatic Topology for EVS Disks When Nodes Are Deployed Across AZs (csi-disk-topology),St", + "title":"Realizing Automatic Topology for EVS Disks When Nodes Are Deployed Across AZs (csi-disk-topology)", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0051.html", + "product_code":"cce", + "code":"274", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Container", + "title":"Container", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00002.html", + "product_code":"cce", + "code":"275", + "des":"If a node has sufficient memory resources, a container on this node can use more memory resources than requested, but no more than limited. If the memory allocated to a c", + "doc_type":"usermanual2", + "kw":"Properly Allocating Container Computing Resources,Container,User Guide", + "title":"Properly Allocating Container Computing Resources", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00227.html", + "product_code":"cce", + "code":"276", + "des":"To access a Kubernetes cluster from a client, you can use the Kubernetes command line tool kubectl.Create a DaemonSet file.vi daemonSet.yamlAn example YAML file is provid", + "doc_type":"usermanual2", + "kw":"Modifying Kernel Parameters Using a Privileged Container,Container,User Guide", + "title":"Modifying Kernel Parameters Using a Privileged Container", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00228.html", + "product_code":"cce", + "code":"277", + "des":"Before containers running applications are started, one or some init containers are started first. 
If there are multiple init containers, they will be started in the defi", + "doc_type":"usermanual2", + "kw":"Initializing a Container,Container,User Guide", + "title":"Initializing a Container", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00226.html", + "product_code":"cce", + "code":"278", + "des":"If DNS or other related settings are inappropriate, you can use hostAliases to overwrite the resolution of the host name at the pod level when adding entries to the /etc/", + "doc_type":"usermanual2", + "kw":"Using hostAliases to Configure /etc/hosts in a Pod,Container,User Guide", + "title":"Using hostAliases to Configure /etc/hosts in a Pod", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0325.html", + "product_code":"cce", + "code":"279", + "des":"Linux allows you to create a core dump file if an application crashes, which contains the data the application had in memory at the time of the crash. You can analyze the", + "doc_type":"usermanual2", + "kw":"Configuring Core Dumps,Container,User Guide", + "title":"Configuring Core Dumps", + "githuburl":"" + }, + { + "uri":"cce_01_9999.html", + "product_code":"cce", + "code":"280", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", "kw":"Migrating Data from CCE 1.0 to CCE 2.0", "title":"Migrating Data from CCE 1.0 to CCE 2.0", "githuburl":"" @@ -2362,7 +2802,7 @@ { "uri":"cce_01_9998.html", "product_code":"cce", - "code":"237", + "code":"281", "des":"CCE 2.0 inherits and modifies the features of CCE 1.0, and release new features.Modified features:Clusters in CCE 1.0 are equivalent to Hybrid clusters in CCE 2.0.CCE 2.0", "doc_type":"usermanual2", "kw":"Differences Between CCE 1.0 and CCE 2.0,Migrating Data from CCE 1.0 to CCE 2.0,User Guide", @@ -2372,7 +2812,7 @@ { "uri":"cce_01_9997.html", "product_code":"cce", - "code":"238", + "code":"282", "des":"Migrate the images stored in the image repository of CCE 1.0 to CCE 2.0.A VM is available. The VM is bound to a public IP address and can access the Internet. Docker (ear", "doc_type":"usermanual2", "kw":"Migrating Images,Migrating Data from CCE 1.0 to CCE 2.0,User Guide", @@ -2382,7 +2822,7 @@ { "uri":"cce_01_9996.html", "product_code":"cce", - "code":"239", + "code":"283", "des":"Create Hybrid clusters on the CCE 2.0 console. These new Hybrid clusters should have the same specifications with those created on CCE 1.0.To create clusters using APIs, ", "doc_type":"usermanual2", "kw":"Migrating Clusters,Migrating Data from CCE 1.0 to CCE 2.0,User Guide", @@ -2392,7 +2832,7 @@ { "uri":"cce_01_9995.html", "product_code":"cce", - "code":"240", + "code":"284", "des":"This section describes how to create a Deployment with the same specifications as that in CCE 1.0 on the CCE 2.0 console.It is advised to delete the applications on CCE 1", "doc_type":"usermanual2", "kw":"Migrating Applications,Migrating Data from CCE 1.0 to CCE 2.0,User Guide", @@ -2402,7 +2842,7 @@ { "uri":"cce_01_0300.html", "product_code":"cce", - "code":"241", + "code":"285", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Change History,User Guide", diff --git a/docs/cce/umn/CLASS.TXT.json b/docs/cce/umn/CLASS.TXT.json index bc280604..afd0e381 100644 --- a/docs/cce/umn/CLASS.TXT.json +++ b/docs/cce/umn/CLASS.TXT.json @@ -21,7 +21,7 @@ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", "title":"Product Bulletin", - "uri":"cce_01_0236.html", + "uri":"cce_bulletin_0000.html", "doc_type":"usermanual2", "p_code":"", "code":"3" @@ -36,7 +36,7 @@ "code":"4" }, { - "desc":"This section describes the Kubernetes version support mechanism of CCE.Version number: The format is x.y.z-r{n}, where x.y is the major version and z is the minor version", + "desc":"This section explains versioning in CCE, and the policies for Kubernetes version support.Version number: The format is x.y.z, where x.y is the major version and z is the ", "product_code":"cce", "title":"Kubernetes Version Support Mechanism", "uri":"cce_bulletin_0003.html", @@ -54,7 +54,7 @@ "code":"6" }, { - "desc":"CCE nodes in Hybrid clusters can run on EulerOS 2.2, EulerOS 2.5, EulerOS 2.9 and CentOS 7.7. The following table lists the supported patches for these OSs.The OS patches", + "desc":"CCE nodes in Hybrid clusters can run on EulerOS 2.5, EulerOS 2.9 and CentOS 7.7. The following table lists the supported patches for these OSs.The OS patches and verifica", "product_code":"cce", "title":"OS Patch Notes for Cluster Nodes", "uri":"cce_bulletin_0301.html", @@ -71,6 +71,15 @@ "p_code":"3", "code":"8" }, + { + "desc":"High-risk vulnerabilities:CCE fixes vulnerabilities as soon as possible after the Kubernetes community detects them and releases fixing solutions. The fixing policies are", + "product_code":"cce", + "title":"Vulnerability Fixing Policies", + "uri":"cce_bulletin_0011.html", + "doc_type":"usermanual2", + "p_code":"8", + "code":"9" + }, { "desc":"Recently, a security research team disclosed a privilege escalation vulnerability (CVE-2021-4034, also dubbed PwnKit) in PolKit's pkexec. Unprivileged users can gain full", "product_code":"cce", @@ -78,7 +87,7 @@ "uri":"CVE-2021-4034.html", "doc_type":"usermanual2", "p_code":"8", - "code":"9" + "code":"10" }, { "desc":"The Linux Kernel SACK vulnerabilities have been fixed. This section describes the solution to these vulnerabilities.On June 18, 2019, Red Hat released a security notice, ", @@ -87,7 +96,16 @@ "uri":"cce_bulletin_0206.html", "doc_type":"usermanual2", "p_code":"8", - "code":"10" + "code":"11" + }, + { + "desc":"In clusters earlier than v1.21, a token is obtained by mounting the secret of the service account to a pod. Tokens obtained this way are permanent. This approach is no lo", + "product_code":"cce", + "title":"Service Account Token Security Improvement", + "uri":"cce_10_0477.html", + "doc_type":"usermanual2", + "p_code":"3", + "code":"12" }, { "desc":"CCE works closely with multiple cloud services to support computing, storage, networking, and monitoring functions. 
When you log in to the CCE console for the first time,", @@ -96,1772 +114,1790 @@ "uri":"cce_01_9994.html", "doc_type":"usermanual2", "p_code":"", - "code":"11" + "code":"13" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", "title":"Clusters", - "uri":"cce_01_0027.html", + "uri":"cce_10_0091.html", "doc_type":"usermanual2", "p_code":"", - "code":"12" + "code":"14" }, { - "desc":"Kubernetes is a containerized application software system that can be easily deployed and managed. It facilitates container scheduling and orchestration.For application d", + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", "title":"Cluster Overview", - "uri":"cce_01_0002.html", + "uri":"cce_10_0002.html", "doc_type":"usermanual2", - "p_code":"12", - "code":"13" + "p_code":"14", + "code":"15" + }, + { + "desc":"Kubernetes allows you to easily deploy and manage containerized application and facilitates container scheduling and orchestration.For developers, Kubernetes is a cluster", + "product_code":"cce", + "title":"Basic Cluster Information", + "uri":"cce_10_0430.html", + "doc_type":"usermanual2", + "p_code":"15", + "code":"16" }, { "desc":"The following table lists the differences between CCE Turbo clusters and CCE clusters:The QingTian architecture consists of data plane (software-hardware synergy) and man", "product_code":"cce", "title":"CCE Turbo Clusters and CCE Clusters", - "uri":"cce_01_0342.html", + "uri":"cce_10_0342.html", "doc_type":"usermanual2", - "p_code":"12", - "code":"14" + "p_code":"15", + "code":"17" + }, + { + "desc":"kube-proxy is a key component of a Kubernetes cluster. It is responsible for load balancing and forwarding between a Service and its backend pod.CCE supports two forwardi", + "product_code":"cce", + "title":"Comparing iptables and IPVS", + "uri":"cce_10_0349.html", + "doc_type":"usermanual2", + "p_code":"15", + "code":"18" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Release Notes", + "uri":"cce_10_0068.html", + "doc_type":"usermanual2", + "p_code":"15", + "code":"19" + }, + { + "desc":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.25.Kubernetes 1", + "product_code":"cce", + "title":"CCE Kubernetes 1.25 Release Notes", + "uri":"cce_10_0467.html", + "doc_type":"usermanual2", + "p_code":"19", + "code":"20" + }, + { + "desc":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. 
This section describes the updates in CCE Kubernetes 1.23.Changes in C", + "product_code":"cce", + "title":"CCE Kubernetes 1.23 Release Notes", + "uri":"cce_10_0468.html", + "doc_type":"usermanual2", + "p_code":"19", + "code":"21" + }, + { + "desc":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.21.Kubernetes 1", + "product_code":"cce", + "title":"CCE Kubernetes 1.21 Release Notes", + "uri":"cce_10_0469.html", + "doc_type":"usermanual2", + "p_code":"19", + "code":"22" + }, + { + "desc":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.19.Kubernetes 1", + "product_code":"cce", + "title":"CCE Kubernetes 1.19 Release Notes", + "uri":"cce_10_0470.html", + "doc_type":"usermanual2", + "p_code":"19", + "code":"23" + }, + { + "desc":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.17.All resource", + "product_code":"cce", + "title":"CCE Kubernetes 1.17 Release Notes", + "uri":"cce_10_0471.html", + "doc_type":"usermanual2", + "p_code":"19", + "code":"24" }, { "desc":"CCE Turbo clusters run on a cloud native infrastructure that features software-hardware synergy to support passthrough networking, high security and reliability, and inte", "product_code":"cce", "title":"Creating a CCE Turbo Cluster", - "uri":"cce_01_0298.html", + "uri":"cce_10_0298.html", "doc_type":"usermanual2", - "p_code":"12", - "code":"15" + "p_code":"14", + "code":"25" }, { "desc":"On the CCE console, you can easily create Kubernetes clusters. Kubernetes can manage container clusters at scale. A cluster manages a group of node resources.In CCE, you ", "product_code":"cce", "title":"Creating a CCE Cluster", - "uri":"cce_01_0028.html", + "uri":"cce_10_0028.html", "doc_type":"usermanual2", - "p_code":"12", - "code":"16" + "p_code":"14", + "code":"26" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", "title":"Using kubectl to Run a Cluster", - "uri":"cce_01_0140.html", + "uri":"cce_10_0140.html", "doc_type":"usermanual2", - "p_code":"12", - "code":"17" + "p_code":"14", + "code":"27" }, { "desc":"This section uses a CCE cluster as an example to describe how to connect to a CCE cluster using kubectl.When you access a cluster using kubectl, CCE uses thekubeconfig.js", "product_code":"cce", "title":"Connecting to a Cluster Using kubectl", - "uri":"cce_01_0107.html", + "uri":"cce_10_0107.html", "doc_type":"usermanual2", - "p_code":"17", - "code":"18" + "p_code":"27", + "code":"28" + }, + { + "desc":"A Subject Alternative Name (SAN) can be signed in to a cluster server certificate. A SAN is usually used by the client to verify the server validity in TLS handshakes. 
Sp", + "product_code":"cce", + "title":"Customizing a Cluster Certificate SAN", + "uri":"cce_10_0367.html", + "doc_type":"usermanual2", + "p_code":"27", + "code":"29" }, { "desc":"getThe get command displays one or many resources of a cluster.This command prints a table of the most important information about all resources, including cluster nodes,", "product_code":"cce", "title":"Common kubectl Commands", - "uri":"cce_01_0139.html", + "uri":"cce_10_0139.html", "doc_type":"usermanual2", - "p_code":"17", - "code":"19" - }, - { - "desc":"Before running kubectl commands, you should have the kubectl development skills and understand the kubectl operations. For details, see Kubernetes API and kubectl CLI.Go ", - "product_code":"cce", - "title":"kubectl Usage Guide", - "uri":"cce_01_0023.html", - "doc_type":"usermanual2", - "p_code":"17", - "code":"20" - }, - { - "desc":"The Cluster Auto Scaling feature allows CCE to automatically scale out a cluster (adding worker nodes to a cluster) according to custom policies when workloads cannot be ", - "product_code":"cce", - "title":"Setting Cluster Auto Scaling", - "uri":"cce_01_0157.html", - "doc_type":"usermanual2", - "p_code":"12", - "code":"21" + "p_code":"27", + "code":"30" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", "title":"Upgrading a Cluster", - "uri":"cce_01_0215.html", + "uri":"cce_10_0215.html", "doc_type":"usermanual2", - "p_code":"12", - "code":"22" + "p_code":"14", + "code":"31" }, { "desc":"To enable interoperability from one Kubernetes installation to the next, you must upgrade your Kubernetes clusters before the maintenance period ends.After the latest Kub", "product_code":"cce", - "title":"Overview", - "uri":"cce_01_0197.html", + "title":"Upgrade Overview", + "uri":"cce_10_0197.html", "doc_type":"usermanual2", - "p_code":"22", - "code":"23" + "p_code":"31", + "code":"32" }, { - "desc":"Before the upgrade, you can check whether your cluster can be upgraded and which versions are available on the CCE console. For details, see Overview.Upgraded clusters ca", + "desc":"Before the upgrade, you can check whether your cluster can be upgraded and which versions are available on the CCE console. 
For details, see Upgrade Overview.Upgraded clu", "product_code":"cce", "title":"Before You Start", - "uri":"cce_01_0302.html", + "uri":"cce_10_0302.html", "doc_type":"usermanual2", - "p_code":"22", - "code":"24" + "p_code":"31", + "code":"33" }, { "desc":"You can upgrade your clusters to a newer Kubernetes version on the CCE console.Before the upgrade, learn about the target version to which each CCE cluster can be upgrade", "product_code":"cce", - "title":"Performing Replace/Rolling Upgrade (v1.13 and Earlier)", - "uri":"cce_01_0120.html", + "title":"Performing Replace/Rolling Upgrade", + "uri":"cce_10_0120.html", "doc_type":"usermanual2", - "p_code":"22", - "code":"25" + "p_code":"31", + "code":"34" }, { - "desc":"On the CCE console, You can perform an in-place cluster upgrade to use new cluster features.Before the upgrade, learn about the target version to which each CCE cluster c", + "desc":"You can upgrade your clusters to a newer version on the CCE console.Before the upgrade, learn about the target version to which each CCE cluster can be upgraded in what w", "product_code":"cce", - "title":"Performing In-place Upgrade (v1.15 and Later)", - "uri":"cce_01_0301.html", + "title":"Performing In-place Upgrade", + "uri":"cce_10_0301.html", "doc_type":"usermanual2", - "p_code":"22", - "code":"26" + "p_code":"31", + "code":"35" }, { "desc":"This section describes how to migrate services from a cluster of an earlier version to a cluster of a later version in CCE.This operation is applicable when a cross-versi", "product_code":"cce", "title":"Migrating Services Across Clusters of Different Versions", - "uri":"cce_01_0210.html", + "uri":"cce_10_0210.html", "doc_type":"usermanual2", - "p_code":"22", - "code":"27" - }, - { - "desc":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. To enable interoperability from one Kubernetes installation to the nex", - "product_code":"cce", - "title":"CCE Kubernetes Release Notes", - "uri":"cce_01_0068.html", - "doc_type":"usermanual2", - "p_code":"22", - "code":"28" - }, - { - "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "product_code":"cce", - "title":"Managing a Cluster", - "uri":"cce_01_0031.html", - "doc_type":"usermanual2", - "p_code":"12", - "code":"29" - }, - { - "desc":"This section describes how to delete a cluster.Deleting a cluster will delete the nodes in the cluster (excluding accepted nodes), data disks attached to the nodes, workl", - "product_code":"cce", - "title":"Deleting a Cluster", - "uri":"cce_01_0212.html", - "doc_type":"usermanual2", - "p_code":"29", - "code":"30" - }, - { - "desc":"If you do not need to use a cluster temporarily, you are advised to hibernate the cluster to save cluster management costs.After a cluster is hibernated, resources such a", - "product_code":"cce", - "title":"Hibernating and Waking Up a Cluster", - "uri":"cce_01_0214.html", - "doc_type":"usermanual2", - "p_code":"29", - "code":"31" - }, - { - "desc":"CCE clusters allow you to manage Kubernetes parameters, through which you can let core components work under your very requirements.This function is supported only in clu", - "product_code":"cce", - "title":"Configuring Kubernetes Parameters", - "uri":"cce_01_0213.html", - "doc_type":"usermanual2", - "p_code":"29", - "code":"32" - }, - { - "desc":"Before accessing cluster resources through open-source Kubernetes APIs, obtain the cluster's certificate.The downloaded certificate contains three files: client.key, clie", - "product_code":"cce", - "title":"Obtaining a Cluster Certificate", - "uri":"cce_01_0175.html", - "doc_type":"usermanual2", - "p_code":"12", - "code":"33" - }, - { - "desc":"This section describes how to control permissions on resources in a cluster, for example, allow user A to read and write application data in a namespace, and user B to on", - "product_code":"cce", - "title":"Controlling Cluster Permissions", - "uri":"cce_01_0085.html", - "doc_type":"usermanual2", - "p_code":"12", - "code":"34" - }, - { - "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "product_code":"cce", - "title":"Cluster Parameters", - "uri":"cce_01_0347.html", - "doc_type":"usermanual2", - "p_code":"12", - "code":"35" - }, - { - "desc":"The maximum number of pods that can be created on a node is determined by the following parameters:Number of container IP addresses that can be allocated on a node (alpha", - "product_code":"cce", - "title":"Maximum Number of Pods That Can Be Created on a Node", - "uri":"cce_01_0348.html", - "doc_type":"usermanual2", - "p_code":"35", + "p_code":"31", "code":"36" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", - "title":"Nodes", - "uri":"cce_01_0183.html", + "title":"Managing a Cluster", + "uri":"cce_10_0031.html", "doc_type":"usermanual2", - "p_code":"", + "p_code":"14", "code":"37" }, { - "desc":"A container cluster consists of a set of worker machines, called nodes, that run containerized applications. 
A node can be a virtual machine (VM) or a physical machine (P", + "desc":"CCE allows you to manage cluster parameters, through which you can let core components work under your very requirements.This function is supported only in clusters of v1", "product_code":"cce", - "title":"Overview", - "uri":"cce_01_0180.html", + "title":"Managing Cluster Components", + "uri":"cce_10_0213.html", "doc_type":"usermanual2", "p_code":"37", "code":"38" }, { - "desc":"A node is a virtual or physical machine that provides computing resources. Sufficient nodes must be available in your project to ensure that operations, such as creating ", + "desc":"This section describes how to delete a cluster.Deleting a cluster will delete the nodes in the cluster (excluding accepted nodes), data disks attached to the nodes, workl", "product_code":"cce", - "title":"Creating a Node", - "uri":"cce_01_0033.html", + "title":"Deleting a Cluster", + "uri":"cce_10_0212.html", "doc_type":"usermanual2", "p_code":"37", "code":"39" }, { - "desc":"At least one CCE Turbo cluster is available. For details on how to create a cluster, see Creating a CCE Turbo Cluster.A key pair has been created for identity authenticat", + "desc":"If you do not need to use a cluster temporarily, you are advised to hibernate the cluster.After a cluster is hibernated, resources such as workloads cannot be created or ", "product_code":"cce", - "title":"Creating a Node in a CCE Turbo Cluster", - "uri":"cce_01_0363.html", + "title":"Hibernating and Waking Up a Cluster", + "uri":"cce_10_0214.html", "doc_type":"usermanual2", "p_code":"37", "code":"40" }, { - "desc":"Removing a node from a cluster in CCE will re-install the node OS and clear CCE components on the node.Removing a node will not delete the server (ECS) corresponding to t", + "desc":"If overload control is enabled, concurrent requests are dynamically controlled based on the resource pressure of master nodes to keep them and the cluster available.The c", "product_code":"cce", - "title":"Removing a Node", - "uri":"cce_01_0338.html", + "title":"Cluster Overload Control", + "uri":"cce_10_0602.html", "doc_type":"usermanual2", "p_code":"37", "code":"41" }, { - "desc":"If you use SSH to log in to a node (an ECS), ensure that the ECS already has an EIP (a public IP address).Only login to a running ECS is allowed.Only the user linux can l", + "desc":"This section describes how to obtain the cluster certificate from the console and use it to access Kubernetes clusters.The downloaded certificate contains three files: cl", "product_code":"cce", - "title":"Logging In to a Node", - "uri":"cce_01_0185.html", + "title":"Obtaining a Cluster Certificate", + "uri":"cce_10_0175.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"14", "code":"42" }, { - "desc":"You can add different labels to nodes and define different attributes for labels. By using these node labels, you can quickly understand the characteristics of each node.", + "desc":"CCE allows you to change the number of nodes managed in a cluster.This function is supported for clusters of v1.15 and later versions.Starting from v1.15.11, the number o", "product_code":"cce", - "title":"Managing Node Labels", - "uri":"cce_01_0004.html", + "title":"Changing Cluster Scale", + "uri":"cce_10_0403.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"14", "code":"43" }, { - "desc":"Each node in a cluster is a cloud server or physical machine. 
After a cluster node is created, you can change the cloud server name or specifications as required.Some inf", + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", - "title":"Synchronizing Node Data", - "uri":"cce_01_0184.html", + "title":"Nodes", + "uri":"cce_10_0183.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"", "code":"44" }, { - "desc":"Taints enable a node to repel specific pods to prevent these pods from being scheduled to the node.A taint is a key-value pair associated with an effect. The following ef", + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", - "title":"Configuring Node Scheduling (Tainting)", - "uri":"cce_01_0352.html", + "title":"Node Overview", + "uri":"cce_10_0180.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"44", "code":"45" }, { - "desc":"You can reset a node to modify the node configuration, such as the node OS and login mode.Resetting a node will reinstall the node OS and the Kubernetes software on the n", + "desc":"A container cluster consists of a set of worker machines, called nodes, that run containerized applications. A node can be a virtual machine (VM) or a physical machine (P", "product_code":"cce", - "title":"Resetting a Node", - "uri":"cce_01_0003.html", + "title":"Precautions for Using a Node", + "uri":"cce_10_0461.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"45", "code":"46" }, { - "desc":"When a node in a CCE cluster is deleted, services running on the node will also be deleted. Exercise caution when performing this operation.After a CCE cluster is deleted", + "desc":"Container engines, one of the most important components of Kubernetes, manage the lifecycle of images and containers. The kubelet interacts with a container runtime throu", "product_code":"cce", - "title":"Deleting a Node", - "uri":"cce_01_0186.html", + "title":"Container Engine", + "uri":"cce_10_0462.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"45", "code":"47" }, { - "desc":"After a node in the cluster is stopped, services on the node are also stopped. Before stopping a node, ensure that discontinuity of the services on the node will not resu", + "desc":"The most significant difference is that each Kata container (pod) runs on an independent micro-VM, has an independent OS kernel, and is securely isolated at the virtualiz", "product_code":"cce", - "title":"Stopping a Node", - "uri":"cce_01_0036.html", + "title":"Kata Containers and Common Containers", + "uri":"cce_10_0463.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"45", "code":"48" }, { - "desc":"In a rolling upgrade, a new node is created, existing workloads are migrated to the new node, and then the old node is deleted. 
Figure 1 shows the migration process.The o", + "desc":"The maximum number of pods that can be created on a node is determined by the following parameters:Number of container IP addresses that can be allocated on a node (alpha", "product_code":"cce", - "title":"Performing Rolling Upgrade for Nodes", - "uri":"cce_01_0276.html", + "title":"Maximum Number of Pods That Can Be Created on a Node", + "uri":"cce_10_0348.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"45", "code":"49" }, { "desc":"Some of the resources on the node need to run some necessary Kubernetes system components and resources to make the node as part of your cluster. Therefore, the total num", "product_code":"cce", "title":"Formula for Calculating the Reserved Resources of a Node", - "uri":"cce_01_0178.html", + "uri":"cce_10_0178.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"45", "code":"50" }, { - "desc":"This section describes how to check whether there are available raw disks and Linux LVM disk partitions and how to create Linux LVM disk partitions.To improve the system ", + "desc":"This section describes how to allocate data disk space.When creating a node, you need to configure a data disk whose capacity is greater than or equal to 100GB for the no", "product_code":"cce", - "title":"Creating a Linux LVM Disk Partition for Docker", - "uri":"cce_01_0200.html", + "title":"Data Disk Space Allocation", + "uri":"cce_10_0341.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"45", "code":"51" }, { - "desc":"When creating a node, you need to configure data disks for the node.The data disk is divided into Kubernetes space and user space. The user space defines the space that i", + "desc":"At least one cluster has been created.A key pair has been created for identity authentication upon remote node login.The node has 2-core or higher CPU, 4 GB or larger mem", "product_code":"cce", - "title":"Data Disk Space Allocation", - "uri":"cce_01_0341.html", + "title":"Creating a Node", + "uri":"cce_10_0363.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"44", "code":"52" }, { - "desc":"You can use the pre-installation script feature to configure CCE cluster nodes (ECSs).When creating a node in a cluster of v1.13.10 or later, if a data disk is not manage", + "desc":"In CCE, you can Creating a Node or add existing nodes (ECSs) into your cluster.While an ECS is being accepted into a cluster, the operating system of the ECS will be rese", "product_code":"cce", - "title":"Adding a Second Data Disk to a Node in a CCE Cluster", - "uri":"cce_01_0344.html", + "title":"Adding Nodes for Management", + "uri":"cce_10_0198.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"44", "code":"53" }, + { + "desc":"Removing a node from a cluster will re-install the node OS and clear CCE components on the node.Removing a node will not delete the server corresponding to the node. 
You ", + "product_code":"cce", + "title":"Removing a Node", + "uri":"cce_10_0338.html", + "doc_type":"usermanual2", + "p_code":"44", + "code":"54" + }, + { + "desc":"You can reset a node to modify the node configuration, such as the node OS and login mode.Resetting a node will reinstall the node OS and the Kubernetes software on the n", + "product_code":"cce", + "title":"Resetting a Node", + "uri":"cce_10_0003.html", + "doc_type":"usermanual2", + "p_code":"44", + "code":"55" + }, + { + "desc":"If you use SSH to log in to a node (an ECS), ensure that the ECS already has an EIP (a public IP address).Only login to a running ECS is allowed.Only the user linux can l", + "product_code":"cce", + "title":"Logging In to a Node", + "uri":"cce_10_0185.html", + "doc_type":"usermanual2", + "p_code":"44", + "code":"56" + }, + { + "desc":"You can add different labels to nodes and define different attributes for labels. By using these node labels, you can quickly understand the characteristics of each node.", + "product_code":"cce", + "title":"Managing Node Labels", + "uri":"cce_10_0004.html", + "doc_type":"usermanual2", + "p_code":"44", + "code":"57" + }, + { + "desc":"Taints enable a node to repel specific pods to prevent these pods from being scheduled to the node.A taint is a key-value pair associated with an effect. The following ef", + "product_code":"cce", + "title":"Managing Node Taints", + "uri":"cce_10_0352.html", + "doc_type":"usermanual2", + "p_code":"44", + "code":"58" + }, + { + "desc":"Each node in a cluster is a cloud server or physical machine. After a cluster node is created, you can change the cloud server name or specifications as required.Some inf", + "product_code":"cce", + "title":"Synchronizing Data with Cloud Servers", + "uri":"cce_10_0184.html", + "doc_type":"usermanual2", + "p_code":"44", + "code":"59" + }, + { + "desc":"When a node in a CCE cluster is deleted, services running on the node will also be deleted. Exercise caution when performing this operation.After a CCE cluster is deleted", + "product_code":"cce", + "title":"Deleting a Node", + "uri":"cce_10_0186.html", + "doc_type":"usermanual2", + "p_code":"44", + "code":"60" + }, + { + "desc":"After a node in the cluster is stopped, services on the node are also stopped. Before stopping a node, ensure that discontinuity of the services on the node will not resu", + "product_code":"cce", + "title":"Stopping a Node", + "uri":"cce_10_0036.html", + "doc_type":"usermanual2", + "p_code":"44", + "code":"61" + }, + { + "desc":"In a rolling upgrade, a new node is created, existing workloads are migrated to the new node, and then the old node is deleted. Figure 1 shows the migration process.The o", + "product_code":"cce", + "title":"Performing Rolling Upgrade for Nodes", + "uri":"cce_10_0276.html", + "doc_type":"usermanual2", + "p_code":"44", + "code":"62" + }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", "title":"Node Pools", - "uri":"cce_01_0035.html", + "uri":"cce_10_0035.html", "doc_type":"usermanual2", "p_code":"", - "code":"54" + "code":"63" }, { "desc":"CCE introduces node pools to help you better manage nodes in Kubernetes clusters. 
A node pool contains one node or a group of nodes with identical configuration in a clus", "product_code":"cce", "title":"Node Pool Overview", - "uri":"cce_01_0081.html", + "uri":"cce_10_0081.html", "doc_type":"usermanual2", - "p_code":"54", - "code":"55" + "p_code":"63", + "code":"64" }, { - "desc":"This section describes how to create a node pool and perform operations on the node pool. For details about how a node pool works, see Node Pool Overview.For details abou", + "desc":"This section describes how to create a node pool and perform operations on the node pool. For details about how a node pool works, see Node Pool Overview.The autoscaler a", "product_code":"cce", "title":"Creating a Node Pool", - "uri":"cce_01_0012.html", + "uri":"cce_10_0012.html", "doc_type":"usermanual2", - "p_code":"54", - "code":"56" + "p_code":"63", + "code":"65" }, { "desc":"The default node pool DefaultPool does not support the following management operations.CCE allows you to highly customize Kubernetes parameter settings on core components", "product_code":"cce", "title":"Managing a Node Pool", - "uri":"cce_01_0222.html", + "uri":"cce_10_0222.html", "doc_type":"usermanual2", - "p_code":"54", - "code":"57" + "p_code":"63", + "code":"66" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", "title":"Workloads", - "uri":"cce_01_0046.html", + "uri":"cce_10_0046.html", "doc_type":"usermanual2", "p_code":"", - "code":"58" + "code":"67" }, { "desc":"A workload is an application running on Kubernetes. No matter how many components are there in your workload, you can run it in a group of Kubernetes pods. A workload is ", "product_code":"cce", "title":"Overview", - "uri":"cce_01_0006.html", + "uri":"cce_10_0006.html", "doc_type":"usermanual2", - "p_code":"58", - "code":"59" + "p_code":"67", + "code":"68" }, { "desc":"Deployments are workloads (for example, Nginx) that do not store any data or status. You can create Deployments on the CCE console or by running kubectl commands.Before c", "product_code":"cce", "title":"Creating a Deployment", - "uri":"cce_01_0047.html", + "uri":"cce_10_0047.html", "doc_type":"usermanual2", - "p_code":"58", - "code":"60" + "p_code":"67", + "code":"69" }, { "desc":"StatefulSets are a type of workloads whose data or status is stored while they are running. For example, MySQL is a StatefulSet because it needs to store new data.A conta", "product_code":"cce", "title":"Creating a StatefulSet", - "uri":"cce_01_0048.html", + "uri":"cce_10_0048.html", "doc_type":"usermanual2", - "p_code":"58", - "code":"61" + "p_code":"67", + "code":"70" }, { "desc":"CCE provides deployment and management capabilities for multiple types of containers and supports features of container workloads, including creation, configuration, moni", "product_code":"cce", "title":"Creating a DaemonSet", - "uri":"cce_01_0216.html", + "uri":"cce_10_0216.html", "doc_type":"usermanual2", - "p_code":"58", - "code":"62" + "p_code":"67", + "code":"71" }, { "desc":"Jobs are short-lived and run for a certain time to completion. They can be executed immediately after being deployed. 
It is completed after it exits normally (exit 0).A j", "product_code":"cce", "title":"Creating a Job", - "uri":"cce_01_0150.html", + "uri":"cce_10_0150.html", "doc_type":"usermanual2", - "p_code":"58", - "code":"63" + "p_code":"67", + "code":"72" }, { "desc":"A cron job runs on a repeating schedule. You can perform time synchronization for all active nodes at a fixed time point.A cron job runs periodically at the specified tim", "product_code":"cce", "title":"Creating a Cron Job", - "uri":"cce_01_0151.html", + "uri":"cce_10_0151.html", "doc_type":"usermanual2", - "p_code":"58", - "code":"64" - }, - { - "desc":"A pod is the smallest and simplest unit in the Kubernetes object model that you create or deploy. A pod encapsulates an application's container (or, in some cases, multip", - "product_code":"cce", - "title":"Managing Pods", - "uri":"cce_01_0013.html", - "doc_type":"usermanual2", - "p_code":"58", - "code":"65" - }, - { - "desc":"After a workload is created, you can scale, upgrade, monitor, roll back, or delete the workload, as well as edit its YAML file.Workload/Job managementOperationDescription", - "product_code":"cce", - "title":"Managing Workloads and Jobs", - "uri":"cce_01_0007.html", - "doc_type":"usermanual2", - "p_code":"58", - "code":"66" - }, - { - "desc":"After scaling policies are defined, pods can be automatically added or deleted based on resource changes, fixed time, and fixed periods. You do not need to manually adjus", - "product_code":"cce", - "title":"Scaling a Workload", - "uri":"cce_01_0057.html", - "doc_type":"usermanual2", - "p_code":"58", - "code":"67" - }, - { - "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "product_code":"cce", - "title":"Configuring a Container", - "uri":"cce_01_0130.html", - "doc_type":"usermanual2", - "p_code":"58", - "code":"68" - }, - { - "desc":"CCE allows you to create workloads using images pulled from third-party image repositories.Generally, a third-party image repository can be accessed only after authentica", - "product_code":"cce", - "title":"Using a Third-Party Image", - "uri":"cce_01_0009.html", - "doc_type":"usermanual2", - "p_code":"68", - "code":"69" - }, - { - "desc":"CCE allows you to set resource limits for added containers during workload creation. You can request and limit the CPU and memory quotas used by each pod in the workload.", - "product_code":"cce", - "title":"Setting Container Specifications", - "uri":"cce_01_0163.html", - "doc_type":"usermanual2", - "p_code":"68", - "code":"70" - }, - { - "desc":"CCE provides callback functions for the lifecycle management of containerized applications. For example, if you want a container to perform a certain operation before sto", - "product_code":"cce", - "title":"Setting Container Lifecycle Parameters", - "uri":"cce_01_0105.html", - "doc_type":"usermanual2", - "p_code":"68", - "code":"71" - }, - { - "desc":"When creating a workload or job, you can use an image to specify the processes running in the container.By default, the image runs the default command. To run a specific ", - "product_code":"cce", - "title":"Setting Container Startup Commands", - "uri":"cce_01_0008.html", - "doc_type":"usermanual2", - "p_code":"68", - "code":"72" - }, - { - "desc":"Health check regularly checks the health status of containers during container running. 
If the health check function is not configured, a pod cannot detect service except", - "product_code":"cce", - "title":"Setting Health Check for a Container", - "uri":"cce_01_0112.html", - "doc_type":"usermanual2", - "p_code":"68", + "p_code":"67", "code":"73" }, { - "desc":"An environment variable is a variable whose value can affect the way a running container will behave. You can modify environment variables even after workloads are deploy", + "desc":"After a workload is created, you can upgrade, monitor, roll back, or delete the workload, as well as edit its YAML file.Workload/Job managementOperationDescriptionMonitor", "product_code":"cce", - "title":"Setting an Environment Variable", - "uri":"cce_01_0113.html", + "title":"Managing Workloads and Jobs", + "uri":"cce_10_0007.html", "doc_type":"usermanual2", - "p_code":"68", + "p_code":"67", "code":"74" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", - "title":"Affinity and Anti-Affinity Scheduling", - "uri":"cce_01_0149.html", + "title":"Configuring a Container", + "uri":"cce_10_0130.html", "doc_type":"usermanual2", - "p_code":"", + "p_code":"67", "code":"75" }, { - "desc":"CCE supports custom and simple scheduling policies. A custom scheduling policy allows you to customize node affinity, workload affinity, and workload anti-affinity to mee", + "desc":"A workload is an abstract model of a group of pods. One pod can encapsulate one or more containers. You can click Add Container in the upper right corner to add multiple ", "product_code":"cce", - "title":"Scheduling Policy Overview", - "uri":"cce_01_0051.html", + "title":"Setting Basic Container Information", + "uri":"cce_10_0396.html", "doc_type":"usermanual2", "p_code":"75", "code":"76" }, { - "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "desc":"CCE allows you to create workloads using images pulled from third-party image repositories.Generally, a third-party image repository can be accessed only after authentica", "product_code":"cce", - "title":"Custom Scheduling Policies", - "uri":"cce_01_0231.html", + "title":"Using a Third-Party Image", + "uri":"cce_10_0009.html", "doc_type":"usermanual2", "p_code":"75", "code":"77" }, { - "desc":"This section uses Nginx as an example to describe how to configure node affinity.PrerequisitesA workload that uses the nginx container image has been deployed on a node.P", + "desc":"CCE allows you to set resource limits for added containers during workload creation. You can apply for and limit the CPU and memory quotas used by each pod in a workload.", "product_code":"cce", - "title":"Node Affinity", - "uri":"cce_01_0232.html", + "title":"Setting Container Specifications", + "uri":"cce_10_0163.html", "doc_type":"usermanual2", - "p_code":"77", + "p_code":"75", "code":"78" }, { - "desc":"Workload affinity determines the pods as which the target workload will be deployed in the same topology domain.There are two types of pod affinity rules: Required (hard ", + "desc":"CCE provides callback functions for the lifecycle management of containerized applications. 
For example, if you want a container to perform a certain operation before sto", "product_code":"cce", - "title":"Workload Affinity", - "uri":"cce_01_0233.html", + "title":"Setting Container Lifecycle Parameters", + "uri":"cce_10_0105.html", "doc_type":"usermanual2", - "p_code":"77", + "p_code":"75", "code":"79" }, { - "desc":"Workload anti-affinity determines the pods from which the target workload will be deployed in a different topology domain.There are two types of pod anti-affinity rules: ", + "desc":"Health check regularly checks the health status of containers during container running. If the health check function is not configured, a pod cannot detect application ex", "product_code":"cce", - "title":"Workload Anti-Affinity", - "uri":"cce_01_0234.html", + "title":"Setting Health Check for a Container", + "uri":"cce_10_0112.html", "doc_type":"usermanual2", - "p_code":"77", + "p_code":"75", "code":"80" }, { - "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "desc":"An environment variable is a variable whose value can affect the way a running container will behave. You can modify environment variables even after workloads are deploy", "product_code":"cce", - "title":"Simple Scheduling Policies", - "uri":"cce_01_0230.html", + "title":"Setting an Environment Variable", + "uri":"cce_10_0113.html", "doc_type":"usermanual2", "p_code":"75", "code":"81" }, { - "desc":"The created workload will be deployed in the selected AZ.This section uses an Nginx workload as an example to describe how to create a workload using kubectl.Prerequisite", + "desc":"When a workload is created, the container image is pulled from the image repository to the node. The image is also pulled when the workload is restarted or upgraded.By de", "product_code":"cce", - "title":"Workload-AZ Affinity", - "uri":"cce_01_0228.html", + "title":"Configuring an Image Pull Policy", + "uri":"cce_10_0353.html", "doc_type":"usermanual2", - "p_code":"81", + "p_code":"75", "code":"82" }, { - "desc":"The created workload is not deployed on the selected AZ.This section uses Nginx as an example to describe how to create a workload using kubectl.PrerequisitesThe ECS wher", + "desc":"When creating a workload, you can configure containers to use the same time zone as the node. You can enable time zone synchronization when creating a workload.The time z", "product_code":"cce", - "title":"Workload-AZ Anti-Affinity", - "uri":"cce_01_0229.html", + "title":"Configuring Time Zone Synchronization", + "uri":"cce_10_0354.html", "doc_type":"usermanual2", - "p_code":"81", + "p_code":"75", "code":"83" }, { - "desc":"If you select multiple nodes, the system automatically chooses one of them during workload deployment.This section uses an Nginx workload as an example to describe how to", + "desc":"In actual applications, upgrade is a common operation. 
A Deployment, StatefulSet, or DaemonSet can easily support application upgrade.You can set different upgrade polici", "product_code":"cce", - "title":"Workload-Node Affinity", - "uri":"cce_01_0225.html", + "title":"Configuring the Workload Upgrade Policy", + "uri":"cce_10_0397.html", "doc_type":"usermanual2", - "p_code":"81", + "p_code":"75", "code":"84" }, { - "desc":"If you select multiple nodes, the workload will not be deployed on these nodes.This section uses Nginx as an example to describe how to create a workload using kubectl.Pr", + "desc":"A nodeSelector provides a very simple way to constrain pods to nodes with particular labels, as mentioned in Creating a DaemonSet. The affinity and anti-affinity feature ", "product_code":"cce", - "title":"Workload-Node Anti-Affinity", - "uri":"cce_01_0226.html", + "title":"Scheduling Policy (Affinity/Anti-affinity)", + "uri":"cce_10_0232.html", "doc_type":"usermanual2", - "p_code":"81", + "p_code":"75", "code":"85" }, { - "desc":"The workload to be created will be deployed on the same node as the selected affinity workloads.This section uses Nginx as an example to describe how to create a workload", + "desc":"You can use GPUs in CCE containers.A GPU node has been created. For details, see Creating a Node.The gpu-beta add-on has been installed. During the installation, select t", "product_code":"cce", - "title":"Workload-Workload Affinity", - "uri":"cce_01_0220.html", + "title":"GPU Scheduling", + "uri":"cce_10_0345.html", "doc_type":"usermanual2", - "p_code":"81", + "p_code":"67", "code":"86" }, - { - "desc":"The workload to be created and the selected workloads will be deployed on different nodes.This section uses Nginx as an example to describe how to create a workload using", - "product_code":"cce", - "title":"Workload-Workload Anti-Affinity", - "uri":"cce_01_0227.html", - "doc_type":"usermanual2", - "p_code":"81", - "code":"87" - }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", - "title":"Networking", - "uri":"cce_01_0020.html", + "title":"CPU Core Binding", + "uri":"cce_10_0551.html", "doc_type":"usermanual2", - "p_code":"", + "p_code":"67", + "code":"87" + }, + { + "desc":"By default, kubelet uses CFS quotas to enforce pod CPU limits. When the node runs many CPU-bound pods, the workload can move to different CPU cores depending on whether t", + "product_code":"cce", + "title":"Binding CPU Cores", + "uri":"cce_10_0351.html", + "doc_type":"usermanual2", + "p_code":"87", "code":"88" }, { - "desc":"You can learn about a cluster network from the following two aspects:What is a cluster network like? A cluster consists of multiple nodes, and pods (or containers) are ru", + "desc":"CCE allows you to add annotations to a YAML file to realize some advanced pod functions. The following table describes the annotations you can add.When you create a workl", "product_code":"cce", - "title":"Overview", - "uri":"cce_01_0010.html", + "title":"Pod Labels and Annotations", + "uri":"cce_10_0386.html", "doc_type":"usermanual2", - "p_code":"88", + "p_code":"67", "code":"89" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", - "title":"Container Network Models", - "uri":"cce_01_0280.html", + "title":"Volcano Scheduling", + "uri":"cce_10_0423.html", "doc_type":"usermanual2", - "p_code":"88", + "p_code":"67", "code":"90" }, { - "desc":"The container network assigns IP addresses to pods in a cluster and provides networking services. In CCE, you can select the following network models for your cluster:Con", + "desc":"Jobs can be classified into online jobs and offline jobs based on whether services are always online.Online job: Such jobs run for a long time, with regular traffic surge", "product_code":"cce", - "title":"Overview", - "uri":"cce_01_0281.html", + "title":"Hybrid Deployment of Online and Offline Jobs", + "uri":"cce_10_0384.html", "doc_type":"usermanual2", "p_code":"90", "code":"91" }, { - "desc":"The container tunnel network is constructed on but independent of the node network through tunnel encapsulation. This network model uses VXLAN to encapsulate Ethernet pac", + "desc":"When the Cloud Native Network 2.0 model is used, pods use VPC ENIs or sub-ENIs for networking. You can directly bind security groups and EIPs to pods. CCE provides a cust", "product_code":"cce", - "title":"Container Tunnel Network", - "uri":"cce_01_0282.html", + "title":"Security Group Policies", + "uri":"cce_10_0288.html", "doc_type":"usermanual2", - "p_code":"90", + "p_code":"67", "code":"92" }, { - "desc":"The VPC network uses VPC routing to integrate with the underlying network. This network model is suitable for performance-intensive scenarios. The maximum number of nodes", + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", - "title":"VPC Network", - "uri":"cce_01_0283.html", + "title":"Networking", + "uri":"cce_10_0020.html", "doc_type":"usermanual2", - "p_code":"90", + "p_code":"", "code":"93" }, { - "desc":"Developed by CCE, Cloud Native Network 2.0 deeply integrates Elastic Network Interfaces (ENIs) and sub-ENIs of Virtual Private Cloud (VPC). Container IP addresses are all", + "desc":"You can learn about a cluster network from the following two aspects:What is a cluster network like? A cluster consists of multiple nodes, and pods (or containers) are ru", "product_code":"cce", - "title":"Cloud Native Network 2.0", - "uri":"cce_01_0284.html", + "title":"Overview", + "uri":"cce_10_0010.html", "doc_type":"usermanual2", - "p_code":"90", + "p_code":"93", "code":"94" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", - "title":"Services", - "uri":"cce_01_0247.html", + "title":"Container Network Models", + "uri":"cce_10_0280.html", "doc_type":"usermanual2", - "p_code":"88", + "p_code":"93", "code":"95" }, { - "desc":"After a pod is created, the following problems may occur if you directly access the pod:The pod can be deleted and recreated at any time by a controller such as a Deploym", + "desc":"The container network assigns IP addresses to pods in a cluster and provides networking services. 
In CCE, you can select the following network models for your cluster:Tun", "product_code":"cce", "title":"Overview", - "uri":"cce_01_0249.html", + "uri":"cce_10_0281.html", "doc_type":"usermanual2", "p_code":"95", "code":"96" }, { - "desc":"ClusterIP Services allow workloads in the same cluster to use their cluster-internal domain names to access each other.The cluster-internal domain name format is Uploaded Charts for subsequent workload creation.When you upload a chart, the naming rule of the OBS bucket is changed from cce-charts-{region}", - "product_code":"cce", - "title":"Uploading a Chart", - "uri":"cce_01_0145.html", - "doc_type":"usermanual2", - "p_code":"176", - "code":"179" - }, - { - "desc":"In the workload list, if the status is Rollback successful, the workload is rolled back successfully.", - "product_code":"cce", - "title":"Creating a Workload from a Chart", - "uri":"cce_01_0146.html", - "doc_type":"usermanual2", - "p_code":"176", - "code":"180" - }, - { - "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "product_code":"cce", - "title":"Add-ons", - "uri":"cce_01_0064.html", - "doc_type":"usermanual2", - "p_code":"", - "code":"181" - }, - { - "desc":"CCE provides multiple types of add-ons to extend cluster functions and meet feature requirements. You can install add-ons as required.", - "product_code":"cce", - "title":"Overview", - "uri":"cce_01_0277.html", - "doc_type":"usermanual2", - "p_code":"181", - "code":"182" - }, - { - "desc":"The coredns add-on is a DNS server that provides domain name resolution services for Kubernetes clusters. coredns chains plug-ins to provide additional features.coredns i", - "product_code":"cce", - "title":"coredns (System Resource Add-on, Mandatory)", - "uri":"cce_01_0129.html", - "doc_type":"usermanual2", - "p_code":"181", - "code":"183" - }, - { - "desc":"storage-driver functions as a standard Kubernetes FlexVolume plug-in to allow containers to use IaaS storage resources. By installing and upgrading storage-driver, you ca", - "product_code":"cce", - "title":"storage-driver (System Resource Add-on, Mandatory)", - "uri":"cce_01_0127.html", - "doc_type":"usermanual2", - "p_code":"181", - "code":"184" - }, - { - "desc":"Everest is a cloud-native container storage system. Based on Container Storage Interface (CSI), clusters of Kubernetes v1.15 or later can interconnect with cloud storage ", - "product_code":"cce", - "title":"everest (System Resource Add-on, Mandatory)", - "uri":"cce_01_0066.html", - "doc_type":"usermanual2", - "p_code":"181", - "code":"185" - }, - { - "desc":"Autoscaler is an important Kubernetes controller. It supports microservice scaling and is key to serverless design.When the CPU or memory usage of a microservice is too h", - "product_code":"cce", - "title":"autoscaler", - "uri":"cce_01_0154.html", - "doc_type":"usermanual2", - "p_code":"181", - "code":"186" - }, - { - "desc":"From version 1.8 onwards, Kubernetes provides resource usage metrics, such as the container CPU and memory usage, through the Metrics API. These metrics can be directly a", - "product_code":"cce", - "title":"metrics-server", - "uri":"cce_01_0205.html", - "doc_type":"usermanual2", - "p_code":"181", - "code":"187" - }, - { - "desc":"gpu-beta is a device management add-on that supports GPUs in containers. 
It supports only NVIDIA Tesla drivers.This add-on is available only in certain regions.This add-o", - "product_code":"cce", - "title":"gpu-beta", - "uri":"cce_01_0141.html", - "doc_type":"usermanual2", - "p_code":"181", - "code":"188" + "p_code":"144", + "code":"149" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", "title":"Auto Scaling", - "uri":"cce_01_0207.html", + "uri":"cce_10_0207.html", "doc_type":"usermanual2", "p_code":"", - "code":"189" + "code":"150" }, { "desc":"Auto scaling is a service that automatically and economically adjusts service resources based on your service requirements and configured policies.More and more applicati", "product_code":"cce", "title":"Overview", - "uri":"cce_01_0279.html", + "uri":"cce_10_0279.html", "doc_type":"usermanual2", - "p_code":"189", - "code":"190" + "p_code":"150", + "code":"151" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", "title":"Scaling a Workload", - "uri":"cce_01_0293.html", + "uri":"cce_10_0293.html", "doc_type":"usermanual2", - "p_code":"189", - "code":"191" + "p_code":"150", + "code":"152" }, { - "desc":"Scaling policy priority: If you do not manually adjust the number of pods, auto scaling policies will take effect for resource scheduling. If manual scaling is triggered,", + "desc":"HPA is a controller that controls horizontal pod scaling. HPA periodically checks the pod metrics, calculates the number of replicas required to meet the target values co", "product_code":"cce", "title":"Workload Scaling Mechanisms", - "uri":"cce_01_0290.html", + "uri":"cce_10_0290.html", "doc_type":"usermanual2", - "p_code":"191", - "code":"192" + "p_code":"152", + "code":"153" }, { "desc":"Horizontal Pod Autoscaling (HPA) in Kubernetes implements horizontal scaling of pods. In a CCE HPA policy, you can configure different cooldown time windows and scaling t", "product_code":"cce", "title":"Creating an HPA Policy for Workload Auto Scaling", - "uri":"cce_01_0208.html", + "uri":"cce_10_0208.html", "doc_type":"usermanual2", - "p_code":"191", - "code":"193" + "p_code":"152", + "code":"154" }, { "desc":"After an HPA policy is created, you can update, clone, edit, and delete the policy, as well as edit the YAML file.You can view the rules, status, and events of an HPA pol", "product_code":"cce", "title":"Managing Workload Scaling Policies", - "uri":"cce_01_0083.html", + "uri":"cce_10_0083.html", "doc_type":"usermanual2", - "p_code":"191", - "code":"194" - }, - { - "desc":"CCE clusters of v1.15 or earlier support workload scaling based on AOM monitoring data. This function is no longer supported in CCE clusters of v1.17 or later.If you have", - "product_code":"cce", - "title":"Switching from AOM to HPA for Auto Scaling", - "uri":"cce_01_0395.html", - "doc_type":"usermanual2", - "p_code":"191", - "code":"195" + "p_code":"152", + "code":"155" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", - "title":"Scaling a Cluster/Node", - "uri":"cce_01_0291.html", + "title":"Scaling a Node", + "uri":"cce_10_0291.html", "doc_type":"usermanual2", - "p_code":"189", - "code":"196" + "p_code":"150", + "code":"156" }, { "desc":"Kubernetes HPA is designed for pods. However, if the cluster resources are insufficient, you can only add nodes. Scaling of cluster nodes could be laborious. Now with clo", "product_code":"cce", "title":"Node Scaling Mechanisms", - "uri":"cce_01_0296.html", + "uri":"cce_10_0296.html", "doc_type":"usermanual2", - "p_code":"196", - "code":"197" + "p_code":"156", + "code":"157" }, { "desc":"CCE provides auto scaling through the autoscaler add-on. Nodes with different specifications can be automatically added across AZs on demand.If a node scaling policy and ", "product_code":"cce", "title":"Creating a Node Scaling Policy", - "uri":"cce_01_0209.html", + "uri":"cce_10_0209.html", "doc_type":"usermanual2", - "p_code":"196", - "code":"198" + "p_code":"156", + "code":"158" }, { "desc":"After a node scaling policy is created, you can delete, edit, disable, enable, or clone the policy.You can view the associated node pool, rules, and scaling history of a ", "product_code":"cce", "title":"Managing Node Scaling Policies", - "uri":"cce_01_0063.html", + "uri":"cce_10_0063.html", "doc_type":"usermanual2", - "p_code":"196", - "code":"199" + "p_code":"156", + "code":"159" + }, + { + "desc":"The best way to handle surging traffic is to automatically adjust the number of machines based on the traffic volume or resource usage, which is called scaling.In CCE, th", + "product_code":"cce", + "title":"Using HPA and CA for Auto Scaling of Workloads and Nodes", + "uri":"cce_10_0300.html", + "doc_type":"usermanual2", + "p_code":"150", + "code":"160" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Add-ons", + "uri":"cce_10_0064.html", + "doc_type":"usermanual2", + "p_code":"", + "code":"161" + }, + { + "desc":"CCE provides multiple types of add-ons to extend cluster functions and meet feature requirements. You can install add-ons as required.", + "product_code":"cce", + "title":"Overview", + "uri":"cce_10_0277.html", + "doc_type":"usermanual2", + "p_code":"161", + "code":"162" + }, + { + "desc":"The coredns add-on is a DNS server that provides domain name resolution services for Kubernetes clusters. coredns chains plug-ins to provide additional features.coredns i", + "product_code":"cce", + "title":"coredns (System Resource Add-On, Mandatory)", + "uri":"cce_10_0129.html", + "doc_type":"usermanual2", + "p_code":"161", + "code":"163" + }, + { + "desc":"storage-driver functions as a standard Kubernetes FlexVolume plug-in to allow containers to use EVS, SFS, OBS, and SFS Turbo storage resources. By installing and upgradin", + "product_code":"cce", + "title":"storage-driver (System Resource Add-On, Discarded)", + "uri":"cce_10_0127.html", + "doc_type":"usermanual2", + "p_code":"161", + "code":"164" + }, + { + "desc":"Everest is a cloud native container storage system. 
Based on the Container Storage Interface (CSI), clusters of Kubernetes v1.15.6 or later obtain access to cloud storage", + "product_code":"cce", + "title":"everest (System Resource Add-On, Mandatory)", + "uri":"cce_10_0066.html", + "doc_type":"usermanual2", + "p_code":"161", + "code":"165" + }, + { + "desc":"node-problem-detector (npd for short) is an add-on that monitors abnormal events of cluster nodes and connects to a third-party monitoring platform. It is a daemon runnin", + "product_code":"cce", + "title":"npd", + "uri":"cce_10_0132.html", + "doc_type":"usermanual2", + "p_code":"161", + "code":"166" + }, + { + "desc":"Autoscaler is an important Kubernetes controller. It supports microservice scaling and is key to serverless design.When the CPU or memory usage of a microservice is too h", + "product_code":"cce", + "title":"autoscaler", + "uri":"cce_10_0154.html", + "doc_type":"usermanual2", + "p_code":"161", + "code":"167" + }, + { + "desc":"From version 1.8 onwards, Kubernetes provides resource usage metrics, such as the container CPU and memory usage, through the Metrics API. These metrics can be directly a", + "product_code":"cce", + "title":"metrics-server", + "uri":"cce_10_0205.html", + "doc_type":"usermanual2", + "p_code":"161", + "code":"168" + }, + { + "desc":"gpu-beta is a device management add-on that supports GPUs in containers. If GPU nodes are used in the cluster, the gpu-beta add-on must be installed.The driver to be down", + "product_code":"cce", + "title":"gpu-beta", + "uri":"cce_10_0141.html", + "doc_type":"usermanual2", + "p_code":"161", + "code":"169" + }, + { + "desc":"Volcano is a batch processing platform based on Kubernetes. It provides a series of features required by machine learning, deep learning, bioinformatics, genomics, and ot", + "product_code":"cce", + "title":"volcano", + "uri":"cce_10_0193.html", + "doc_type":"usermanual2", + "p_code":"161", + "code":"170" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Charts", + "uri":"cce_10_0019.html", + "doc_type":"usermanual2", + "p_code":"", + "code":"171" + }, + { + "desc":"CCE provides a console for managing Helm charts, helping you easily deploy applications using the charts and manage applications on the console.Helm is a package manager ", + "product_code":"cce", + "title":"Overview", + "uri":"cce_10_0191.html", + "doc_type":"usermanual2", + "p_code":"171", + "code":"172" + }, + { + "desc":"On the CCE console, you can upload a Helm chart package, deploy it, and manage the deployed pods.The number of charts that can be uploaded by a single user is limited. Th", + "product_code":"cce", + "title":"Deploying an Application from a Chart", + "uri":"cce_10_0146.html", + "doc_type":"usermanual2", + "p_code":"171", + "code":"173" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", "title":"Permissions Management", - "uri":"cce_01_0164.html", + "uri":"cce_10_0164.html", "doc_type":"usermanual2", "p_code":"", - "code":"200" + "code":"174" }, { "desc":"CCE permissions management allows you to assign permissions to IAM users and user groups under your tenant accounts. CCE combines the advantages of Identity and Access Ma", "product_code":"cce", "title":"Permissions Overview", - "uri":"cce_01_0187.html", + "uri":"cce_10_0187.html", "doc_type":"usermanual2", - "p_code":"200", - "code":"201" + "p_code":"174", + "code":"175" }, { - "desc":"CCE cluster permissions are assigned based on IAM system policies and custom policies. You can use user groups to assign permissions to IAM users.Cluster permissions are ", + "desc":"CCE cluster-level permissions are assigned based on IAM system policies and custom policies. You can use user groups to assign permissions to IAM users.Cluster permission", "product_code":"cce", "title":"Cluster Permissions (IAM-based)", - "uri":"cce_01_0188.html", + "uri":"cce_10_0188.html", "doc_type":"usermanual2", - "p_code":"200", - "code":"202" + "p_code":"174", + "code":"176" }, { "desc":"You can regulate users' or user groups' access to Kubernetes resources in a single namespace based on their Kubernetes RBAC roles. The RBAC API declares four kinds of Kub", "product_code":"cce", "title":"Namespace Permissions (Kubernetes RBAC-based)", - "uri":"cce_01_0189.html", + "uri":"cce_10_0189.html", "doc_type":"usermanual2", - "p_code":"200", - "code":"203" + "p_code":"174", + "code":"177" + }, + { + "desc":"The conventional distributed task scheduling mode is being replaced by Kubernetes. CCE allows you to easily deploy, manage, and scale containerized applications in the cl", + "product_code":"cce", + "title":"Example: Designing and Configuring Permissions for Users in a Department", + "uri":"cce_10_0245.html", + "doc_type":"usermanual2", + "p_code":"174", + "code":"178" + }, + { + "desc":"Some CCE permissions policies depend on the policies of other cloud services. To view or use other cloud resources on the CCE console, you need to enable the system polic", + "product_code":"cce", + "title":"Permission Dependency of the CCE Console", + "uri":"cce_10_0190.html", + "doc_type":"usermanual2", + "p_code":"174", + "code":"179" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Pod Security", + "uri":"cce_10_0465.html", + "doc_type":"usermanual2", + "p_code":"174", + "code":"180" }, { "desc":"A pod security policy (PSP) is a cluster-level resource that controls sensitive security aspects of the pod specification. The PodSecurityPolicy object in Kubernetes defi", "product_code":"cce", - "title":"Pod Security Policies", - "uri":"cce_01_0275.html", + "title":"Configuring a Pod Security Policy", + "uri":"cce_10_0275.html", "doc_type":"usermanual2", - "p_code":"200", - "code":"204" + "p_code":"180", + "code":"181" + }, + { + "desc":"Before using Pod Security Admission, you need to understand Kubernetes Pod Security Standards. These standards define different isolation levels for pods. 
They let you de", + "product_code":"cce", + "title":"Configuring Pod Security Admission", + "uri":"cce_10_0466.html", + "doc_type":"usermanual2", + "p_code":"180", + "code":"182" + }, + { + "desc":"In clusters earlier than v1.21, a token is obtained by mounting the secret of the service account to a pod. Tokens obtained this way are permanent. This approach is no lo", + "product_code":"cce", + "title":"Service Account Token Security Improvement", + "uri":"cce_10_0477_0.html", + "doc_type":"usermanual2", + "p_code":"174", + "code":"183" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", "title":"Cloud Trace Service (CTS)", - "uri":"cce_01_0024.html", + "uri":"cce_10_0024.html", "doc_type":"usermanual2", "p_code":"", - "code":"205" + "code":"184" }, { "desc":"Cloud Trace Service (CTS) records operations on cloud service resources, allowing users to query, audit, and backtrack the resource operation requests initiated from the ", "product_code":"cce", "title":"CCE Operations Supported by CTS", - "uri":"cce_01_0025.html", + "uri":"cce_10_0025.html", "doc_type":"usermanual2", - "p_code":"205", - "code":"206" + "p_code":"184", + "code":"185" }, { "desc":"After you enable CTS, the system starts recording operations on CCE resources. Operation records of the last 7 days can be viewed on the CTS management console.Trace Sour", "product_code":"cce", "title":"Querying CTS Logs", - "uri":"cce_01_0026.html", + "uri":"cce_10_0026.html", "doc_type":"usermanual2", - "p_code":"205", + "p_code":"184", + "code":"186" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Storage (FlexVolume)", + "uri":"cce_10_0305.html", + "doc_type":"usermanual2", + "p_code":"", + "code":"187" + }, + { + "desc":"In container storage, you can use different types of volumes and mount them to containers in pods as many as you want.In CCE, container storage is backed both by Kubernet", + "product_code":"cce", + "title":"FlexVolume Overview", + "uri":"cce_10_0306.html", + "doc_type":"usermanual2", + "p_code":"187", + "code":"188" + }, + { + "desc":"In clusters later than v1.15.11-r1, CSI (the everest add-on) has taken over all functions of fuxi FlexVolume (the storage-driver add-on) for managing container storage. Y", + "product_code":"cce", + "title":"How Do I Change the Storage Class Used by a Cluster of v1.15 from FlexVolume to CSI Everest?", + "uri":"cce_10_0343.html", + "doc_type":"usermanual2", + "p_code":"187", + "code":"189" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Using EVS Disks as Storage Volumes", + "uri":"cce_10_0309.html", + "doc_type":"usermanual2", + "p_code":"187", + "code":"190" + }, + { + "desc":"To achieve persistent storage, CCE allows you to mount the storage volumes created from Elastic Volume Service (EVS) disks to a path of a container. 
When the container is", + "product_code":"cce", + "title":"Overview", + "uri":"cce_10_0310.html", + "doc_type":"usermanual2", + "p_code":"190", + "code":"191" + }, + { + "desc":"The following configuration example applies to clusters of Kubernetes 1.13 or earlier.touch pvc-evs-auto-example.yamlvi pvc-evs-auto-example.yamlExample YAML file for clu", + "product_code":"cce", + "title":"(kubectl) Automatically Creating an EVS Disk", + "uri":"cce_10_0312.html", + "doc_type":"usermanual2", + "p_code":"190", + "code":"192" + }, + { + "desc":"The following configuration example applies to clusters of Kubernetes 1.13 or earlier.touch pv-evs-example.yaml pvc-evs-example.yamlClusters from v1.11.7 to v1.13Example ", + "product_code":"cce", + "title":"(kubectl) Creating a PV from an Existing EVS Disk", + "uri":"cce_10_0313.html", + "doc_type":"usermanual2", + "p_code":"190", + "code":"193" + }, + { + "desc":"After an EVS volume is created or imported to CCE, you can mount it to a workload.EVS disks cannot be attached across AZs. Before mounting a volume, you can run the kubec", + "product_code":"cce", + "title":"(kubectl) Creating a Pod Mounted with an EVS Volume", + "uri":"cce_10_0314.html", + "doc_type":"usermanual2", + "p_code":"190", + "code":"194" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Using SFS Turbo File Systems as Storage Volumes", + "uri":"cce_10_0329.html", + "doc_type":"usermanual2", + "p_code":"187", + "code":"195" + }, + { + "desc":"CCE allows you to mount a volume created from an SFS Turbo file system to a container to store data persistently. Provisioned on demand and fast, SFS Turbo is suitable fo", + "product_code":"cce", + "title":"Overview", + "uri":"cce_10_0330.html", + "doc_type":"usermanual2", + "p_code":"195", + "code":"196" + }, + { + "desc":"CCE allows you to use an existing SFS Turbo file system to create a PersistentVolume (PV). After the creation is successful, you can create a PersistentVolumeClaim (PVC) ", + "product_code":"cce", + "title":"(kubectl) Creating a PV from an Existing SFS Turbo File System", + "uri":"cce_10_0332.html", + "doc_type":"usermanual2", + "p_code":"195", + "code":"197" + }, + { + "desc":"After an SFS Turbo volume is created or imported to CCE, you can mount the volume to a workload.The following configuration example applies to clusters of Kubernetes 1.13", + "product_code":"cce", + "title":"(kubectl) Creating a Deployment Mounted with an SFS Turbo Volume", + "uri":"cce_10_0333.html", + "doc_type":"usermanual2", + "p_code":"195", + "code":"198" + }, + { + "desc":"CCE allows you to use an existing SFS Turbo volume to create a StatefulSet.The following configuration example applies to clusters of Kubernetes 1.13 or earlier.touch efs", + "product_code":"cce", + "title":"(kubectl) Creating a StatefulSet Mounted with an SFS Turbo Volume", + "uri":"cce_10_0334.html", + "doc_type":"usermanual2", + "p_code":"195", + "code":"199" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Using OBS Buckets as Storage Volumes", + "uri":"cce_10_0322.html", + "doc_type":"usermanual2", + "p_code":"187", + "code":"200" + }, + { + "desc":"CCE allows you to mount a volume created from an Object Storage Service (OBS) bucket to a container to store data persistently. Object storage is commonly used in cloud w", + "product_code":"cce", + "title":"Overview", + "uri":"cce_10_0323.html", + "doc_type":"usermanual2", + "p_code":"200", + "code":"201" + }, + { + "desc":"During the use of OBS, expected OBS buckets can be automatically created and mounted as volumes. Currently, standard and infrequent access OBS buckets are supported, whic", + "product_code":"cce", + "title":"(kubectl) Automatically Creating an OBS Volume", + "uri":"cce_10_0325.html", + "doc_type":"usermanual2", + "p_code":"200", + "code":"202" + }, + { + "desc":"CCE allows you to use an existing OBS bucket to create a PersistentVolume (PV). You can create a PersistentVolumeClaim (PVC) and bind it to the PV.The following configura", + "product_code":"cce", + "title":"(kubectl) Creating a PV from an Existing OBS Bucket", + "uri":"cce_10_0326.html", + "doc_type":"usermanual2", + "p_code":"200", + "code":"203" + }, + { + "desc":"After an OBS volume is created or imported to CCE, you can mount the volume to a workload.The following configuration example applies to clusters of Kubernetes 1.13 or ea", + "product_code":"cce", + "title":"(kubectl) Creating a Deployment Mounted with an OBS Volume", + "uri":"cce_10_0327.html", + "doc_type":"usermanual2", + "p_code":"200", + "code":"204" + }, + { + "desc":"CCE allows you to use an existing OBS volume to create a StatefulSet through a PersistentVolumeClaim (PVC).The following configuration example applies to clusters of Kube", + "product_code":"cce", + "title":"(kubectl) Creating a StatefulSet Mounted with an OBS Volume", + "uri":"cce_10_0328.html", + "doc_type":"usermanual2", + "p_code":"200", + "code":"205" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Using SFS File Systems as Storage Volumes", + "uri":"cce_10_0315.html", + "doc_type":"usermanual2", + "p_code":"187", + "code":"206" + }, + { + "desc":"CCE allows you to mount a volume created from a Scalable File Service (SFS) file system to a container to store data persistently. 
SFS volumes are commonly used in ReadWr", + "product_code":"cce", + "title":"Overview", + "uri":"cce_10_0316.html", + "doc_type":"usermanual2", + "p_code":"206", "code":"207" }, + { + "desc":"The following configuration example applies to clusters of Kubernetes 1.13 or earlier.touch pvc-sfs-auto-example.yamlvi pvc-sfs-auto-example.yamlExample YAML file:apiVers", + "product_code":"cce", + "title":"(kubectl) Automatically Creating an SFS Volume", + "uri":"cce_10_0318.html", + "doc_type":"usermanual2", + "p_code":"206", + "code":"208" + }, + { + "desc":"The following configuration example applies to clusters of Kubernetes 1.13 or earlier.touch pv-sfs-example.yaml pvc-sfs-example.yamlClusters from v1.11 to v1.13Example YA", + "product_code":"cce", + "title":"(kubectl) Creating a PV from an Existing SFS File System", + "uri":"cce_10_0319.html", + "doc_type":"usermanual2", + "p_code":"206", + "code":"209" + }, + { + "desc":"After an SFS volume is created or imported to CCE, you can mount the volume to a workload.The following configuration example applies to clusters of Kubernetes 1.13 or ea", + "product_code":"cce", + "title":"(kubectl) Creating a Deployment Mounted with an SFS Volume", + "uri":"cce_10_0320.html", + "doc_type":"usermanual2", + "p_code":"206", + "code":"210" + }, + { + "desc":"CCE allows you to use an existing SFS volume to create a StatefulSet through a PersistentVolumeClaim (PVC).The following configuration example applies to clusters of Kube", + "product_code":"cce", + "title":"(kubectl) Creating a StatefulSet Mounted with an SFS Volume", + "uri":"cce_10_0321.html", + "doc_type":"usermanual2", + "p_code":"206", + "code":"211" + }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", @@ -1869,16 +1905,7 @@ "uri":"cce_faq_0083.html", "doc_type":"usermanual2", "p_code":"", - "code":"208" - }, - { - "desc":"Cloud Container Engine (CCE) provides highly scalable, high-performance, enterprise-class Kubernetes clusters and supports Docker containers. With CCE, you can easily dep", - "product_code":"cce", - "title":"Checklist for Migrating Containerized Applications to the Cloud", - "uri":"cce_faq_00006.html", - "doc_type":"usermanual2", - "p_code":"208", - "code":"209" + "code":"212" }, { "desc":"When a node is added, EIP is set to Automatically assign. The node cannot be created, and a message indicating that EIPs are insufficient is displayed.Two methods are ava", @@ -1886,8 +1913,8 @@ "title":"How Do I Troubleshoot Insufficient EIPs When a Node Is Added?", "uri":"cce_01_0203.html", "doc_type":"usermanual2", - "p_code":"208", - "code":"210" + "p_code":"212", + "code":"213" }, { "desc":"Before using command line injection, write a script that can format data disks and save it to your OBS bucket. 
Then, inject a command line that will automatically execute", @@ -1895,8 +1922,8 @@ "title":"How Do I Format a Data Disk Using Command Line Injection?", "uri":"cce_01_0204.html", "doc_type":"usermanual2", - "p_code":"208", - "code":"211" + "p_code":"212", + "code":"214" }, { "desc":"After a cluster of v1.13.10 is created, you can use heapster only after rbac is enabled.kubectl delete clusterrole system:heapsterCopy the following file to a server on w", @@ -1904,8 +1931,8 @@ "title":"How Do I Use heapster in Clusters of v1.13.10?", "uri":"cce_01_0999.html", "doc_type":"usermanual2", - "p_code":"208", - "code":"212" + "p_code":"212", + "code":"215" }, { "desc":"Currently, private CCE clusters use Device Mapper as the Docker storage driver.Device Mapper is developed based on the kernel framework and supports many advanced volume ", @@ -1913,8 +1940,8 @@ "title":"How Do I Change the Mode of the Docker Device Mapper?", "uri":"cce_faq_00096.html", "doc_type":"usermanual2", - "p_code":"208", - "code":"213" + "p_code":"212", + "code":"216" }, { "desc":"If the cluster status is available but some nodes in the cluster are unavailable, perform the following operations to rectify the fault:Check Item 1: Whether the Node Is ", @@ -1922,8 +1949,8 @@ "title":"What Can I Do If My Cluster Status Is Available but the Node Status Is Unavailable?", "uri":"cce_faq_00120.html", "doc_type":"usermanual2", - "p_code":"208", - "code":"214" + "p_code":"212", + "code":"217" }, { "desc":"If the cluster is Unavailable, perform the following operations to rectify the fault:Check Item 1: Whether the Security Group Is ModifiedCheck Item 2: Whether the DHCP Fu", @@ -1931,8 +1958,8 @@ "title":"How Do I Rectify the Fault When the Cluster Status Is Unavailable?", "uri":"cce_faq_00039.html", "doc_type":"usermanual2", - "p_code":"208", - "code":"215" + "p_code":"212", + "code":"218" }, { "desc":"This section uses the Nginx workload as an example to describe how to set the workload access type to LoadBalancer (ELB).An ELB has been created.You have connected an Ela", @@ -1940,8 +1967,8 @@ "title":"How Do I Use kubectl to Set the Workload Access Type to LoadBalancer (ELB)?", "uri":"cce_faq_00099.html", "doc_type":"usermanual2", - "p_code":"208", - "code":"216" + "p_code":"212", + "code":"219" }, { "desc":"You can use the pre-installation script feature to configure CCE cluster nodes (ECSs).Before using this feature, write a script that can format data disks and save it to ", @@ -1949,8 +1976,8 @@ "title":"How Do I Add a Second Data Disk to a Node in a CCE Cluster?", "uri":"cce_faq_00190.html", "doc_type":"usermanual2", - "p_code":"208", - "code":"217" + "p_code":"212", + "code":"220" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -1958,8 +1985,8 @@ "title":"Workload Abnormalities", "uri":"cce_faq_00029.html", "doc_type":"usermanual2", - "p_code":"208", - "code":"218" + "p_code":"212", + "code":"221" }, { "desc":"If a workload is running improperly, you can view events to determine the cause.On the CCE console, choose Workloads > Deployments or StatefulSets in the navigation pane ", @@ -1967,8 +1994,8 @@ "title":"Fault Locating and Troubleshooting for Abnormal Workloads", "uri":"cce_faq_00134.html", "doc_type":"usermanual2", - "p_code":"218", - "code":"219" + "p_code":"221", + "code":"222" }, { "desc":"Viewing K8s Event InformationCheck Item 1: Checking Whether a Node Is Available in the ClusterCheck Item 2: Checking Whether Node Resources (CPU and Memory) Are Sufficien", @@ -1976,8 +2003,8 @@ "title":"Failed to Schedule an Instance", "uri":"cce_faq_00098.html", "doc_type":"usermanual2", - "p_code":"218", - "code":"220" + "p_code":"221", + "code":"223" }, { "desc":"If the workload details page displays an event indicating that image pulling fails, perform the following operations to locate the fault:Check Item 1: Checking Whether im", @@ -1985,8 +2012,8 @@ "title":"Failed to Pull an Image", "uri":"cce_faq_00015.html", "doc_type":"usermanual2", - "p_code":"218", - "code":"221" + "p_code":"221", + "code":"224" }, { "desc":"On the details page of a workload, if an event is displayed indicating that the container fails to be restarted, perform the following operations to locate the fault:Rect", @@ -1994,8 +2021,8 @@ "title":"Failed to Restart a Container", "uri":"cce_faq_00018.html", "doc_type":"usermanual2", - "p_code":"218", - "code":"222" + "p_code":"221", + "code":"225" }, { "desc":"Pod actions are classified into the following two types:kube-controller-manager periodically checks the status of all nodes. If a node is in the NotReady state for a peri", @@ -2003,8 +2030,8 @@ "title":"What Should I Do If An Evicted Pod Exists?", "uri":"cce_faq_00209.html", "doc_type":"usermanual2", - "p_code":"218", - "code":"223" + "p_code":"221", + "code":"226" }, { "desc":"When a node is faulty, pods on the node are evicted to ensure workload availability. If the pods are not evicted when the node is faulty, perform the following steps:Use ", @@ -2012,8 +2039,8 @@ "title":"Instance Eviction Exception", "uri":"cce_faq_00140.html", "doc_type":"usermanual2", - "p_code":"218", - "code":"224" + "p_code":"221", + "code":"227" }, { "desc":"When a node is in the Unavailable state, CCE migrates container pods on the node and sets the pods running on the node to the Terminating state.After the node is restored", @@ -2021,8 +2048,8 @@ "title":"What Should I Do If Pods in the Terminating State Cannot Be Deleted?", "uri":"cce_faq_00210.html", "doc_type":"usermanual2", - "p_code":"218", - "code":"225" + "p_code":"221", + "code":"228" }, { "desc":"The metadata.enable field in the YAML file of the workload is false. 
As a result, the pod of the workload is deleted and the workload is in the stopped status.The workloa", @@ -2030,8 +2057,8 @@ "title":"What Should I Do If a Workload Is Stopped Caused by Pod Deletion?", "uri":"cce_faq_00012.html", "doc_type":"usermanual2", - "p_code":"218", - "code":"226" + "p_code":"221", + "code":"229" }, { "desc":"The pod remains in the creating state for a long time, and the sandbox-related errors are reported.Select a troubleshooting method for your cluster:Clusters of V1.13 or l", @@ -2039,8 +2066,8 @@ "title":"What Should I Do If Sandbox-Related Errors Are Reported When the Pod Remains in the Creating State?", "uri":"cce_faq_00005.html", "doc_type":"usermanual2", - "p_code":"218", - "code":"227" + "p_code":"221", + "code":"230" }, { "desc":"Workload pods in the cluster fail and are being redeployed constantly.After the following command is run, the command output shows that many pods are in the evicted state", @@ -2048,8 +2075,8 @@ "title":"What Should I Do If a Pod Is in the Evicted State?", "uri":"cce_faq_00199.html", "doc_type":"usermanual2", - "p_code":"218", - "code":"228" + "p_code":"221", + "code":"231" }, { "desc":"If a node has sufficient memory resources, a container on this node can use more memory resources than requested, but no more than limited. If the memory allocated to a c", @@ -2057,8 +2084,8 @@ "title":"What Should I Do If the OOM Killer Is Triggered When a Container Uses Memory Resources More Than Limited?", "uri":"cce_faq_00002.html", "doc_type":"usermanual2", - "p_code":"218", - "code":"229" + "p_code":"221", + "code":"232" }, { "desc":"A workload can be accessed from public networks through a load balancer. LoadBalancer provides higher reliability than EIP-based NodePort because an EIP is no longer boun", @@ -2066,26 +2093,8 @@ "title":"What Should I Do If a Service Released in a Workload Cannot Be Accessed from Public Networks?", "uri":"cce_faq_00202.html", "doc_type":"usermanual2", - "p_code":"208", - "code":"230" - }, - { - "desc":"CCE uses high-performance container networking add-ons, which support the tunnel network and VPC network models.After a cluster is created, the network model cannot be ch", - "product_code":"cce", - "title":"Selecting a Network Model When Creating a Cluster on CCE", - "uri":"cce_bestpractice_00162.html", - "doc_type":"usermanual2", - "p_code":"208", - "code":"231" - }, - { - "desc":"Before creating a cluster on CCE, determine the number of VPCs, number of subnets, container CIDR blocks, and Services for access based on service requirements.This secti", - "product_code":"cce", - "title":"Planning CIDR Blocks for a CCE Cluster", - "uri":"cce_bestpractice_00004.html", - "doc_type":"usermanual2", - "p_code":"208", - "code":"232" + "p_code":"212", + "code":"233" }, { "desc":"A VPC is similar to a private local area network (LAN) managed by a home gateway whose IP address is 192.168.0.0/16. 
A VPC is a private network built on the cloud and pro", @@ -2093,16 +2102,7 @@ "title":"What Is the Relationship Between Clusters, VPCs, and Subnets?", "uri":"cce_faq_00266.html", "doc_type":"usermanual2", - "p_code":"208", - "code":"233" - }, - { - "desc":"For clusters of v1.15.11-r1 and later, the CSI everest add-on has taken over all functions of the fuxi FlexVolume driver (the storage-driver add-on) for container storage", - "product_code":"cce", - "title":"How Do I Change the Storage Class Used by a Cluster of v1.15 from FlexVolume to CSI Everest?", - "uri":"cce_bestpractice_0107.html", - "doc_type":"usermanual2", - "p_code":"208", + "p_code":"212", "code":"234" }, { @@ -2111,9 +2111,405 @@ "title":"How Do I Harden the VPC Security Group Rules for CCE Cluster Nodes?", "uri":"cce_faq_00265.html", "doc_type":"usermanual2", - "p_code":"208", + "p_code":"212", "code":"235" }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Best Practice", + "uri":"cce_bestpractice.html", + "doc_type":"usermanual2", + "p_code":"", + "code":"236" + }, + { + "desc":"Security, efficiency, stability, and availability are common requirements on all cloud services. To meet these requirements, the system availability, data reliability, an", + "product_code":"cce", + "title":"Checklist for Deploying Containerized Applications in the Cloud", + "uri":"cce_bestpractice_00006.html", + "doc_type":"usermanual2", + "p_code":"236", + "code":"237" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Migration", + "uri":"cce_bestpractice_00237.html", + "doc_type":"usermanual2", + "p_code":"236", + "code":"238" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Migrating On-premises Kubernetes Clusters to CCE", + "uri":"cce_bestpractice_0306.html", + "doc_type":"usermanual2", + "p_code":"238", + "code":"239" + }, + { + "desc":"Containers are growing in popularity and Kubernetes simplifies containerized deployment. Many companies choose to build their own Kubernetes clusters. However, the O&M wo", + "product_code":"cce", + "title":"Solution Overview", + "uri":"cce_bestpractice_0307.html", + "doc_type":"usermanual2", + "p_code":"239", + "code":"240" + }, + { + "desc":"CCE allows you to customize cluster resources to meet various service requirements. 
Table 1 lists the key performance parameters of a cluster and provides the planned val", + "product_code":"cce", + "title":"Planning Resources for the Target Cluster", + "uri":"cce_bestpractice_0308.html", + "doc_type":"usermanual2", + "p_code":"239", + "code":"241" + }, + { + "desc":"If your migration does not involve resources outside a cluster listed in Table 1 or you do not need to use other services to update resources after the migration, skip th", + "product_code":"cce", + "title":"Migrating Resources Outside a Cluster", + "uri":"cce_bestpractice_0309.html", + "doc_type":"usermanual2", + "p_code":"239", + "code":"242" + }, + { + "desc":"Velero is an open-source backup and migration tool for Kubernetes clusters. It integrates the persistent volume (PV) data backup capability of the Restic tool and can be ", + "product_code":"cce", + "title":"Installing the Migration Tool", + "uri":"cce_bestpractice_0310.html", + "doc_type":"usermanual2", + "p_code":"239", + "code":"243" + }, + { + "desc":"WordPress is used as an example to describe how to migrate an application from an on-premises Kubernetes cluster to a CCE cluster. The WordPress application consists of t", + "product_code":"cce", + "title":"Migrating Resources in a Cluster", + "uri":"cce_bestpractice_0311.html", + "doc_type":"usermanual2", + "p_code":"239", + "code":"244" + }, + { + "desc":"The WordPress and MySQL images used in this example can be pulled from SWR. Therefore, the image pull failure (ErrImagePull) will not occur. If the application to be migr", + "product_code":"cce", + "title":"Updating Resources Accordingly", + "uri":"cce_bestpractice_0312.html", + "doc_type":"usermanual2", + "p_code":"239", + "code":"245" + }, + { + "desc":"Cluster migration involves full migration of application data, which may cause intra-application adaptation problems. In this example, after the cluster is migrated, the ", + "product_code":"cce", + "title":"Performing Additional Tasks", + "uri":"cce_bestpractice_0313.html", + "doc_type":"usermanual2", + "p_code":"239", + "code":"246" + }, + { + "desc":"Both HostPath and Local volumes are local storage volumes. However, the Restic tool integrated in Velero cannot back up the PVs of the HostPath type and supports only the", + "product_code":"cce", + "title":"Troubleshooting", + "uri":"cce_bestpractice_0314.html", + "doc_type":"usermanual2", + "p_code":"239", + "code":"247" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"DevOps", + "uri":"cce_bestpractice_0322.html", + "doc_type":"usermanual2", + "p_code":"236", + "code":"248" + }, + { + "desc":"GitLab is an open-source version management system developed with Ruby on Rails for Git project repository management. It supports web-based access to public and private ", + "product_code":"cce", + "title":"Interconnecting GitLab with SWR and CCE for CI/CD", + "uri":"cce_bestpractice_0324.html", + "doc_type":"usermanual2", + "p_code":"248", + "code":"249" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Disaster Recovery", + "uri":"cce_bestpractice_0323.html", + "doc_type":"usermanual2", + "p_code":"236", + "code":"250" + }, + { + "desc":"To achieve high availability for your CCE containers, you can do as follows:Deploy three master nodes for the cluster.When nodes are deployed across AZs, set custom sched", + "product_code":"cce", + "title":"Implementing High Availability for Containers in CCE", + "uri":"cce_bestpractice_00220.html", + "doc_type":"usermanual2", + "p_code":"250", + "code":"251" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Security", + "uri":"cce_bestpractice_0315.html", + "doc_type":"usermanual2", + "p_code":"236", + "code":"252" + }, + { + "desc":"For security purposes, you are advised to configure a cluster as follows.Kubernetes releases a major version in about four months. CCE follows the same frequency as Kuber", + "product_code":"cce", + "title":"Cluster Security", + "uri":"cce_bestpractice_0317.html", + "doc_type":"usermanual2", + "p_code":"252", + "code":"253" + }, + { + "desc":"Do not bind an EIP to a node unless necessary to reduce the attack surface.If an EIP must be used, properly configure the firewall or security group rules to restrict acc", + "product_code":"cce", + "title":"Node Security", + "uri":"cce_bestpractice_0318.html", + "doc_type":"usermanual2", + "p_code":"252", + "code":"254" + }, + { + "desc":"The nodeSelector or nodeAffinity is used to limit the range of nodes to which applications can be scheduled, preventing the entire cluster from being threatened due to th", + "product_code":"cce", + "title":"Container Security", + "uri":"cce_bestpractice_0319.html", + "doc_type":"usermanual2", + "p_code":"252", + "code":"255" + }, + { + "desc":"Currently, CCE has configured static encryption for secret resources. The secrets created by users will be encrypted and stored in etcd of the CCE cluster. Secrets can be", + "product_code":"cce", + "title":"Secret Security", + "uri":"cce_bestpractice_0320.html", + "doc_type":"usermanual2", + "p_code":"252", + "code":"256" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Auto Scaling", + "uri":"cce_bestpractice_0090.html", + "doc_type":"usermanual2", + "p_code":"236", + "code":"257" + }, + { + "desc":"The best way to handle surging traffic is to automatically adjust the number of machines based on the traffic volume or resource usage, which is called scaling.In CCE, th", + "product_code":"cce", + "title":"Using HPA and CA for Auto Scaling of Workloads and Nodes", + "uri":"cce_bestpractice_00282.html", + "doc_type":"usermanual2", + "p_code":"257", + "code":"258" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Cluster", + "uri":"cce_bestpractice_0050.html", + "doc_type":"usermanual2", + "p_code":"236", + "code":"259" + }, + { + "desc":"When you have multiple CCE clusters, you may find it difficult to efficiently connect to all of them.This section describes how to configure access to multiple clusters b", + "product_code":"cce", + "title":"Connecting to Multiple Clusters Using kubectl", + "uri":"cce_bestpractice_00254.html", + "doc_type":"usermanual2", + "p_code":"259", + "code":"260" + }, + { + "desc":"You can use the pre-installation script feature to configure CCE cluster nodes (ECSs).When creating a node in a cluster of v1.13.10 or later, if a data disk is not manage", + "product_code":"cce", + "title":"Adding a Second Data Disk to a Node in a CCE Cluster", + "uri":"cce_bestpractice_00190.html", + "doc_type":"usermanual2", + "p_code":"259", + "code":"261" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Networking", + "uri":"cce_bestpractice_0052.html", + "doc_type":"usermanual2", + "p_code":"236", + "code":"262" + }, + { + "desc":"Before creating a cluster on CCE, determine the number of VPCs, number of subnets, container CIDR blocks, and Services for access based on service requirements.This topic", + "product_code":"cce", + "title":"Planning CIDR Blocks for a Cluster", + "uri":"cce_bestpractice_00004.html", + "doc_type":"usermanual2", + "p_code":"262", + "code":"263" + }, + { + "desc":"CCE uses self-proprietary, high-performance container networking add-ons to support the tunnel network, Cloud Native Network 2.0, and VPC network models.After a cluster i", + "product_code":"cce", + "title":"Selecting a Network Model", + "uri":"cce_bestpractice_00162.html", + "doc_type":"usermanual2", + "p_code":"262", + "code":"264" + }, + { + "desc":"Session persistence is one of the most common while complex problems in load balancing.Session persistence is also called sticky sessions. After the sticky session functi", + "product_code":"cce", + "title":"Implementing Sticky Session Through Load Balancing", + "uri":"cce_bestpractice_00231.html", + "doc_type":"usermanual2", + "p_code":"262", + "code":"265" + }, + { + "desc":"There may be different types of proxy servers between a client and a container server. How can a container obtain the real source IP address of the client? This section d", + "product_code":"cce", + "title":"Obtaining the Client Source IP Address for a Container", + "uri":"cce_bestpractice_00035.html", + "doc_type":"usermanual2", + "p_code":"262", + "code":"266" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Storage", + "uri":"cce_bestpractice_0053.html", + "doc_type":"usermanual2", + "p_code":"236", + "code":"267" + }, + { + "desc":"A data disk is divided depending on the container storage Rootfs:Overlayfs: No independent thin pool is allocated. 
Image data is stored in the dockersys disk.# lsblk\nNAME", + "product_code":"cce", + "title":"Expanding Node Disk Capacity", + "uri":"cce_bestpractice_00198.html", + "doc_type":"usermanual2", + "p_code":"267", + "code":"268" + }, + { + "desc":"This section describes how to mount OBS buckets and OBS parallel file systems (preferred) of third-party tenants.The CCE cluster of a SaaS service provider needs to be mo", + "product_code":"cce", + "title":"Mounting an Object Storage Bucket of a Third-Party Tenant", + "uri":"cce_bestpractice_00199.html", + "doc_type":"usermanual2", + "p_code":"267", + "code":"269" + }, + { + "desc":"The minimum capacity of an SFS Turbo file system is 500 GB, and the SFS Turbo file system cannot be billed by usage. By default, the root directory of an SFS Turbo file s", + "product_code":"cce", + "title":"Dynamically Creating and Mounting Subdirectories of an SFS Turbo File System", + "uri":"cce_bestpractice_00253_0.html", + "doc_type":"usermanual2", + "p_code":"267", + "code":"270" + }, + { + "desc":"In clusters later than v1.15.11-r1, CSI (the everest add-on) has taken over all functions of fuxi FlexVolume (the storage-driver add-on) for managing container storage. Y", + "product_code":"cce", + "title":"How Do I Change the Storage Class Used by a Cluster of v1.15 from FlexVolume to CSI Everest?", + "uri":"cce_bestpractice_0107.html", + "doc_type":"usermanual2", + "p_code":"267", + "code":"271" + }, + { + "desc":"When using storage resources in CCE, the most common method is to specify storageClassName to define the type of storage resources to be created when creating a PVC. The ", + "product_code":"cce", + "title":"Custom Storage Classes", + "uri":"cce_bestpractice_00281_0.html", + "doc_type":"usermanual2", + "p_code":"267", + "code":"272" + }, + { + "desc":"EVS disks cannot be attached across AZs. For example, EVS disks in AZ 1 cannot be attached to nodes in AZ 2.If the storage class csi-disk is used for StatefulSets, when a", + "product_code":"cce", + "title":"Realizing Automatic Topology for EVS Disks When Nodes Are Deployed Across AZs (csi-disk-topology)", + "uri":"cce_bestpractice_00284.html", + "doc_type":"usermanual2", + "p_code":"267", + "code":"273" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Container", + "uri":"cce_bestpractice_0051.html", + "doc_type":"usermanual2", + "p_code":"236", + "code":"274" + }, + { + "desc":"If a node has sufficient memory resources, a container on this node can use more memory resources than requested, but no more than limited. If the memory allocated to a c", + "product_code":"cce", + "title":"Properly Allocating Container Computing Resources", + "uri":"cce_bestpractice_00002.html", + "doc_type":"usermanual2", + "p_code":"274", + "code":"275" + }, + { + "desc":"To access a Kubernetes cluster from a client, you can use the Kubernetes command line tool kubectl.Create a DaemonSet file.vi daemonSet.yamlAn example YAML file is provid", + "product_code":"cce", + "title":"Modifying Kernel Parameters Using a Privileged Container", + "uri":"cce_bestpractice_00227.html", + "doc_type":"usermanual2", + "p_code":"274", + "code":"276" + }, + { + "desc":"Before containers running applications are started, one or some init containers are started first. 
If there are multiple init containers, they will be started in the defi", + "product_code":"cce", + "title":"Initializing a Container", + "uri":"cce_bestpractice_00228.html", + "doc_type":"usermanual2", + "p_code":"274", + "code":"277" + }, + { + "desc":"If DNS or other related settings are inappropriate, you can use hostAliases to overwrite the resolution of the host name at the pod level when adding entries to the /etc/", + "product_code":"cce", + "title":"Using hostAliases to Configure /etc/hosts in a Pod", + "uri":"cce_bestpractice_00226.html", + "doc_type":"usermanual2", + "p_code":"274", + "code":"278" + }, + { + "desc":"Linux allows you to create a core dump file if an application crashes, which contains the data the application had in memory at the time of the crash. You can analyze the", + "product_code":"cce", + "title":"Configuring Core Dumps", + "uri":"cce_bestpractice_0325.html", + "doc_type":"usermanual2", + "p_code":"274", + "code":"279" + }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", @@ -2121,7 +2517,7 @@ "uri":"cce_01_9999.html", "doc_type":"usermanual2", "p_code":"", - "code":"236" + "code":"280" }, { "desc":"CCE 2.0 inherits and modifies the features of CCE 1.0, and release new features.Modified features:Clusters in CCE 1.0 are equivalent to Hybrid clusters in CCE 2.0.CCE 2.0", @@ -2129,8 +2525,8 @@ "title":"Differences Between CCE 1.0 and CCE 2.0", "uri":"cce_01_9998.html", "doc_type":"usermanual2", - "p_code":"236", - "code":"237" + "p_code":"280", + "code":"281" }, { "desc":"Migrate the images stored in the image repository of CCE 1.0 to CCE 2.0.A VM is available. The VM is bound to a public IP address and can access the Internet. Docker (ear", @@ -2138,8 +2534,8 @@ "title":"Migrating Images", "uri":"cce_01_9997.html", "doc_type":"usermanual2", - "p_code":"236", - "code":"238" + "p_code":"280", + "code":"282" }, { "desc":"Create Hybrid clusters on the CCE 2.0 console. These new Hybrid clusters should have the same specifications with those created on CCE 1.0.To create clusters using APIs, ", @@ -2147,8 +2543,8 @@ "title":"Migrating Clusters", "uri":"cce_01_9996.html", "doc_type":"usermanual2", - "p_code":"236", - "code":"239" + "p_code":"280", + "code":"283" }, { "desc":"This section describes how to create a Deployment with the same specifications as that in CCE 1.0 on the CCE 2.0 console.It is advised to delete the applications on CCE 1", @@ -2156,8 +2552,8 @@ "title":"Migrating Applications", "uri":"cce_01_9995.html", "doc_type":"usermanual2", - "p_code":"236", - "code":"240" + "p_code":"280", + "code":"284" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -2166,6 +2562,6 @@ "uri":"cce_01_0300.html", "doc_type":"usermanual2", "p_code":"", - "code":"241" + "code":"285" } ] \ No newline at end of file diff --git a/docs/cce/umn/cce_01_0002.html b/docs/cce/umn/cce_01_0002.html deleted file mode 100644 index 24eeb3db..00000000 --- a/docs/cce/umn/cce_01_0002.html +++ /dev/null @@ -1,100 +0,0 @@ - - -

Cluster Overview

-

Kubernetes is a containerized application software system that can be easily deployed and managed. It facilitates container scheduling and orchestration.

-

For application developers, Kubernetes can be regarded as a cluster operating system. Kubernetes provides functions such as service discovery, scaling, load balancing, self-healing, and even leader election, freeing developers from infrastructure-related configurations.

-

Using Kubernetes is like running a large number of servers as a single machine on which your applications run. Regardless of the number of servers in a Kubernetes cluster, the method for deploying applications is always the same.

-

Kubernetes Cluster Architecture

A Kubernetes cluster consists of master nodes (masters) and worker nodes (nodes). Applications are deployed on worker nodes, and you can specify the nodes for deployment.

-

The following figure shows the architecture of a Kubernetes cluster.

-
Figure 1 Kubernetes cluster architecture
-

Master node

-

A master node is the machine where the control plane components run, including API server, Scheduler, Controller manager, and etcd.

-
  • API server: functions as a transit station for components to communicate with each other, receives external requests, and writes information to etcd.
  • Controller manager: performs cluster-level functions, such as component replication, node tracing, and node fault fixing.
  • Scheduler: schedules containers to nodes based on various conditions (such as available resources and node affinity).
  • etcd: serves as a distributed data storage component that stores cluster configuration information.
-

In the production environment, multiple master nodes are deployed to ensure cluster high availability. For example, you can deploy three master nodes for your CCE cluster.

-

Worker node

-

A worker node is a compute node in a cluster, that is, a node running containerized applications. A worker node has the following components:

-
  • kubelet: communicates with the container runtime, interacts with the API server, and manages containers on the node.
  • kube-proxy: serves as an access proxy between application components.
  • Container runtime: the software that runs containers, such as Docker. It pulls container images and starts and stops containers on the node.
-
-

Number of Master Nodes and Cluster Scale

When you create a cluster on CCE, the number of master nodes can be set to 1 or 3. Three master nodes can be deployed to create a cluster in HA mode.

-

The master node specifications determine the number of nodes that can be managed by a cluster. When creating a cluster, you can select the cluster management scale, for example, 50 or 200 nodes.

-
-

Cluster Network

From the perspective of the network, all nodes in a cluster are located in a VPC, and containers are running on the nodes. You need to configure node-node, node-container, and container-container communication.

-

A cluster network can be divided into three network types:

-
  • Node network: IP addresses are assigned to nodes in a cluster.
  • Container network: IP addresses are assigned to containers in a cluster for communication between them. Currently, multiple container network models are supported, and each model has its own working mechanism.
  • Service network: A service is a Kubernetes object used to access containers. Each Service has a fixed IP address.
-

When you create a cluster, select a proper CIDR block for each network to ensure that the CIDR blocks do not conflict with each other and each CIDR block has sufficient available IP addresses. After a cluster is created, the container network model cannot be modified. Plan the container network model properly before creating a cluster.

-

You are advised to learn about the cluster network and container network models before creating a cluster. For details, see Overview.

-
-

Cluster Security Groups

When a cluster is created, the following security groups are created to ensure cluster security:

-
  • Cluster name-cce-control-Random number: security group of the master node.
    Observe the following principles when configuring security groups:
    • The source IP addresses defined in the security group rules must be permitted.
    • 4789 (required only for clusters using the container tunnel network model): used for network access between containers.
    • 5443 and 5444: ports to which kube-apiserver of the master node listens. These ports must permit requests from VPC and container CIDR blocks.
    • 9443: used by the network add-on of a worker node to access the master node.
    • 8445: used by the storage add-on of a worker node to access the master node.
    -
    -
  • Cluster name-cce-node-Random number: security group of a worker node.
    Observe the following principles when configuring security groups:
    • The source IP addresses defined in the security group rules must be permitted.
    • 4789 (required only for clusters using the container tunnel network model): used for network access between containers.
    • 10250: used by the master node to access the kubelet component of a worker node (for example, run the kubectl exec {pod} command).
    • 30000-32767: external access ports (NodePort) of a node. These ports need to be specified when you create a Service and must permit requests from VPC, container, and ELB CIDR blocks.
    -
    -
-

After a cluster is created, you can view the created security group on the VPC console.

-

Do not delete the security groups and related rules automatically configured during cluster creation. Otherwise, the cluster will exhibit unexpected behavior.

-
-
-

Cluster Lifecycle

-
Table 1 Cluster status

  • Creating: A cluster is being created and is requesting cloud resources.
  • Normal: A cluster is running properly.
  • Scaling-out: A node is being added to a cluster.
  • Scaling-in: A node is being deleted from a cluster.
  • Hibernating: A cluster is hibernating.
  • Awaking: A cluster is being woken up.
  • Upgrading: A cluster is being upgraded.
  • Unavailable: A cluster is unavailable.
  • Deleting: A cluster is being deleted.
-
Figure 2 Cluster status transition
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0003.html b/docs/cce/umn/cce_01_0003.html deleted file mode 100644 index 3c5ffd78..00000000 --- a/docs/cce/umn/cce_01_0003.html +++ /dev/null @@ -1,21 +0,0 @@ - - -

Resetting a Node

-

Scenario

You can reset a node to modify the node configuration, such as the node OS and login mode.

-

Resetting a node will reinstall the node OS and the Kubernetes software on the node. If a node is unavailable because you modify the node configuration, you can reset the node to rectify the fault.

-
-

Notes and Constraints

  • The cluster version must be v1.13 or later.
-
-

Notes

  • Only worker nodes can be reset. If the node is still unavailable after the resetting, delete the node and create a new one.
  • Resetting a node will reinstall the node OS and interrupt workload services running on the node. Therefore, perform this operation during off-peak hours.
  • Data in the system disk and Docker data disks will be cleared. Back up important data before resetting the node.
  • When an extra data disk is mounted to a node, data in this disk will be cleared if the disk has not been unmounted before the node reset. To prevent data loss, back up data in advance and mount the data disk again after the node reset is complete.
  • The IP addresses of the workload pods on the node will change, but the container network access is not affected.
  • Ensure that there is sufficient remaining EVS disk quota.
  • While the node is being reset, the backend will set the node to the unschedulable state.
-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Nodes. In the same row as the node you will reset, choose More > Reset.
  2. In the dialog box displayed, enter RESET and reconfigure the key pair for login.

    Figure 1 Resetting the selected node
    -

  3. Click Yes and wait until the node is reset.

    After the node is reset, pods on it are automatically migrated to other available nodes.

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0004.html b/docs/cce/umn/cce_01_0004.html deleted file mode 100644 index 92e2ae8c..00000000 --- a/docs/cce/umn/cce_01_0004.html +++ /dev/null @@ -1,95 +0,0 @@ - - -

Managing Node Labels

-

Node Label Usage Scenario

Node labels are mainly used in the following scenarios:

-
  • Node management: Node labels are used to classify nodes.
  • Affinity and anti-affinity between a workload and node:
    • Some workloads require large CPU resources, some require large amounts of memory, some require high I/O, and they may interfere with one another when co-located. In this case, you are advised to add different labels to nodes. When deploying a workload, you can select nodes with specified labels for affinity deployment to ensure the normal running of the system. Otherwise, node anti-affinity deployment can be used.
    • A system can be divided into multiple modules. Each module consists of multiple microservices. To ensure the efficiency of subsequent O&M, you can add a module label to each node so that each module can be deployed on the corresponding node, does not interfere with other modules, and can be easily developed and maintained on its node.
    -
-
-

Inherent Label of a Node

After a node is created, some fixed labels exist and cannot be deleted. For details about these labels, see Table 1.

Table 1 Inherent label of a node

  • topology.kubernetes.io/region (old: failure-domain.beta.kubernetes.io/region): region where the node is located
  • topology.kubernetes.io/zone (old: failure-domain.beta.kubernetes.io/zone): AZ where the node is located
  • node.kubernetes.io/baremetal (old: failure-domain.beta.kubernetes.io/is-baremetal): whether the node is a bare metal node. false indicates that the node is not a bare metal node.
  • node.kubernetes.io/instance-type: node specifications
  • kubernetes.io/arch: node processor architecture
  • kubernetes.io/hostname: node name
  • kubernetes.io/os: OS type
  • node.kubernetes.io/subnetid: ID of the subnet where the node is located
  • os.architecture: node processor architecture. For example, amd64 indicates an AMD 64-bit processor.
  • os.name: node OS name
  • os.version: node OS kernel version
-
-

Adding a Node Label

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Nodes.
  2. In the same row as the node for which you will add labels, choose Operation > More > Manage Labels.
  3. In the dialog box displayed, click Add Label below the label list, enter the key and value of the label to be added, and click OK.

    As shown in the figure, the key is deploy_qa and the value is true, indicating that the node is used to deploy the QA (test) environment.

    -

  4. After the label is added, click Manage Labels. Then, you will see the label that you have added.
-
-

Deleting a Node Label

Only labels added by users can be deleted. Labels that are fixed on the node cannot be deleted.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Nodes.
  2. In the same row as the node for which you will delete labels, choose Operation > More > Manage Labels.
  3. Click Delete, and then click OK to delete the label.

    Label updated successfully is displayed.

    -

-
-

Searching for a Node by Label

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Nodes.
  2. In the upper right corner of the node list, click Search by Label.
  3. Enter a Kubernetes label to find the target node.
-
-
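
The label operations described above can also be performed with kubectl, which may be convenient for scripting. The following is a minimal sketch, assuming kubectl is already connected to the cluster (see Connecting to a Cluster Using kubectl); 192.168.0.10 is an example node name and deploy_qa=true is the label from the earlier example:

# Add a label to a node
kubectl label node 192.168.0.10 deploy_qa=true
# View the labels of a node
kubectl get node 192.168.0.10 --show-labels
# Search for nodes by label
kubectl get nodes -l deploy_qa=true
# Delete a user-added label (note the trailing hyphen)
kubectl label node 192.168.0.10 deploy_qa-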
-
- -
- diff --git a/docs/cce/umn/cce_01_0007.html b/docs/cce/umn/cce_01_0007.html deleted file mode 100644 index 22d1f7ee..00000000 --- a/docs/cce/umn/cce_01_0007.html +++ /dev/null @@ -1,159 +0,0 @@ - - -

Managing Workloads and Jobs

-

Scenario

After a workload is created, you can scale, upgrade, monitor, roll back, or delete the workload, as well as edit its YAML file. -
Table 1 Workload/Job management

  • Logging: You can view logs of Deployments, StatefulSets, DaemonSets, and jobs.
  • Upgrade: You can replace images or image tags to quickly upgrade Deployments, StatefulSets, and DaemonSets without interrupting services.
  • Editing a YAML file: You can modify and download the YAML files of Deployments, StatefulSets, DaemonSets, and pods on the CCE console. YAML files of jobs and cron jobs can only be viewed, copied, and downloaded.
  • Scaling: A workload can be automatically resized according to scaling policies, freeing you from manually adjusting resources for fluctuating service traffic and saving both resources and labor.
  • Monitoring: You can view the CPU and memory usage of Deployments, DaemonSets, and pods on the CCE console to determine the resource specifications you may need.
  • Rollback: Only Deployments can be rolled back.
  • Pausing: Only Deployments can be paused.
  • Resuming: Only Deployments can be resumed.
  • Labeling: Labels are key-value pairs and can be attached to workloads for affinity and anti-affinity scheduling.
  • Deletion: You can delete a workload or job that is no longer needed. Deleted workloads or jobs cannot be recovered.
  • Access settings: You can determine how your workloads can be accessed. For details, see Overview.
  • Scheduling policies: CCE supports custom and simple scheduling policies. Custom scheduling policies allow you to customize node affinity, workload affinity, and workload anti-affinity. Simple scheduling policies allow easy and convenient scheduling.
  • Event: CCE provides event names, event types, number of occurrences, Kubernetes events, first occurrence time, and last occurrence time by workload or pod.
-
-
-
-

Viewing Logs

You can view logs of Deployments, StatefulSets, DaemonSets, and jobs. This section uses a Deployment as an example to describe how to view logs.

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments.
  2. In the same row as the workload you will view, click Logs.

    In the displayed Logs window, view the logs generated in the last 5 minutes, 30 minutes, or 1 hour.

    -

-
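
For reference, container logs can also be viewed with kubectl. A minimal sketch, assuming kubectl is connected to the cluster and the workload pods carry the app=nginx label (example values):

kubectl get pods -l app=nginx          # locate the pods of the workload
kubectl logs <pod-name> --since=30m    # logs generated in the last 30 minutes
kubectl logs -f <pod-name>             # stream logs continuously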
-

Upgrading a Workload

You can replace images or image tags to quickly upgrade Deployments, StatefulSets, and DaemonSets without interrupting services.

-

This section uses a Deployment as an example to describe how to upgrade a workload.

-

Before replacing an image or image version, upload the new image to the SWR service.

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments, and click Upgrade for the Deployment to be upgraded.

    • Workloads cannot be upgraded in batches.
    • Before performing an in-place StatefulSet upgrade, you must manually delete old pods. Otherwise, the upgrade status is always displayed as Upgrading.
    -
    -

  2. Upgrade the Deployment.

    • Image Name: To replace the Deployment image, click Replace Image and select a new image.
    • Image Version: To replace the Deployment image version, select a new version from the Image Version drop-down list.
    • Container Name: To change the container name, click next to Container Name and enter a new name.
    • Privileged Container: After this function is enabled, the container can access all devices on the host.
    • Container Resources: You can set the CPU, memory and GPU quotas.
    • Advanced Settings:
      • Lifecycle: Commands for starting and running containers can be set. -
      • Health Check: CCE provides two types of probes: liveness probe and readiness probe. They are used to determine whether containers and user services are running properly. For more information, see Setting Health Check for a Container.
        • Liveness Probe: used to restart the unhealthy container.
        • Readiness Probe: used to change the container to the unready state when detecting that the container is unhealthy. In this way, service traffic will not be directed to the container.
        -
      • Environment Variables: Environment variables can be added to a container. In general, environment variables are used to set parameters.
        On the Environment Variables tab page, click Add Environment Variable. Currently, three types of environment variables are supported:
        • Added manually: Set Variable Name and Variable Value/Reference.
        • Added from Secret: Set Variable Name and select the desired secret name and data. A secret must be created in advance. For details, see Creating a Secret.
        • Added from ConfigMap: Set Variable Name and select the desired ConfigMap name and data. A ConfigMap must be created in advance. For details, see Creating a ConfigMap.

          To edit an environment variable that has been set, click Edit. To delete an environment variable that has been set, click Delete.

          -
          -
        -
        -
      • Data Storage: Data storage can be mounted to containers for persistent storage and high disk I/O. Local disks and cloud storage volumes are supported. For details, see Storage (CSI).

        You can add data storage volumes only when creating a StatefulSet.

        -
        -
      • Security Context: Container permissions can be configured to protect CCE and other containers from being affected.

        Enter the user ID to set container permissions and prevent systems and other containers from being affected.

        -
      • Log Policies: Log collection policies and log directory can be configured to collect container logs for unified management and analysis. For details, see Container Logs.
      -
    -

  3. Click Submit.
-
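
As an alternative to the console, the image of a Deployment can also be replaced with kubectl. A minimal sketch, assuming a Deployment named nginx whose container is also named nginx and that nginx:1.25 is the new image tag (example values):

kubectl set image deployment/nginx nginx=nginx:1.25   # replace the container image
kubectl rollout status deployment/nginx               # watch the rolling upgrade progress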
-

Editing a YAML file

You can modify and download the YAML files of Deployments, StatefulSets, DaemonSets, and pods on the CCE console. YAML files of jobs and cron jobs can only be viewed, copied, and downloaded. This section uses a Deployment as an example to describe how to edit the YAML file.

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments.
  2. In the same row as the workload you will edit, choose Operation > More > Edit YAML. In the Edit YAML window, edit the YAML file of the current workload.
  3. Click Edit and then OK to save the changes.
  4. (Optional) In the Edit YAML window, click Download to download the YAML file.
-
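
The same YAML can also be edited with kubectl instead of the console. A minimal sketch, assuming a Deployment named nginx (example name):

kubectl edit deployment nginx                         # edit the live object in your default editor
kubectl get deployment nginx -o yaml > nginx.yaml     # download the current YAML to a file
kubectl apply -f nginx.yaml                           # apply the edited file back to the cluster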
-

Scaling a Workload

A workload can be automatically resized according to custom scaling policies, freeing you from manually adjusting resources for fluctuating service traffic and saving both resources and labor. This section uses a Deployment as an example to describe how to scale a workload.

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments.
  2. In the same row as the workload for which you will add a scaling policy, choose Operation > More > Scaling.
  3. On the Scaling tab page, add or edit scaling policies. Scaling policies are classified as auto and manual scaling policies.

    For details, see Scaling a Workload.

    -

-
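
For reference, manual and basic automatic scaling can also be done with kubectl; the console scaling policies described above offer more options. A minimal sketch, assuming a Deployment named nginx (example name):

kubectl scale deployment nginx --replicas=3                          # manual scaling to 3 pods
kubectl autoscale deployment nginx --min=2 --max=5 --cpu-percent=80  # create a basic HPA for auto scaling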
-

Monitoring a Workload

You can view the CPU and memory usage of Deployments, DaemonSets, and pods on the CCE console to determine the resource specifications you may need. This section uses a Deployment as an example to describe how to monitor a workload.

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments.
  2. Click the name of the Deployment to be monitored. On the displayed Deployment details page, click the Monitoring tab to view CPU usage and memory usage of the Deployment.
  3. Click the Pods tab. Click next to a pod to be monitored and click Monitoring.
  4. Check CPU usage and memory usage of the pod.

    • CPU usage

      The horizontal axis indicates time while the vertical axis indicates the CPU usage. The green line indicates the CPU usage while the red line indicates the CPU usage limit.

      -

      It takes some time to calculate CPU usage. Therefore, when CPU and memory usage are displayed for the first time, CPU usage is displayed about one minute later than memory usage.

      -

      CPU and memory usage are displayed only for pods in the running state.

      -
      -
    • Memory usage

      The horizontal axis indicates time while the vertical axis indicates the memory usage. The green line indicates the memory usage while the red line indicates the memory usage limit.

      -

      Memory usage is displayed only for a running pod.

      -
      -
    -

-
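
For reference, resource usage can also be checked with kubectl, assuming a metrics provider (such as the metrics-server add-on) is installed in the cluster and the pods carry the app=nginx label (example value):

kubectl top node                  # CPU and memory usage of nodes
kubectl top pod -l app=nginx      # CPU and memory usage of the workload pods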
-

Rolling Back a Workload (Available Only for Deployments)

CCE records the release history of all Deployments. You can roll back a Deployment to a specified version.

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments.
  2. In the same row as the Deployment you will roll back, choose Operation > More > Roll Back.
  3. In the Roll Back to This Version drop-down list, select the version to which you will roll back the Deployment. Then, click OK.
-
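
The same rollback can be performed with kubectl. A minimal sketch, assuming a Deployment named nginx and that revision 2 is the target version (example values):

kubectl rollout history deployment/nginx                 # list recorded revisions
kubectl rollout undo deployment/nginx --to-revision=2    # roll back to a specified revision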
-

Pausing a Workload (Available Only for Deployments)

You can pause Deployments. After a Deployment is paused, the upgrade command can be successfully issued but will not be applied to the pods.

-

If you are performing a rolling upgrade, the rolling upgrade stops after the pause command is issued. In this case, the new and old pods coexist.

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments.
  2. In the same row as the Deployment you will pause, choose Operation > More > Pause.
  3. In the displayed Pause Workload dialog box, click OK.
  4. Click OK.

    Deployments in the paused state cannot be rolled back.

    -
    -

-
-

Resuming a Workload (Available Only for Deployments)

You can resume paused Deployments. After a Deployment is resumed, it can be upgraded or rolled back. Its pods will inherit the latest updates of the Deployment. If they are inconsistent, the pods are upgraded automatically according to the latest information of the Deployment.

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments.
  2. In the same row as the Deployment you will resume, choose Operation > More > Resume.
  3. In the displayed Resume Workload dialog box, click OK.
-
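
The pause and resume operations above also have kubectl equivalents. A minimal sketch, assuming a Deployment named nginx (example name):

kubectl rollout pause deployment/nginx     # pause the rollout; upgrades are recorded but not applied
kubectl rollout resume deployment/nginx    # resume the rollout and apply pending updates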
-

Managing Labels

Labels are key-value pairs and can be attached to workloads. Workload labels are often used for affinity and anti-affinity scheduling. You can add labels to multiple workloads or a specified workload.

-

You can manage the labels of Deployments, StatefulSets, and DaemonSets based on service requirements. This section uses Deployments as an example to describe how to manage labels.

-

In the following figure, three labels (release, env, and role) are defined for workload APP 1, APP 2, and APP 3. The values of these labels vary with workload.

-
  • Label of APP 1: [release:alpha;env:development;role:frontend]
  • Label of APP 2: [release:beta;env:testing;role:frontend]
  • Label of APP 3: [release:alpha;env:production;role:backend]
-

If you set key to role and value to frontend when using workload scheduling or another function, APP 1 and APP 2 will be selected.

-
Figure 1 Label example
-
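
For reference, the label example above corresponds to the following metadata excerpt and label selector. This is a minimal sketch; app-1 stands in for APP 1 in the figure because Kubernetes object names must be lowercase and cannot contain spaces:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: app-1
  labels:
    release: alpha
    env: development
    role: frontend

# List the workloads whose role label is frontend (APP 1 and APP 2 in this example)
kubectl get deployments -l role=frontend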
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments.
  2. Click the name of the workload whose labels will be managed.
  3. On the workload details page, click Manage Label. In the displayed dialog box, click Add Label. Enter the label key and value, and click OK.

    A key-value pair must contain 1 to 63 characters starting and ending with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed.

    -
    -

-
-

Deleting a Workload/Job

You can delete a workload or job that is no longer needed. Deleted workloads or jobs cannot be recovered. Exercise caution when you perform this operation. This section uses a Deployment as an example to describe how to delete a workload.

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments.
  2. In the same row as the workload you will delete, choose Operation > More > Delete.

    Read the system prompts carefully. A workload cannot be recovered after it is deleted. Exercise caution when performing this operation.

    -

  3. Click Yes.

    • If the node where the pod is located is unavailable or shut down and the workload cannot be deleted, you can forcibly delete the pod from the pod list on the workload details page.
    • Ensure that the storage volumes to be deleted are not used by other workloads. If these volumes are imported or have snapshots, you can only unbind them.
    -
    -

-
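
For reference, deletion can also be performed with kubectl. A minimal sketch, assuming a Deployment named nginx (example name); the second command corresponds to forcibly deleting a pod stuck on an unavailable node:

kubectl delete deployment nginx                            # delete the workload and its pods
kubectl delete pod <pod-name> --grace-period=0 --force     # forcibly delete a pod (use with caution)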
-

Events

On the workload details page, click the Events or Pods tab to view the events, event types, number of occurrences, Kubernetes events, first occurrence time, and last occurrence time.

Event data will be retained for one hour and then automatically deleted.

-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0008.html b/docs/cce/umn/cce_01_0008.html deleted file mode 100644 index 7f7433c8..00000000 --- a/docs/cce/umn/cce_01_0008.html +++ /dev/null @@ -1,177 +0,0 @@ - - -

Setting Container Startup Commands

-

Scenario

When creating a workload or job, you can use an image to specify the processes running in the container.

-

By default, the image runs the default command. To run a specific command or rewrite the default image value, you must perform the following settings:

-
  • Working directory: working directory of the command.

    If the working directory is not specified in the image or on the console, the default value is /.

    -
  • Command: command that controls the running of an image.
  • Args: parameters transferred to the running command.
-

After a container is started, do not modify configurations in the container. If configurations in the container are modified (for example, passwords, certificates, and environment variables of a containerized application are added to the container), the configurations will be lost after the container restarts and container services will become abnormal. An example scenario of container restart is pod rescheduling due to node anomalies.

-

Configurations must be imported to a container as arguments. Otherwise, configurations will be lost after the container restarts.

-
-
-

Commands and Arguments Used to Run a Container

A Docker image has metadata that stores image information. If lifecycle commands and arguments are not set, CCE runs the default commands and arguments, that is, Docker instructions ENTRYPOINT and CMD, provided during image creation.

-

If the commands and arguments used to run a container are set during application creation, they overwrite the default ENTRYPOINT and CMD instructions set during image build. The rules are as follows:

- -
Table 1 Commands and parameters used to run a container

Image Entrypoint | Image CMD    | Command to Run a Container | Args to Run a Container | Command Executed
[touch]          | [/root/test] | Not set                    | Not set                 | [touch /root/test]
[touch]          | [/root/test] | [mkdir]                    | Not set                 | [mkdir]
[touch]          | [/root/test] | Not set                    | [/opt/test]             | [touch /opt/test]
[touch]          | [/root/test] | [mkdir]                    | [/opt/test]             | [mkdir /opt/test]
-
-
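
The last row of the table above can be expressed in a pod template as follows. This is a minimal sketch; example-image is a hypothetical image assumed to have been built with ENTRYPOINT ["touch"] and CMD ["/root/test"]:

containers:
- name: demo
  image: example-image    # hypothetical image with ENTRYPOINT ["touch"] and CMD ["/root/test"]
  command: ["mkdir"]      # overrides the image ENTRYPOINT
  args: ["/opt/test"]     # overrides the image CMD
# Command executed in the container: mkdir /opt/test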

Setting the Startup Command

  1. Log in to the CCE console. Expand Lifecycle when adding a container during workload or job creation.
  2. Enter the running command and parameters, as shown in Table 2.

    • The startup command is provided as a string array and corresponds to the Docker ENTRYPOINT instruction. The format is as follows: ["executable", "param1", "param2", ...]. For details about how Kubernetes starts containers, see the Kubernetes documentation.
    • The lifecycle of a container is the same as that of the startup command. That is, the lifecycle of the container ends after the command is executed.
    -
    - -
    Table 2 Container startup command

    • Command: Enter an executable command, for example, /run/server. If there are multiple commands, separate them with spaces. If a command itself contains a space, enclose it in quotation marks (""). NOTE: If there are multiple commands, you are advised to run /bin/sh or another shell command and pass the other commands to it as arguments.
    • Args: Enter the arguments that control how the command runs, for example, --port=8080. If there are multiple arguments, put each argument on a separate line.
    -

    The following uses Nginx as an example to describe three typical application scenarios of the container startup command:

    -
    Example code:
    nginx -c nginx.conf
    -
    -
    • Scenario 1: Both the command and arguments are set.
      Figure 1 Setting the startup command and parameters
      -

      Example YAML file:

      -
                command:
      -            - nginx
      -          args:
      -            - '-c'
      -            - nginx.conf
      -
    • Scenario 2: Only the command is set.
      Figure 2 Setting the startup command
      -

      A command must be enclosed in double quotes. If no double quotes are added, the command is split into multiple commands based on space characters.

      -
      -

      Example YAML file:

      -
                command:
      -            - nginx -c nginx.conf
      -          args:
      -
    • Scenario 3: Only arguments are set.
      Figure 3 Setting startup arguments
      -

      If the container startup command is not added to the system path, run the /bin/sh command to execute the container startup command. The container startup command must be enclosed in double quotes.

      -
      -

      Example YAML file:

      -
                command:
      -            - /bin/sh
      -          args:
      -            - '-c'
      -            - '"nginx -c nginx.conf"'
      -
    -

  3. Check or modify the YAML file.

    • When creating a workload, in the Configure Advanced Settings step, click YAML on the right.
      Figure 4 Checking or editing a YAML file
      -
    • After the workload is created, go to the workload list. In the same row as the workload, choose More > Edit YAML.
    • After the workload is created, go to the workload details page. On the displayed page, click Edit YAML in the upper right corner.
    -

-
-

Example YAML for Setting Container Startup Commands

This section uses Nginx as an example to describe how to set container startup commands using kubectl.

-

Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl. See Using kubectl to create a Deployment or Using kubectl to create a StatefulSet. For more details on how to set container startup commands, see official Kubernetes documentation.

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: nginx
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: nginx
-  strategy:
-    type: RollingUpdate
-  template:
-    metadata:
-      labels:
-        app: nginx
-    spec:
-      containers:
-      - image: nginx 
-        command:
-        - sleep
-        - '3600'                        #Startup command
-        imagePullPolicy: Always
-        lifecycle:
-          postStart:
-            exec:
-              command:
-              - /bin/bash
-              - install.sh                  #Post-start command
-          preStop:
-            exec:
-              command:
-              - /bin/bash
-              - uninstall.sh                 #Pre-stop command
-        name: nginx
-      imagePullSecrets:
-      - name: default-secret
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0009.html b/docs/cce/umn/cce_01_0009.html deleted file mode 100644 index 7905014f..00000000 --- a/docs/cce/umn/cce_01_0009.html +++ /dev/null @@ -1,37 +0,0 @@ - - -

Using a Third-Party Image

-

Scenario

CCE allows you to create workloads using images pulled from third-party image repositories.

-

Generally, a third-party image repository can be accessed only after authentication (using your account and password). CCE uses the secret-based authentication to pull images. Therefore, you need to create a secret for an image repository before pulling images from the repository.

-
-

Prerequisites

The node where the workload is running is accessible from public networks. You can access public networks through LoadBalancer.

-
-

Using the Console

  1. Create a secret for accessing a third-party image repository.

    In the navigation pane, choose Configuration Center > Secret, and click Create Secret. Type must be set to kubernetes.io/dockerconfigjson. For details, see Creating a Secret.

    -

    Enter the user name and password used to access the third-party image repository.

    -

  2. Create a workload. For details, see Creating a Deployment or Creating a StatefulSet. If the workload will be created from a third-party image, set the image parameters as follows:

    1. Set Secret Authentication to Yes.
    2. Select the secret created in step 1.
    3. Enter the image address.
    -

  3. Click Create.
-
-

Using kubectl

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Create a secret of the dockercfg type using kubectl.

    kubectl create secret docker-registry myregistrykey --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL
    -

    In the preceding commands, myregistrykey indicates the secret name, and other parameters are described as follows:

    -
    • DOCKER_REGISTRY_SERVER: address of a third-party image repository, for example, www.3rdregistry.com or 10.10.10.10:443
    • DOCKER_USER: account used for logging in to a third-party image repository
    • DOCKER_PASSWORD: password used for logging in to a third-party image repository
    • DOCKER_EMAIL: email of a third-party image repository
    -

  3. Use a third-party image to create a workload.

    A dockercfg secret is used for authentication when you pull a private image. The following is an example of using myregistrykey for authentication.
    apiVersion: v1
    -kind: Pod
    -metadata:
    -  name: foo
    -  namespace: default
    -spec:
    -  containers:
    -    - name: foo
    -      image: www.3rdregistry.com/janedoe/awesomeapp:v1
    -  imagePullSecrets:
    -    - name: myregistrykey              #Use the created secret.
    -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0010.html b/docs/cce/umn/cce_01_0010.html deleted file mode 100644 index 94e68226..00000000 --- a/docs/cce/umn/cce_01_0010.html +++ /dev/null @@ -1,38 +0,0 @@ - - -

Overview

-

You can learn about a cluster network from the following two aspects:

-
  • What is a cluster network like? A cluster consists of multiple nodes, and pods (or containers) are running on the nodes. Nodes and containers need to communicate with each other. For details about the cluster network types and their functions, see Cluster Network Structure.
  • How is pod access implemented in a cluster? Accessing a pod or container is, in essence, accessing the service it provides. Kubernetes provides Service and Ingress to address pod access issues. This section summarizes common network access scenarios. You can select the proper scenario based on site requirements. For details about the network access scenarios, see Access Scenarios.
-

Cluster Network Structure

All nodes in the cluster are located in a VPC and use the VPC network. The container network is managed by dedicated network add-ons.

-

-
  • Node Network

    A node network assigns IP addresses to hosts (nodes in the figure above) in a cluster. You need to select a VPC subnet as the node network of the CCE cluster. The number of available IP addresses in a subnet determines the maximum number of nodes (including master nodes and worker nodes) that can be created in a cluster. This quantity is also affected by the container network. For details, see the container network model.

    -
  • Container Network

    A container network assigns IP addresses to containers in a cluster. CCE inherits the IP-Per-Pod-Per-Network network model of Kubernetes. That is, each pod has an independent IP address on a network plane and all containers in a pod share the same network namespace. All pods in a cluster exist in a directly connected flat network. They can access each other through their IP addresses without using NAT. Kubernetes only provides a network mechanism for pods, but does not directly configure pod networks. The configuration of pod networks is implemented by specific container network add-ons. The container network add-ons are responsible for configuring networks for pods and managing container IP addresses.

    -

    Currently, CCE supports the following container network models:

    -
    • Container tunnel network: The container tunnel network is constructed on but independent of the node network through tunnel encapsulation. This network model uses VXLAN to encapsulate Ethernet packets into UDP packets and transmits them in tunnels. Open vSwitch serves as the backend virtual switch.
    • VPC network: The VPC network uses VPC routing to integrate with the underlying network. This network model is applicable to performance-intensive scenarios. The maximum number of nodes allowed in a cluster depends on the route quota in a VPC network. Each node is assigned a CIDR block of a fixed size. This networking model is free from tunnel encapsulation overhead and outperforms the container tunnel network model. In addition, as VPC routing includes routes to node IP addresses and the container CIDR block, container pods in the cluster can be directly accessed from outside the cluster.
    • Developed by CCE, Cloud Native Network 2.0 deeply integrates Elastic Network Interfaces (ENIs) and Sub Network Interfaces (sub-ENIs) of VPC. Container IP addresses are allocated from the VPC CIDR block. ELB passthrough networking is supported to direct access requests to containers. Security groups and elastic IPs (EIPs) are bound to deliver high performance.
    -

    The performance, networking scale, and application scenarios of a container network vary according to the container network model. For details about the functions and features of different container network models, see Overview.

    -
  • Service Network

    Service is also a Kubernetes object. Each Service has a fixed IP address. When creating a cluster on CCE, you can specify the Service CIDR block. The Service CIDR block cannot overlap with the node or container CIDR block. The Service CIDR block can be used only within a cluster.

    -
-
-

Service

A Service is used for pod access. With a fixed IP address, a Service forwards access traffic to pods and performs load balancing for these pods.

-
Figure 1 Accessing pods through a Service
-

You can configure the following types of Services:

-
  • ClusterIP: used to make the Service only reachable from within a cluster.
  • NodePort: used for access from outside a cluster. A NodePort Service is accessed through the port on the node.
  • LoadBalancer: used for access from outside a cluster. It is an extension of NodePort, to which a load balancer routes, and external systems only need to access the load balancer.
  • ENI LoadBalancer: used for access from outside the cluster. An ENI LoadBalancer Service directs traffic from a load balancer directly to backend pods, reducing latency and avoiding performance loss for containerized applications.
-

For details about the Service, see Overview.

-
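
As a reference for the Service types listed above, the following is a minimal NodePort Service sketch; the name nginx-nodeport, the app=nginx selector, and the ports are example values:

apiVersion: v1
kind: Service
metadata:
  name: nginx-nodeport
spec:
  type: NodePort              # exposes the Service on a port of each node
  selector:
    app: nginx                # forwards traffic to pods with this label
  ports:
  - port: 8080                # Service port inside the cluster
    targetPort: 80            # container port
    nodePort: 30080           # node port in the 30000-32767 range (auto-assigned if omitted)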
-

Ingress

Services forward requests using layer-4 TCP and UDP protocols. Ingresses forward requests using layer-7 HTTP and HTTPS protocols. Domain names and paths can be used to achieve finer granularities.

-
Figure 2 Ingress and Service
-

For details about the ingress, see Overview.

-
-

Access Scenarios

Workload access scenarios can be categorized as follows:

-
  • Intra-cluster access: A ClusterIP Service is used for workloads in the same cluster to access each other.
  • Access from outside a cluster: A Service (NodePort or LoadBalancer type) or an ingress is recommended for a workload outside a cluster to access workloads in the cluster.
    • Access through the internet requires an EIP to be bound to the node or load balancer.
    • Access through an intranet uses only the intranet IP address of the node or load balancer. If workloads are located in different VPCs, a peering connection is required to enable communication between different VPCs.
    -
  • External access initiated by a workload:
    • Accessing an intranet: The workload accesses the intranet address, but the implementation method varies depending on container network models. Ensure that the peer security group allows the access requests from the container CIDR block.
    • Accessing a public network: You need to assign an EIP to the node where the workload runs, or configure SNAT rules through the NAT gateway.
    -
-
Figure 3 Network access diagram
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0011.html b/docs/cce/umn/cce_01_0011.html deleted file mode 100644 index e5d9c508..00000000 --- a/docs/cce/umn/cce_01_0011.html +++ /dev/null @@ -1,124 +0,0 @@ - - -

Intra-Cluster Access (ClusterIP)

-

Scenario

ClusterIP Services allow workloads in the same cluster to use their cluster-internal domain names to access each other.

-

The cluster-internal domain name format is <Service name>.<Namespace of the workload>.svc.cluster.local:<Port>, for example, nginx.default.svc.cluster.local:80.

-

Figure 1 shows the mapping relationships between access channels, container ports, and access ports.

-
Figure 1 Intra-cluster access (ClusterIP)
-
-

Adding a Service When Creating a Workload

You can set the access type (Service) when creating a workload on the CCE console.

-
  1. In the Set Application Access step of Creating a Deployment, Creating a StatefulSet, or Creating a DaemonSet, click Add Service and set the following parameters:

    • Access Type: Select ClusterIP.
    • Service Name: Specify a Service name, which can be the same as the workload name.
    • Port Settings
      • Protocol: protocol used by the Service.
      • Container Port: port on which the workload listens. The Nginx application listens on port 80.
      • Access Port: a port mapped to the container port at the cluster-internal IP address. The workload can be accessed at <cluster-internal IP address>:<access port>. The port number range is 1–65535.
      -
    -

  2. After the configuration, click OK and then Next: Configure Advanced Settings. On the page displayed, click Create.
  3. Click View Deployment Details or View StatefulSet Details. On the Services tab page, obtain the access address, for example, 10.247.74.100:8080.
-
-

Adding a Service After Creating a Workload

You can set the Service after creating a workload. This has no impact on the workload status and takes effect immediately. The procedure is as follows:

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments. On the workload list, click the name of the workload for which you will create a Service.
  2. On the Services tab page, click Add Service.
  3. On the Create Service page, select ClusterIP from the Access Type drop-down list.
  4. Set intra-cluster access parameters.

    • Service Name: Service name, which can be the same as the workload name.
    • Cluster Name: name of the cluster where the workload runs. The value is inherited from the workload creation page and cannot be changed.
    • Namespace: namespace where the workload is located. The value is inherited from the workload creation page and cannot be changed.
    • Workload: workload for which you want to add a Service.
    • Port Settings
      • Protocol: protocol used by the Service.
      • Container Port: port on which the workload listens. The Nginx application listens on port 80.
      • Access Port: port mapped to the container port at the cluster-internal IP address. The workload can be accessed at <cluster-internal IP address>:<access port>. The port number range is 1–65535.
      -
    -

  5. Click Create. The ClusterIP Service will be added for the workload.
-
-

Setting the Access Type Using kubectl

You can run kubectl commands to set the access type (Service). This section uses a Nginx workload as an example to describe how to implement intra-cluster access using kubectl.

-
  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Create and edit the nginx-deployment.yaml and nginx-clusterip-svc.yaml files.

    The file names are user-defined. nginx-deployment.yaml and nginx-clusterip-svc.yaml are merely example file names.

    -
    vi nginx-deployment.yaml
    apiVersion: apps/v1
    -kind: Deployment
    -metadata:
    -  name: nginx
    -spec:
    -  replicas: 1
    -  selector:
    -    matchLabels:
    -      app: nginx
    -  template:
    -    metadata:
    -      labels:
    -        app: nginx
    -    spec:
    -      containers:
    -      - image: nginx:latest
    -        name: nginx
    -      imagePullSecrets:
    -      - name: default-secret
    -
    -
    vi nginx-clusterip-svc.yaml
    apiVersion: v1
    -kind: Service
    -metadata:
    -  labels:
    -    app: nginx
    -  name: nginx-clusterip
    -spec:
    -  ports:
    -  - name: service0
    -    port: 8080                # Port for accessing a Service.
    -    protocol: TCP             # Protocol used for accessing a Service. The value can be TCP or UDP.
    -    targetPort: 80            # Port used by a Service to access the target container. This port is closely related to the applications running in a container. In this example, the Nginx image uses port 80 by default.
    -  selector:                   # Label selector. A Service selects a pod based on the label and forwards the requests for accessing the Service to the pod. In this example, select the pod with the app:nginx label.
    -    app: nginx
    -  type: ClusterIP             # Type of a Service. ClusterIP indicates that a Service is only reachable from within the cluster.
    -
    -

  3. Create a workload.

    kubectl create -f nginx-deployment.yaml

    -

    If information similar to the following is displayed, the workload has been created.

    -
    deployment "nginx" created
    -

    kubectl get po

    -

    If information similar to the following is displayed, the workload is running.

    -
    NAME                     READY     STATUS             RESTARTS   AGE
    -nginx-2601814895-znhbr   1/1       Running            0          15s
    -

  4. Create a Service.

    kubectl create -f nginx-clusterip-svc.yaml

    -

    If information similar to the following is displayed, the Service is being created.

    -
    service "nginx-clusterip" created
    -

    kubectl get svc

    -

    If information similar to the following is displayed, the Service has been created, and a cluster-internal IP address has been assigned to the Service.

    -
    # kubectl get svc
    -NAME              TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)    AGE
    -kubernetes        ClusterIP   10.247.0.1     <none>        443/TCP    4d6h
    -nginx-clusterip   ClusterIP   10.247.74.52   <none>        8080/TCP   14m
    -

  5. Access a Service.

    A Service can be accessed from containers or nodes in a cluster.

    -

    Create a pod, access the pod, and run the curl command to access IP address:Port or the domain name of the Service, as shown in the following figure.

    -

    The domain name suffix can be omitted. In the same namespace, you can directly use nginx-clusterip:8080 for access. In other namespaces, you can use nginx-clusterip.default:8080 for access.

    -
    # kubectl run -i --tty --image nginx:alpine test --rm /bin/sh
    -If you don't see a command prompt, try pressing enter.
    -/ # curl 10.247.74.52:8080
    -<!DOCTYPE html>
    -<html>
    -<head>
    -<title>Welcome to nginx!</title>
    -<style>
    -    body {
    -        width: 35em;
    -        margin: 0 auto;
    -        font-family: Tahoma, Verdana, Arial, sans-serif;
    -    }
    -</style>
    -</head>
    -<body>
    -<h1>Welcome to nginx!</h1>
    -<p>If you see this page, the nginx web server is successfully installed and
    -working. Further configuration is required.</p>
    -
    -<p>For online documentation and support please refer to
    -<a href="http://nginx.org/">nginx.org</a>.<br/>
    -Commercial support is available at
    -<a href="http://nginx.com/">nginx.com</a>.</p>
    -
    -<p><em>Thank you for using nginx.</em></p>
    -</body>
    -</html>
    -/ # curl nginx-clusterip.default.svc.cluster.local:8080
    -...
    -<h1>Welcome to nginx!</h1>
    -...
    -/ # curl nginx-clusterip.default:8080
    -...
    -<h1>Welcome to nginx!</h1>
    -...
    -/ # curl nginx-clusterip:8080
    -...
    -<h1>Welcome to nginx!</h1>
    -...
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0012.html b/docs/cce/umn/cce_01_0012.html deleted file mode 100644 index e8127065..00000000 --- a/docs/cce/umn/cce_01_0012.html +++ /dev/null @@ -1,127 +0,0 @@ - - -

Creating a Node Pool

-

Scenario

This section describes how to create a node pool and perform operations on the node pool. For details about how a node pool works, see Node Pool Overview.

-
-

Notes and Constraints

  • For details about how to add a node pool to a CCE Turbo cluster, see Procedure - for CCE Turbo Clusters.
  • The autoscaler add-on needs to be installed for node auto scaling. For details about the add-on installation and parameter configuration, see autoscaler.
-
-

Procedure

To create a node pool in a cluster, perform the following steps:

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Node Pools.
  2. In the upper right corner of the page, click Create Node Pool.
  3. Set node pool parameters.

    • Current Region: geographic location of the node pool to be created.

      To minimize network latency and resource access time, select the region nearest to your node pool. Cloud resources are region-specific and cannot be used across regions over internal networks.

      -
    • Name: name of the new node pool. By default, the name is in the format of Cluster name-nodepool-Random number. You can also use a custom name.
    • Node Type: Currently, only VM nodes are supported.
    • Nodes: number of nodes to be created for this node pool. The value cannot exceed the maximum number of nodes that can be managed by the cluster.
    • Auto Scaling:
      • By default, this parameter is disabled.
      • After you enable autoscaler by clicking , nodes in the node pool will be automatically created or deleted based on cluster loads.
        • Maximum Nodes and Minimum Nodes: You can set the maximum and minimum number of nodes to ensure that the number of nodes to be scaled is within a proper range.
        • Priority: Set this parameter based on service requirements. A larger value indicates a higher priority. For example, if this parameter is set to 1 and 4 respectively for node pools A and B, B has a higher priority than A. If the priorities of multiple node pools are set to the same value, for example, 2, the node pools are not prioritized and the system performs scaling based on the minimum resource waste principle.

          CCE selects a node pool for auto scaling based on the following policies:

          -
          1. CCE uses algorithms to determine whether a node pool meets the conditions to allow scheduling of a pod in the Pending state, including whether the node resources are greater than those requested by the pod, and whether the nodeSelector, nodeAffinity, and taints meet the conditions. In addition, node pools that failed to scale out (due to insufficient resources or other reasons) and are still in the 15-minute cool-down interval are filtered out.
          2. If multiple node pools meet the scaling requirements, the system checks the priority of each node pool and selects the node pool with the highest priority for scaling. The value ranges from 0 to 100 and the default priority is 0. The value 100 indicates the highest priority, and the value 0 indicates the lowest priority.
          3. If multiple node pools have the same priority or no priority is configured for them, the system selects the node pool that will consume the least resources based on the configured VM specification.
          4. If the VM specifications of multiple node pools are the same but the node pools are deployed in different AZs, the system randomly selects a node pool to trigger scaling.
          -
          -
        • Scale-In Cooling Interval: Set this parameter in the unit of minute or hour. This parameter indicates the interval between the previous scale-out action and the next scale-in action.

          Scale-in cooling intervals can be configured in the node pool settings and the autoscaler add-on settings.

          -

          Scale-in cooling interval configured in a node pool

          -

          This interval indicates the period during which nodes added to the current node pool after a scale-out operation cannot be deleted. This interval takes effect at the node pool level.

          -

          Scale-in cooling interval configured in the autoscaler add-on

          -

          The interval after a scale-out indicates the period during which the entire cluster cannot be scaled in after the autoscaler add-on triggers scale-out (due to the unschedulable pods, metrics, and scaling policies). This interval takes effect at the cluster level.

          -

          The interval after a node is deleted indicates the period during which the cluster cannot be scaled in after the autoscaler add-on triggers scale-in. This interval takes effect at the cluster level.

          -

          The interval after a failed scale-in indicates the period during which the cluster cannot be scaled in after the autoscaler add-on triggers scale-in. This interval takes effect at the cluster level.

          -
        -

        You are advised not to store important data on nodes in a node pool because after auto scaling, data cannot be restored as nodes may be deleted.

        -
        -

        If Autoscaler is enabled, install the autoscaler add-on to use the auto scaling feature.

        -
      -
    • AZ: An AZ is a physical region where resources use independent power supply and networks. AZs are physically isolated but interconnected through an internal network.

      Set an AZ based on your requirements. After a node pool is created, AZ cannot be modified. Exercise caution when selecting an AZ for the node pool.

      -

      To enhance workload reliability, you are advised to select Random AZ, allowing nodes to be randomly and evenly distributed among different AZs.

      -

      In a CCE Turbo cluster, an AZ is randomly selected from available AZs, and all nodes are created in the selected AZ.

      -
      -
    • Specifications: Select node specifications that best fit your business needs.
      • General-purpose: provides a balance of computing, memory, and network resources. It is a good choice for many applications, such as web servers, workload development, workload testing, and small-scale databases.
      • Memory-optimized: provides higher memory capacity than general-purpose nodes and is suitable for relational databases, NoSQL, and other workloads that are both memory-intensive and data-intensive.
      • GPU-accelerated: provides powerful floating-point computing and is suitable for real-time, highly concurrent massive computing. Graphical processing units (GPUs) of P series are suitable for deep learning, scientific computing, and CAE. GPUs of G series are suitable for 3D animation rendering and CAD. GPU-accelerated nodes can be created only in clusters of v1.11 or later. GPU-accelerated nodes are available only in certain regions.
      • General computing-plus: provides stable performance and exclusive resources to enterprise-class workloads with high and stable computing performance.
      • Disk-intensive: supports local disk storage and provides high network performance. It is designed for workloads requiring high throughput and data switching, such as big data workloads.
      -

      To ensure node stability, CCE automatically reserves some resources to run necessary system components. For details, see Formula for Calculating the Reserved Resources of a Node.

      -
    • OS: Select an OS for the node to be created.

      Reinstalling the OS or modifying OS configurations could make the node unavailable. Exercise caution when performing these operations.

      -
      -
    • VPC: The value is the same as that of the cluster and cannot be changed.

      This parameter is displayed only for clusters of v1.13.10-r0 and later.

      -
    • Subnet: A subnet improves network security by providing exclusive network resources that are isolated from other networks.

      You can select any subnet in the cluster VPC. Cluster nodes can belong to different subnets.

      -

      Ensure that the DNS server in the subnet can resolve the OBS domain name. Otherwise, nodes cannot be created.

      -

      This parameter is displayed only for clusters of v1.13.10-r0 and later.

      -
    • System Disk: Set the system disk space of the worker node. The value ranges from 40 GB to 1,024 GB. The default value is 40 GB.

      By default, system disks support Common I/O (SATA), High I/O (SAS), and Ultra-high I/O (SSD) EVS disks.

      -
      Encryption: System disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption function. This function is available only in certain regions.
      • Encryption is not selected by default.
      • After you select Encryption, you can select an existing key in the displayed Encryption Setting dialog box. If no key is available, click the link next to the drop-down box to create a key. After the key is created, click the refresh icon.
      -
      -
    • Data Disk: Set the data disk space of the worker node. The value ranges from 100 GB to 32,768 GB. The default value is 100 GB. The EVS disk types provided for the data disk are the same as those for the system disk.

      If the data disk is detached or damaged, the Docker service becomes abnormal and the node becomes unavailable. You are advised not to delete the data disk.

      -
      -
      • LVM: If this option is selected, CCE data disks are managed by the Logical Volume Manager (LVM). With LVM enabled, you can adjust the disk space allocation for different resources. This option is selected for the first data disk by default and cannot be deselected. You can choose to enable or disable LVM for new data disks.
        • This option is selected by default, indicating that LVM management is enabled.
        • You can deselect the check box to disable LVM management.
          • Disk space of the data disks managed by LVM will be allocated according to the ratio you set.
          • When creating a node in a cluster of v1.13.10 or later, if LVM is not selected for a data disk, follow instructions in Adding a Second Data Disk to a Node in a CCE Cluster to fill in the pre-installation script and format the data disk. Otherwise, the data disk will still be managed by LVM.
          • When creating a node in a cluster earlier than v1.13.10, you must format the data disks that are not managed by LVM. Otherwise, either these data disks or the first data disk will be managed by LVM.
          -
          -
        -
      • Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption function.
        This function is supported only for clusters of v1.13.10 or later in certain regions, and is not displayed for clusters of v1.13.10 or earlier.
        • Encryption is not selected by default.
        • After you select Encryption, you can select an existing key in the displayed Encryption Setting dialog box. If no key is available, click the link next to the drop-down box to create a key. After the key is created, click the refresh icon.
        -
        -
      • Add Data Disk: Currently, a maximum of two data disks can be attached to a node. After the node is created, you can go to the ECS console to attach more data disks. This function is available only to clusters of certain versions.
      • Data disk space allocation: Click to specify the resource ratio for Kubernetes Space and User Space. Disk space of the data disks managed by LVM will be allocated according to the ratio you set. This function is available only to clusters of certain versions.
        • Kubernetes Space: You can specify the ratio of the data disk space for storing Docker and kubelet resources. Docker resources include the Docker working directory, Docker images, and image metadata. kubelet resources include pod configuration files, secrets, and emptyDirs.

          The Docker space cannot be less than 10%, and the space size cannot be less than 60 GB. The kubelet space cannot be less than 10%.

          -

          The Docker space size is determined by your service requirements. For details, see Data Disk Space Allocation.

          -
        • User Space: You can set the ratio of the disk space that is not allocated to Kubernetes resources and the path to which the user space is mounted.

          Note that the mount path cannot be /, /home/paas, /var/paas, /var/lib, /var/script, /var/log, /mnt/paas, or /opt/cloud, and cannot conflict with the system directories (such as bin, lib, home, root, boot, dev, etc, lost+found, mnt, proc, sbin, srv, tmp, var, media, opt, selinux, sys, and usr). Otherwise, the system or node installation will fail.

          -
          -
        -
      -
      If the cluster version is v1.13.10-r0 or later and the node specification is Disk-intensive, the following options are displayed for data disks:
      • EVS: Parameters are the same as those when the node type is not Disk-intensive. For details, see Data Disk above.
      • Local disk: Local disks may break down and do not ensure data reliability. It is recommended that you store service data in EVS disks, which are more reliable than local disks.
        Local disk parameters are as follows:
        • Disk Mode: If the node type is disk-intensive, the supported disk mode is HDD.
        • Read/Write Mode: When multiple local disks exist, you can set the read/write mode. The serial and sequential modes are supported. Sequential indicates that data is read and written in linear mode. When a disk is used up, the next disk is used. Serial indicates that data is read and written in striping mode, allowing multiple local disks to be read and written at the same time.
        • Kubernetes Space: You can specify the ratio of the data disk space for storing Docker and kubelet resources. Docker resources include the Docker working directory, Docker images, and image metadata. kubelet resources include pod configuration files, secrets, and emptyDirs.
        • User Space: You can set the ratio of the disk space that is not allocated to Kubernetes resources and the path to which the user space is mounted.
        -
        -
      -
      • The ratio of disk space allocated to the Kubernetes space and user space must add up to 100%. You can click the refresh icon to refresh the data after you have modified the ratio.
      • By default, disks run in the direct-lvm mode. If data disks are removed, the loop-lvm mode will be used and this will impair system stability.
      -
      -
      -
    • Login Mode:
      • Key pair: Select the key pair used to log in to the node. You can select a shared key.

        A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create a key pair.

        -

        When creating a node using a key pair, IAM users can select only the key pairs created by themselves, regardless of whether these users are in the same group. For example, user B cannot use the key pair created by user A to create a node, and the key pair is not displayed in the drop-down list on the CCE console.

        -
        -
      -
    -

  4. Advanced ECS Settings (optional): Click to show advanced ECS settings.

    • ECS Group: An ECS group logically groups ECSs. The ECSs in the same ECS group comply with the same policy associated with the ECS group.
      • Anti-affinity: ECSs in an ECS group are deployed on different physical hosts to improve service reliability.
      -

      Select an existing ECS group, or click Create ECS Group to create one. After the ECS group is created, click the refresh button.

      -
    • Resource Tags: By adding tags to resources, you can classify resources.

      You can create predefined tags in Tag Management Service (TMS). Predefined tags are visible to all service resources that support the tagging function. You can use predefined tags to improve tag creation and migration efficiency.

      -

      CCE will automatically create the "CCE-Dynamic-Provisioning-Node=node id" tag. A maximum of 5 tags can be added.

      -
    • Agency: An agency is created by a tenant administrator on the IAM console. By creating an agency, you can share your cloud server resources with another account, or entrust a more professional person or team to manage your resources. To authorize an ECS or BMS to call cloud services, select Cloud service as the agency type, click Select, and then select ECS BMS.
    • Pre-installation Script: Enter a maximum of 1,000 characters.

      The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed. The script is usually used to format data disks.

      -
    • Post-installation Script: Enter a maximum of 1,000 characters.

      The script will be executed after Kubernetes software is installed and will not affect the installation. The script is usually used to modify Docker parameters.

      -
    • Subnet IP Address: Select Automatically assign IP address (recommended) or Manually assign IP address.
    -

  5. Advanced Kubernetes Settings (optional): Click to show advanced Kubernetes settings.

    • Max Pods: maximum number of pods that can be created on a node, including the system's default pods. If the cluster uses the VPC network model, the maximum value is determined by the number of IP addresses that can be allocated to containers on each node.

      This limit prevents the node from being overloaded by managing too many pods. For details, see Maximum Number of Pods That Can Be Created on a Node.

      -
    • Taints: This field is left blank by default. Taints allow nodes to repel a set of pods. You can add a maximum of 10 taints for each node. Each taint contains the following parameters:
      • Key: A key must contain 1 to 63 characters starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
      • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
      • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
      -
      • If taints are used, you must configure tolerations in the YAML files of pods (see the toleration example after this list). Otherwise, scale-up may fail or pods cannot be scheduled onto the added nodes.
      • After a node pool is created, you can click Edit to modify its configuration. The modification will be synchronized to all nodes in the node pool.
      -
      -
    • K8S Labels: Labels are key/value pairs that are attached to objects, such as pods. Labels are used to specify identifying attributes of objects that are meaningful and relevant to users, but do not directly imply semantics to the core system. For more information, see Labels and Selectors.
    • Maximum Data Space per Container: maximum data space that can be used by a container. The value ranges from 10 GB to 500 GB. If the value of this field is larger than the data disk space allocated to Docker resources, the latter will override the value specified here. Typically, 90% of the data disk space is allocated to Docker resources. This parameter is displayed only for clusters of v1.13.10-r0 and later.
    -
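    The following is a minimal sketch of a pod that tolerates a hypothetical taint with key node-pool, value gpu, and effect NoSchedule. The key, value, and effect must match the taint configured for the node pool:

    apiVersion: v1
    kind: Pod
    metadata:
      name: toleration-demo
    spec:
      containers:
      - name: nginx
        image: nginx
      tolerations:
      - key: "node-pool"       # must match the taint key
        operator: "Equal"
        value: "gpu"           # must match the taint value
        effect: "NoSchedule"   # must match the taint effect

    Pods without a matching toleration will not be scheduled onto nodes carrying a NoSchedule taint.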

  6. Click Next: Confirm to confirm the configured service parameters and specifications.
  7. Click Submit.

    It takes about 6 to 10 minutes to create a node pool. You can click Back to Node Pool List to perform other operations on the node pool or click Go to Node Pool Events to view the node pool details. If the status of the node pool is Normal, the node pool is successfully created.

    -

-
-

Procedure - for CCE Turbo Clusters

  1. Log in to the CCE console.
  2. Click the cluster name to open its details page, choose Nodes on the left, and click the Node Pool tab on the right.
  3. In the upper right corner of the page, click Create Node Pool.
  4. Configure computing parameters.

    • AZ: An AZ is a physical region where resources use independent power supply and networks. AZs are physically isolated but interconnected through an internal network.

      Set an AZ based on your requirements. After a node pool is created, AZ cannot be modified. Exercise caution when selecting an AZ for the node pool.

      -

      To enhance workload reliability, you are advised to select Random AZ, allowing nodes to be randomly and evenly distributed among different AZs.

      -
    • Container Runtime: runc or kata.

      For details about common containers and secure containers, see Secure Containers and Common Containers.

      -
    • Specifications: Select node specifications that best fit your business needs.
      • General-purpose: provides a balance of computing, memory, and network resources. It is a good choice for many applications, such as web servers, workload development, workload testing, and small-scale databases.
      • Memory-optimized: provides higher memory capacity than general-purpose nodes and is suitable for relational databases, NoSQL, and other workloads that are both memory-intensive and data-intensive.
      • GPU-accelerated: provides powerful floating-point computing and is suitable for real-time, highly concurrent massive computing. Graphical processing units (GPUs) of P series are suitable for deep learning, scientific computing, and CAE. GPUs of G series are suitable for 3D animation rendering and CAD. GPU-accelerated nodes can be created only in clusters of v1.11 or later. GPU-accelerated nodes are available only in certain regions.
      • General computing-plus: provides stable performance and exclusive resources to enterprise-class workloads with high and stable computing performance.
      • Disk-intensive: supports local disk storage and provides high network performance. It is designed for workloads requiring high throughput and data switching, such as big data workloads.
      -

      To ensure node stability, CCE automatically reserves some resources to run necessary system components. For details, see Formula for Calculating the Reserved Resources of a Node.

      -
    • OS: Select an OS for the node to be created. In certain regions, only the OS options are displayed, and the Public image and Private image options are unavailable.
      • Public image: Select an OS for the node.
      • Private image (OBT): If no private image is available, click Creating a Private Image to create one. This function is available only for clusters of v1.15 or later.
      -

      Reinstalling the OS or modifying OS configurations could make the node unavailable. Exercise caution when performing these operations.

      -
      -
    • Login Mode:
      • Key pair: Select the key pair used to log in to the node. You can select a shared key.

        A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create a key pair.

        -

        When creating a node using a key pair, IAM users can select only the key pairs created by themselves, regardless of whether these users are in the same group. For example, user B cannot use the key pair created by user A to create a node, and the key pair is not displayed in the drop-down list on the CCE console.

        -
        -
      -
    -

  5. Configure storage parameters.

    • System Disk: Set the system disk space of the worker node. The value ranges from 40 GB to 1,024 GB. The default value is 50 GB.

      By default, system disks support Common I/O (SATA), High I/O (SAS), and Ultra-high I/O (SSD) EVS disks.

      -
    • Data Disk: Set the data disk space of the worker node. The value ranges from 100 GB to 32,768 GB. The default value is 100 GB. The data disk space size is determined by your service requirements. For details, see Data Disk Space Allocation.

      If the cluster version is v1.13.10-r0 or later and the node type is Disk-intensive, data disks can be EVS disks or local disks.

      -

      If the data disk is detached or damaged, the Docker service becomes abnormal and the node becomes unavailable. You are advised not to delete the data disk.

      -
      -
      • Data disk space allocation: Click Expand and select Allocate Disk Space to customize the data disk space usage.

        You can customize the resource proportion for the container runtime and kubelet in the data disk. By default, 90% of the space is allocated to containers, and the remaining space is allocated to the kubelet component.

        -

        You can also define the maximum space that can be occupied by a single container. The default value is 10 GB.

        -
      • Adding data disks: A node must have at least one data disk, and more data disks can be added. Click Add Data Disk to add one, and then click Expand to attach the new data disk to a specified directory.

        Note that the mount path cannot be /, /home/paas, /var/paas, /var/lib, /var/script, /var/log, /mnt/paas, or /opt/cloud, and cannot conflict with the system directories (such as bin, lib, home, root, boot, dev, etc, lost+found, mnt, proc, sbin, srv, tmp, var, media, opt, selinux, sys, and usr). Otherwise, the system or node installation will fail.

        -
        -
      • Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption function.
        • Encryption is not selected by default.
        • After you select Encryption, you can select an existing key in the displayed Encryption Setting dialog box. If no key is available, click the link next to the drop-down box to create a key. After the key is created, click the refresh icon.
        -
      -
      -
    -

  6. Configure networking parameters.

    • VPC: The value is the same as that of the cluster and cannot be changed.

      This parameter is displayed only for clusters of v1.13.10-r0 and later.

      -
    • Subnet: A subnet improves network security by providing exclusive network resources that are isolated from other networks.

      You can select any subnet in the cluster VPC. Cluster nodes can belong to different subnets.

      -

      Ensure that the DNS server in the subnet can resolve the OBS domain name. Otherwise, nodes cannot be created.

      -

      This parameter is displayed only for clusters of v1.13.10-r0 and later.

      -
    -

  7. Configure advanced settings.

    • Kubernetes Label: Kubernetes provides labels that let you filter node resources by running kubectl commands (see the example after this list).
    • Resource Tags: Resource tags can be added to classify resources.

      You can create predefined tags in Tag Management Service (TMS). Predefined tags are visible to all service resources that support the tagging function. You can use these tags to improve tagging and resource migration efficiency.

      -

      CCE will automatically create the "CCE-Dynamic-Provisioning-Node=Node ID" tag. A maximum of 5 tags can be added.

      -
    • Taints: Taints allow a node to repel a set of pods and work with tolerations to ensure that pods are not scheduled onto inappropriate nodes. For details, see Configuring Node Scheduling (Tainting).
    • Max Pods: maximum number of pods that can be created on a node, including the system's default pods. If the cluster uses the VPC network model, the maximum value is determined by the number of IP addresses that can be allocated to containers on each node.

      This limit prevents the node from being overloaded by managing too many pods. For details, see Maximum Number of Pods That Can Be Created on a Node.

      -
    • Pre-installation Script: Enter a maximum of 1,000 characters.

      The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed. It is commonly used to format data disks.

      -
    • Post-installation Script: Enter a maximum of 1,000 characters.

      The script will be executed after Kubernetes software is installed and will not affect the installation. It is commonly used to modify Docker parameters.

      -
    • Maximum Data Space per Container: maximum data space that can be used by a container. The value ranges from 10 GB to 500 GB. If the value of this field is larger than the data disk space allocated to Docker resources, the latter will override the value specified here. Typically, 90% of the data disk space is allocated to Docker resources. This parameter is displayed only for clusters of v1.13.10-r0 and later.
    -
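    As an illustration of filtering by the labels configured above, assuming a hypothetical label node-pool=gpu was added to the nodes, the following kubectl commands list node labels and select only the matching nodes:

    kubectl get nodes --show-labels
    kubectl get nodes -l node-pool=gpu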

  8. Click Next: Confirm.
  9. Click Submit.
-
-

Viewing Node Pools in a Cluster

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Node Pools.
  2. In the upper right corner of the node pool list, select a cluster. All node pools in the cluster will be displayed. You can view the node type, node specifications, autoscaler status, and OS of each node pool.

    • A default node pool DefaultPool is automatically created in each cluster. The default node pool cannot be edited, deleted, or migrated. All nodes created during and after cluster creation are displayed in the default node pool.
    • To display a list of nodes in DefaultPool, click the Nodes subcard in the DefaultPool card.
    -
    -

  3. To filter node pools by autoscaler status, select the autoscaler status in the upper right corner of the node pool list.
  4. In the node pool list, click a node pool name. On the node pool details page, view the basic information, advanced ECS settings, advanced Kubernetes settings, and node list of the node pool.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0013.html b/docs/cce/umn/cce_01_0013.html deleted file mode 100644 index 05431118..00000000 --- a/docs/cce/umn/cce_01_0013.html +++ /dev/null @@ -1,34 +0,0 @@ - - -

Managing Pods

-

Scenario

A pod is the smallest and simplest unit in the Kubernetes object model that you create or deploy. A pod encapsulates an application's container (or, in some cases, multiple containers), storage resources, a unique network identity (IP address), as well as options that govern how the container(s) should run. A pod represents a single instance of an application in Kubernetes, which might consist of either a single container or a small number of containers that are tightly coupled and that share resources.

-

Pods in a Kubernetes cluster can be used in either of the following ways:

-
  • Pods that run a single container. The "one-container-per-pod" model is the most common Kubernetes use case. In this case, a pod functions as a wrapper around a single container, and Kubernetes manages the pods rather than the containers directly.
  • Pods that run multiple containers that need to work together. A pod might encapsulate an application composed of multiple co-located containers that are tightly coupled and need to share resources. The possible scenarios are as follows:
    • Content management systems, file and data loaders, local cache managers, etc;
    • Log and checkpoint backup, compression, rotation, snapshotting, etc;
    • Data change watchers, log tailers, logging and monitoring adapters, event publishers, etc;
    • Proxies, bridges, adapters, etc;
    • Controllers, managers, configurators, and updaters
    -
-

You can easily manage pods on CCE, such as editing YAML files and monitoring pods.

-
-

Editing a YAML File

To edit and download the YAML file of a pod online, do as follows:

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Pods.
  2. Click Edit YAML at the same row as the target pod. In the Edit YAML dialog box displayed, modify the YAML file of the pod.
  3. Click Edit and then OK to save the changes.

    If a pod is created by another workload, its YAML file cannot be modified individually on the Pods page.

    -
    -

  4. (Optional) In the Edit YAML window, click Download to download the YAML file.
-
-

Monitoring Pods

On the CCE console, you can view the CPU and memory usage, upstream and downstream rates, and disk read/write rates of a workload pod to determine the required resource specifications.

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Pods.
  2. Click Monitoring at the same row as the target pod to view the CPU and memory usage, upstream and downstream rates, and disk read/write rates of the pod.

    You cannot view the monitoring data of a pod that is not running.

    -
    -

-
-

Deleting a Pod

If a pod is no longer needed, you can delete it. Deleted pods cannot be recovered. Exercise caution when performing this operation.

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Pods.
  2. Click Delete at the same row as the pod to be deleted.

    Read the system prompts carefully. A pod cannot be restored after it is deleted. Exercise caution when performing this operation.

    -

  3. Click Yes to delete the pod.

    • If the node where the pod is located is unavailable or shut down and the workload cannot be deleted, you can forcibly delete the pod from the pod list on the workload details page.
    • Ensure that the storage volumes to be deleted are not used by other workloads. If these volumes are imported or have snapshots, you can only unbind them.
    -
    -

-
- -
-
- -
- diff --git a/docs/cce/umn/cce_01_0014.html b/docs/cce/umn/cce_01_0014.html deleted file mode 100644 index 160930bf..00000000 --- a/docs/cce/umn/cce_01_0014.html +++ /dev/null @@ -1,711 +0,0 @@ - - -

LoadBalancer

-

Scenario

A workload can be accessed from public networks through a load balancer, which is more secure and reliable than EIP-based access.

-

The LoadBalancer access address is in the format of <IP address of public network load balancer>:<access port>, for example, 10.117.117.117:80.

-

In this access mode, requests are transmitted through an ELB load balancer to a node and then forwarded to the destination pod through the Service.

-
Figure 1 LoadBalancer
-
-

Notes and Constraints

  • LoadBalancer Services allow workloads to be accessed from public networks through ELB. This access mode has the following restrictions:
    • It is recommended that automatically created load balancers not be used by other resources. Otherwise, these load balancers cannot be completely deleted, causing residual resources.
    • Do not change the listener name for the load balancer in clusters of v1.15 and earlier. Otherwise, the load balancer cannot be accessed.
    -
  • After a Service is created, if the affinity setting is switched from the cluster level to the node level, the connection tracing table will not be cleared. You are advised not to modify the Service affinity setting after the Service is created. If you need to modify it, create a Service again.
  • If the service affinity is set to the node level (that is, externalTrafficPolicy is set to Local), the cluster may fail to access the Service by using the ELB address. For details, see Why a Cluster Fails to Access Services by Using the ELB Address.
  • CCE Turbo clusters support only cluster-level service affinity.
  • Dedicated ELB load balancers can be used only in clusters of v1.17 and later.
  • The specifications of dedicated load balancers must use TCP/UDP (network load balancing) and support private networks. If the Service needs to support HTTP, the specifications of dedicated load balancers must use HTTP (application load balancing) in addition to TCP/UDP (network load balancing).
  • If you create a LoadBalancer Service on the CCE console, a random node port is automatically generated. If you use kubectl to create a LoadBalancer Service, a random node port is generated unless you specify one.
  • In a CCE cluster, if cluster-level affinity is configured for a LoadBalancer Service, requests entering the cluster are distributed to the node ports on each node using SNAT, and the number of node ports used cannot exceed the number of available node ports on the node. If the Service affinity is at the node level (Local), there is no such constraint. In a CCE Turbo cluster, this constraint applies to shared ELB load balancers but not to dedicated ones. You are advised to use dedicated ELB load balancers in CCE Turbo clusters.
  • When the cluster service forwarding (proxy) mode is IPVS, the node IP cannot be configured as the external IP of the Service. Otherwise, the node is unavailable.
  • Dedicated load balancers are available only in certain regions.
-
-

Adding a Service When Creating a Workload

You can set the Service when creating a workload on the CCE console. An Nginx workload is used as an example.

-
  1. In the Set Application Access step of Creating a Deployment, Creating a StatefulSet, or Creating a DaemonSet, click Add Service and set the following parameters:

    • Access Type: Select LoadBalancer (ELB).
    • Service Name: Specify a Service name, which can be the same as the workload name.
    • Service Affinity:
      • Cluster level: The IP addresses and access ports of all nodes in a cluster can access the workload associated with the Service. Service access will cause performance loss due to route redirection, and the source IP address of the client cannot be obtained.
      • Node level: Only the IP address and access port of the node where the workload is located can access the workload associated with the Service. Service access will not cause performance loss due to route redirection, and the source IP address of the client can be obtained.
      -
    -

    ELB Configuration

    -
    • Elastic Load Balancer: A load balancer automatically distributes Internet access traffic to multiple nodes where the workload is located.
      • Shared: Shared load balancers provide domain name- and URL-based route balancing. Resources are shared among load balancers, and the performance of a load balancer is affected by other load balancers.
      • Dedicated: Resources are exclusively used by a load balancer, and the performance of a load balancer is not affected by other load balancers. IPv6 is supported.
        • AZ: Dedicated load balancers can be deployed across AZs to provide higher reliability.
        • Subnet: subnet where the backend server of the load balancer is located.

          Load balancers occupy different numbers of subnet IP addresses based on their specifications. Therefore, you are not advised to use the subnet CIDR blocks of other resources (such as clusters and nodes) as the load balancer subnet CIDR block.

          -
        • Specifications: Specifications determine the types of listeners that can be added to a load balancer. Select specifications that best fit your needs.
        -
      -

      You can create public network or private network load balancers.

      -
      • Public network: You can select an existing public network load balancer or have the system automatically create a new one.
      • Private network: You can select an existing private network load balancer or have the system automatically create a new private network load balancer.
      -
      The selected or created load balancer must be in the same VPC as the current cluster, and it must match the load balancer type (private or public network).
      • Enterprise Project: Select an enterprise project in which the load balancer is created.
      • Specifications: This field is displayed only when you select Public network and Automatically created for Elastic Load Balancer. You can click to modify the name, specifications, billing mode, and bandwidth of the load balancer.
      • Algorithm Type: Three algorithms are available: weighted round robin, weighted least connections, and source IP hash.
        • Weighted round robin: Requests are forwarded to different servers based on their weights, which indicate server processing performance. Backend servers with higher weights receive proportionately more requests, whereas equal-weighted servers receive the same number of requests. This algorithm is often used for short connections, such as HTTP services.
        • Weighted least connections: In addition to the weight assigned to each server, the number of connections processed by each backend server is also considered. Requests are forwarded to the server with the lowest connections-to-weight ratio. Building on least connections, the weighted least connections algorithm assigns a weight to each server based on their processing capability. This algorithm is often used for persistent connections, such as database connections.
        • Source IP hash: The source IP address of each request is calculated using the hash algorithm to obtain a unique hash key, and all backend servers are numbered. The generated key allocates the client to a particular server. This enables requests from different clients to be distributed in load balancing mode and ensures that requests from the same client are forwarded to the same server. This algorithm applies to TCP connections without cookies.
        -
        -
      • Sticky Session: This function is disabled by default. You can select Based on source IP address. Listeners ensure session stickiness based on IP addresses. Requests from the same IP address will be forwarded to the same backend server.
      • Health Check: This function is enabled by default. When it is enabled, the load balancer performs health checks on the backend servers. By default, the Service ports (node port and container port of the Service) are used for health checks. You can also specify another port for health checks. After the port is specified, a Service port (name: cce-healthz; protocol: TCP) will be added for the Service.
      -
      -
    • Port Settings
      • Protocol: protocol used by the Service.
      • Container Port: port defined in the container image and on which the workload listens. The Nginx application listens on port 80.
      • Access Port: port mapped to the container port at the load balancer's IP address. The workload can be accessed at <Load balancer's IP address>:<Access port>. The port number range is 1–65535.
      -
    -

  2. After the configuration is complete, click OK.
  3. On the workload creation page, click Next: Configure Advanced Settings. On the page displayed, click Create.
  4. After the workload is successfully created, choose Workloads > Deployments or Workloads > StatefulSets on the CCE console. Click the name of the workload to view its details. On the workload details page, click the Services tab and obtain the access address.

    -

  5. Click the access address.
-
-

Adding a Service After Creating a Workload

You can set the Service after creating a workload. This has no impact on the workload status and takes effect immediately. The procedure is as follows:

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Network.
  2. On the Services tab page, click Create Service.

    The parameters are the same as those in Adding a Service When Creating a Workload.

    -

  3. Click Create.
-
-

Using kubectl to Create a Service (Using an Existing Load Balancer)

You can set the access type when creating a workload using kubectl. This section uses an Nginx workload as an example to describe how to add a LoadBalancer Service using kubectl.

-
  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Create and edit the nginx-deployment.yaml and nginx-elb-svc.yaml files.

    The file names are user-defined. nginx-deployment.yaml and nginx-elb-svc.yaml are merely example file names.

    -

    vi nginx-deployment.yaml

    -
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: nginx
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
          - image: nginx
            name: nginx
          imagePullSecrets:
          - name: default-secret
    -

    -

    vi nginx-elb-svc.yaml

    -

    Before enabling sticky session, ensure that the following conditions are met:

    -
    • The workload protocol is TCP.
    • Anti-affinity has been configured between pods of the workload. That is, all pods of the workload are deployed on different nodes. For details, see Workload-Node Anti-Affinity.
    -
    -
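    The anti-affinity prerequisite can be met by adding pod anti-affinity to the Deployment defined in nginx-deployment.yaml. The following is a sketch using standard Kubernetes scheduling fields (it requires at least as many nodes as replicas):

    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx
    spec:
      replicas: 2
      selector:
        matchLabels:
          app: nginx
      template:
        metadata:
          labels:
            app: nginx
        spec:
          affinity:
            podAntiAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
              - labelSelector:
                  matchLabels:
                    app: nginx
                topologyKey: kubernetes.io/hostname   # at most one nginx pod per node
          containers:
          - image: nginx
            name: nginx
          imagePullSecrets:
          - name: default-secret

    The content of nginx-elb-svc.yaml is as follows: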
    apiVersion: v1 
    kind: Service
    metadata:
      annotations:
        kubernetes.io/elb.id: 3c7caa5a-a641-4bff-801a-feace27424b6          # Load balancer ID. Replace it with the actual value.
        kubernetes.io/elb.class: performance                               # Load balancer type
      name: nginx
    spec:
      ports:
      - name: service0
        port: 80
        protocol: TCP
        targetPort: 80
      selector:
        app: nginx
      type: LoadBalancer
    -
    Table 1 Key parameters

    Parameter

    -

    Mandatory

    -

    Type

    -

    Description

    -

    kubernetes.io/elb.class

    -

    No

    -

    String

    -

    Select a proper load balancer type as required.

    -

    The value can be:

    -
    • union: shared load balancer
    • performance: dedicated load balancer, which can be used only in clusters of v1.17 and later.
    -

    Default value: union

    -

    kubernetes.io/elb.session-affinity-mode

    -

    No

    -

    String

    -

    Listeners ensure session stickiness based on IP addresses. Requests from the same IP address will be forwarded to the same backend server.

    -
    • Disabling sticky session: Do not set this parameter.
    • Enabling sticky session: Set this parameter to SOURCE_IP, indicating that the sticky session is based on the source IP address.
    -

    kubernetes.io/elb.session-affinity-option

    -

    No

    -

    Table 2 Object

    -

    This parameter specifies the sticky session timeout.

    -

    kubernetes.io/elb.id

    -

    Yes

    -

    String

    -

    This parameter indicates the ID of a load balancer. The value can contain 1 to 100 characters.

    -

    Mandatory when an existing load balancer is to be associated.

    -

    Obtaining the load balancer ID:

    -

    On the management console, click Service List, and choose Networking > Elastic Load Balance. Click the name of the target load balancer. On the Summary tab page, find and copy the ID.

    -

    kubernetes.io/elb.subnet-id

    -

    -

    -

    String

    -

    This parameter indicates the ID of the subnet where the cluster is located. The value can contain 1 to 100 characters.

    -
    • Mandatory when a load balancer is to be automatically created for a cluster of v1.11.7-r0 or earlier.
    • Optional for clusters later than v1.11.7-r0.
    -

    kubernetes.io/elb.lb-algorithm

    -

    No

    -

    String

    -

    This parameter indicates the load balancing algorithm of the backend server group. The default value is ROUND_ROBIN.

    -

    Options:

    -
    • ROUND_ROBIN: weighted round robin algorithm
    • LEAST_CONNECTIONS: weighted least connections algorithm
    • SOURCE_IP: source IP hash algorithm
    -

    When the value is SOURCE_IP, the weight settings of backend servers in the server group do not take effect.

    -

    kubernetes.io/elb.health-check-flag

    -

    No

    -

    String

    -

    Whether to enable the ELB health check.

    -

    Enabled by default.

    -
    • Enabling health check: Leave this parameter blank or set it to on.
    • Disabling health check: Set this parameter to off.
    -

    kubernetes.io/elb.health-check-option

    -

    No

    -

    Table 3 Object

    -

    ELB health check configuration items.

    -

    port

    -

    Yes

    -

    Integer

    -

    Access port that is registered on the load balancer and mapped to the cluster-internal IP address.

    -

    targetPort

    -

    Yes

    -

    String

    -

    Container port set on the CCE console.

    -
    -
    -
    Table 2 Data structure of the elb.session-affinity-option field

    Parameter

    -

    Mandatory

    -

    Type

    -

    Description

    -

    persistence_timeout

    -

    Yes

    -

    String

    -

    Sticky session timeout, in minutes. This parameter is valid only when elb.session-affinity-mode is set to SOURCE_IP.

    -

    Value range: 1 to 60. Default value: 60

    -
    -
    -
    Table 3 Data structure description of the elb.health-check-option field

    Parameter

    -

    Mandatory

    -

    Type

    -

    Description

    -

    delay

    -

    No

    -

    String

    -

    Initial waiting time (in seconds) for starting the health check.

    -

    Value range: 1 to 50. Default value: 5

    -

    timeout

    -

    No

    -

    String

    -

    Health check timeout, in seconds.

    -

    Value range: 1 to 50. Default value: 10

    -

    max_retries

    -

    No

    -

    String

    -

    Maximum number of health check retries.

    -

    Value range: 1 to 10. Default value: 3

    -

    protocol

    -

    No

    -

    String

    -

    Health check protocol.

    -

    Default value: protocol of the associated Service

    -

    Value options: TCP, UDP_CONNECT, or HTTP

    -

    path

    -

    No

    -

    String

    -

    Health check URL. This parameter needs to be configured when the protocol is HTTP.

    -

    Default value: /

    -

    The value contains 1 to 10,000 characters.

    -
    -
    -
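    As an example of combining these annotations, the following Service enables source IP based sticky sessions with a 30-minute timeout and customizes the health check. This is a sketch; the annotation values are illustrative and follow Table 2 and Table 3, and the load balancer ID must be replaced with your own:

    apiVersion: v1
    kind: Service
    metadata:
      name: nginx
      annotations:
        kubernetes.io/elb.id: 3c7caa5a-a641-4bff-801a-feace27424b6   # Load balancer ID. Replace it with the actual value.
        kubernetes.io/elb.class: union
        kubernetes.io/elb.session-affinity-mode: SOURCE_IP
        kubernetes.io/elb.session-affinity-option: '{"persistence_timeout": "30"}'
        kubernetes.io/elb.health-check-flag: 'on'
        kubernetes.io/elb.health-check-option: '{"delay": "5", "timeout": "10", "max_retries": "3", "protocol": "TCP"}'
    spec:
      ports:
      - name: service0
        port: 80
        protocol: TCP
        targetPort: 80
      selector:
        app: nginx
      type: LoadBalancer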

  3. Create a workload.

    kubectl create -f nginx-deployment.yaml

    -

    If information similar to the following is displayed, the workload has been created.

    -
    deployment "nginx" created
    -

    kubectl get pod

    -

    If information similar to the following is displayed, the workload is running.

    -
    NAME                     READY     STATUS             RESTARTS   AGE
    nginx-2601814895-c1xwh   1/1       Running            0          6s
    -

  4. Create a Service.

    kubectl create -f nginx-elb-svc.yaml

    -

    If information similar to the following is displayed, the Service has been created.

    -
    service "nginx" created
    -

    kubectl get svc

    -

    If information similar to the following is displayed, the access type has been set successfully, and the workload is accessible.

    -
    NAME         TYPE           CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
    kubernetes   ClusterIP      10.247.0.1       <none>         443/TCP        3d
    nginx        LoadBalancer   10.247.130.196   10.78.42.242   80:31540/TCP   51s
    -

  5. Enter the URL in the address box of the browser, for example, 10.78.42.242:80. 10.78.42.242 indicates the IP address of the load balancer, and 80 indicates the access port displayed on the CCE console.

    The Nginx is accessible.

    -
    Figure 2 Accessing Nginx through the LoadBalancer Service
    -

-
-

Using kubectl to Create a Service (Automatically Creating a Load Balancer)

You can add a Service when creating a workload using kubectl. This section uses an Nginx workload as an example to describe how to add a LoadBalancer Service using kubectl.

-
  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Create and edit the nginx-deployment.yaml and nginx-elb-svc.yaml files.

    The file names are user-defined. nginx-deployment.yaml and nginx-elb-svc.yaml are merely example file names.

    -

    vi nginx-deployment.yaml

    -
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: nginx
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
          - image: nginx
            name: nginx
          imagePullSecrets:
          - name: default-secret
    -

    -

    vi nginx-elb-svc.yaml

    -

    Before enabling sticky session, ensure that the following conditions are met:

    -
    • The workload protocol is TCP.
    • Anti-affinity has been configured between pods of the workload. That is, all pods of the workload are deployed on different nodes. For details, see Workload-Node Anti-Affinity.
    -
    -
    Example of a Service using a shared, public network load balancer:
    apiVersion: v1 
    kind: Service
    metadata:
      annotations:
        kubernetes.io/elb.class: union
        kubernetes.io/elb.autocreate: 
            '{
                "type": "public",
                "bandwidth_name": "cce-bandwidth-1551163379627",
                "bandwidth_chargemode":"traffic",
                "bandwidth_size": 5,
                "bandwidth_sharetype": "PER",
                "eip_type": "5_bgp",
                "name": "james"
            }'
      labels:
        app: nginx
      name: nginx
    spec:
      ports:
      - name: service0
        port: 80
        protocol: TCP
        targetPort: 80
      selector:
        app: nginx
      type: LoadBalancer
    -
    -

    Example of a Service using a dedicated, public network load balancer:

    -
    apiVersion: v1
    kind: Service
    metadata:
      name: nginx
      labels:
        app: nginx
      namespace: default
      annotations:
        kubernetes.io/elb.class: performance
        kubernetes.io/elb.autocreate: 
            '{
                "type": "public",
                "bandwidth_name": "cce-bandwidth-1626694478577",
                "bandwidth_chargemode": "traffic",
                "bandwidth_size": 5,
                "bandwidth_sharetype": "PER",
                "eip_type": "5_bgp",
                "available_zone": [
                    "eu-de-01"
                ],
                "l4_flavor_name": "L4_flavor.elb.s1.small"
            }'
    spec:
      selector:
        app: nginx
      ports:
      - name: cce-service-0
        targetPort: 80
        nodePort: 0
        port: 80
        protocol: TCP
      type: LoadBalancer
    -
    Table 4 Key parameters

    Parameter

    -

    Mandatory

    -

    Type

    -

    Description

    -

    kubernetes.io/elb.class

    -

    No

    -

    String

    -

    Select a proper load balancer type as required.

    -

    The value can be:

    -
    • union: shared load balancer
    • performance: dedicated load balancer, which can be used only in clusters of v1.17 and later.
    -

    Default value: union

    -

    kubernetes.io/elb.subnet-id

    -

    -

    -

    String

    -

    This parameter indicates the ID of the subnet where the cluster is located. The value can contain 1 to 100 characters.

    -
    • Mandatory when a load balancer is to be automatically created for a cluster of v1.11.7-r0 or earlier.
    • Optional for clusters later than v1.11.7-r0.
    -

    kubernetes.io/elb.enterpriseID

    -

    No

    -

    String

    -

    Clusters of v1.15 and later versions support this field. In clusters earlier than v1.15, load balancers are created in the default project by default.

    -

    This parameter indicates the ID of the enterprise project in which the ELB load balancer will be created.

    -

    If this parameter is not specified or is set to 0, resources will be bound to the default enterprise project.

    -

    How to obtain:

    -

    Log in to the management console and choose Enterprise > Project Management on the top menu bar. In the list displayed, click the name of the target enterprise project, and copy the ID on the enterprise project details page.

    -

    kubernetes.io/elb.session-affinity-option

    -

    No

    -

    Table 2 Object

    -

    Sticky session timeout.

    -

    kubernetes.io/elb.autocreate

    -

    Yes

    -

    elb.autocreate object

    -

    Whether to automatically create a load balancer associated with the Service.

    -

    Example:

    -
    • Automatically created public network load balancer:

      {"type":"public","bandwidth_name":"cce-bandwidth-1551163379627","bandwidth_chargemode":"traffic","bandwidth_size":5,"bandwidth_sharetype":"PER","eip_type":"5_bgp","name":"james"}

      -
    • Automatically created private network load balancer:

      {"type":"inner","name":"A-location-d-test"}

      -
    -

    kubernetes.io/elb.lb-algorithm

    -

    No

    -

    String

    -

    This parameter indicates the load balancing algorithm of the backend server group. The default value is ROUND_ROBIN.

    -

    Options:

    -
    • ROUND_ROBIN: weighted round robin algorithm
    • LEAST_CONNECTIONS: weighted least connections algorithm
    • SOURCE_IP: source IP hash algorithm
    -

    When the value is SOURCE_IP, the weight settings of backend servers in the server group do not take effect.

    -

    kubernetes.io/elb.health-check-flag

    -

    No

    -

    String

    -

    Whether to enable the ELB health check.

    -

    Disabled by default.

    -
    • Enabling health check: Leave this parameter blank or set it to on.
    • Disabling health check: Set this parameter to off.
    -

    kubernetes.io/elb.health-check-option

    -

    No

    -

    Table 3 Object

    -

    ELB health check configuration items.

    -

    kubernetes.io/elb.session-affinity-mode

    -

    No

    -

    String

    -

    Listeners ensure session stickiness based on IP addresses. Requests from the same IP address will be forwarded to the same backend server.

    -
    • Disabling sticky session: Do not set this parameter.
    • Enabling sticky session: Set this parameter to SOURCE_IP, indicating that the sticky session is based on the source IP address.
    -

    kubernetes.io/elb.session-affinity-option

    -

    No

    -

    Table 2 Object

    -

    Sticky session timeout.

    -

    kubernetes.io/hws-hostNetwork

    -

    No

    -

    String

    -

    This parameter indicates whether the workload Services use the host network. Setting this parameter to true will enable the load balancer to forward requests to the host network.

    -

    The host network is not used by default. The value can be true or false.

    -

    externalTrafficPolicy

    -

    No

    -

    String

    -

    If sticky session is enabled, add this parameter so that requests are transferred to a fixed node. If a LoadBalancer Service with this parameter set to Local is created, a client can access the target backend only if the client is installed on the same node as the backend.

    -
    -
    -
    Table 5 Data structure of the elb.autocreate field

    Parameter

    -

    Mandatory

    -

    Type

    -

    Description

    -

    name

    -

    No

    -

    String

    -

    Name of the automatically created load balancer.

    -

    Value range: a string of 1 to 64 characters, including lowercase letters, digits, and underscores (_). The value must start with a lowercase letter and end with a lowercase letter or digit.

    -

    Default name: cce-lb+service.UID

    -

    type

    -

    No

    -

    String

    -

    Network type of the load balancer.

    -
    • public: public network load balancer
    • inner: private network load balancer
    -

    Default value: inner

    -

    bandwidth_name

    -

    Yes for public network load balancers

    -

    String

    -

    Bandwidth name. The default value is cce-bandwidth-******.

    -

    Value range: a string of 1 to 64 characters, including lowercase letters, digits, and underscores (_). The value must start with a lowercase letter and end with a lowercase letter or digit.

    -

    bandwidth_chargemode

    -

    No

    -

    String

    -

    Bandwidth billing mode.

    -
    • traffic: billed by traffic
    -

    bandwidth_size

    -

    Yes for public network load balancers

    -

    Integer

    -

    Bandwidth size. The value ranges from 1 Mbit/s to 2,000 Mbit/s. Set this parameter based on the bandwidth range allowed in your region.

    -

    bandwidth_sharetype

    -

    Yes for public network load balancers

    -

    String

    -

    Bandwidth sharing mode.

    -
    • PER: dedicated bandwidth
    -

    eip_type

    -

    Yes for public network load balancers

    -

    String

    -

    EIP type, which may vary depending on sites. For details, see the type parameter specified when creating an EIP.

    -
    • 5_bgp: dynamic BGP
    • 5_gray: dedicated load balancer
    -

    available_zone

    -

    Yes

    -

    Array of strings

    -

    AZ where the load balancer is located.

    -

    This parameter is available only for dedicated load balancers.

    -

    l4_flavor_name

    -

    Yes

    -

    String

    -

    Flavor name of the layer-4 load balancer.

    -

    This parameter is available only for dedicated load balancers.

    -

    l7_flavor_name

    -

    No

    -

    String

    -

    Flavor name of the layer-7 load balancer.

    -

    This parameter is available only for dedicated load balancers.

    -

    elb_virsubnet_ids

    -

    No

    -

    Array of strings

    -

    Subnet where the backend server of the load balancer is located. If this parameter is left blank, the default cluster subnet is used.

    -

    Load balancers occupy different numbers of subnet IP addresses based on their specifications. Therefore, you are not advised to use the subnet CIDR blocks of other resources (such as clusters and nodes) as the load balancer CIDR block.

    -

    This parameter is available only for dedicated load balancers.

    -

    Example:

    -
    "elb_virsubnet_ids": [
       "14567f27-8ae4-42b8-ae47-9f847a4690dd"
     ]
    -
    -
    -
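    For comparison, the following is a sketch of a Service that automatically creates a private network load balancer, using the inner type and name shown in the elb.autocreate example above (no EIP or bandwidth parameters are required):

    apiVersion: v1
    kind: Service
    metadata:
      name: nginx
      labels:
        app: nginx
      annotations:
        kubernetes.io/elb.class: union
        kubernetes.io/elb.autocreate: '{"type": "inner", "name": "A-location-d-test"}'
    spec:
      ports:
      - name: service0
        port: 80
        protocol: TCP
        targetPort: 80
      selector:
        app: nginx
      type: LoadBalancer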

  3. Create a workload.

    kubectl create -f nginx-deployment.yaml

    -

    If information similar to the following is displayed, the workload is being created.

    -
    deployment "nginx" created
    -

    kubectl get po

    -

    If information similar to the following is displayed, the workload is running.

    -
    NAME                     READY     STATUS             RESTARTS   AGE
    nginx-2601814895-c1xwh   1/1       Running            0          6s
    -

  4. Create a Service.

    kubectl create -f nginx-elb-svc.yaml

    -

    If information similar to the following is displayed, the Service has been created.

    -
    service "nginx" created
    -

    kubectl get svc

    -

    If information similar to the following is displayed, the access type has been set successfully, and the workload is accessible.

    -
    NAME         TYPE           CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
    kubernetes   ClusterIP      10.247.0.1       <none>         443/TCP        3d
    nginx        LoadBalancer   10.247.130.196   10.78.42.242   80:31540/TCP   51s
    -

  5. Enter the URL in the address box of the browser, for example, 10.78.42.242:80, where 10.78.42.242 is the IP address of the load balancer and 80 is the access port displayed on the CCE console. You can also check access from the command line, as shown after this procedure.

    Nginx is accessible.

    -
    Figure 3 Accessing Nginx through the LoadBalancer Service
    -

-
-
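
    As an alternative to the browser check in step 5, you can verify access from any host that can reach the load balancer. The IP address and port below are the ones shown in the example output:

    # Expect the Nginx welcome page in the response body.
    curl http://10.78.42.242:80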

Why a Cluster Fails to Access Services by Using the ELB Address

If the service affinity of a LoadBalancer Service is set to the node level, that is, the value of externalTrafficPolicy is Local, the ELB address may fail to be accessed from the cluster (specifically, nodes or containers).

-

This is because when the LoadBalancer Service is created, kube-proxy adds the ELB access address (external-ip) to iptables or IPVS. When the ELB address is accessed from inside the cluster, the ELB load balancer is bypassed and kube-proxy forwards the request directly. The exact behavior depends on the container network model and Service forwarding mode in use.

-
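
Before applying a workaround, you can confirm whether a Service is affected by checking its service affinity setting. A quick check, assuming the Service name nginx from the preceding example:

# Print the service affinity setting; "Local" means node-level affinity.
kubectl get svc nginx -o jsonpath='{.spec.externalTrafficPolicy}'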

The following methods can be used to solve this problem:

-
  • (Recommended) In the cluster, access the workload through the ClusterIP Service or the Service domain name (see the in-cluster access example after this list).
  • Set externalTrafficPolicy of the Service to Cluster, which means cluster-level service affinity. Note that this may affect source IP address persistence.
    apiVersion: v1 
    -kind: Service
    -metadata: 
    -  annotations:   
    -    kubernetes.io/elb.class: union
    -    kubernetes.io/elb.autocreate: '{"type":"public","bandwidth_name":"cce-bandwidth","bandwidth_chargemode":"traffic","bandwidth_size":5,"bandwidth_sharetype":"PER","eip_type":"5_bgp","name":"james"}'
    -  labels: 
    -    app: nginx 
    -  name: nginx 
    -spec: 
    -  externalTrafficPolicy: Cluster
    -  ports: 
    -  - name: service0 
    -    port: 80
    -    protocol: TCP 
    -    targetPort: 80
    -  selector: 
    -    app: nginx 
    -  type: LoadBalancer
    -
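
For the first (recommended) workaround, pods in the cluster can reach the workload through the cluster-internal Service address instead of the ELB address. A minimal check, assuming the Service nginx in the default namespace and an existing pod whose image contains curl:

# Access the workload through the Service domain name from inside an existing pod.
kubectl exec <pod_name> -- curl -s http://nginx.default.svc.cluster.local:80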
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0016.html b/docs/cce/umn/cce_01_0016.html deleted file mode 100644 index 737f296c..00000000 --- a/docs/cce/umn/cce_01_0016.html +++ /dev/null @@ -1,86 +0,0 @@ - - -

Using a Secret

-

The following secrets are used by the CCE system. Do not perform any operations on them.

-
  • Do not operate secrets under kube-system.
  • Do not operate default-secret or paas.elb in any namespace. default-secret is used to pull private images from SWR, and paas.elb is used to connect Services in the namespace to the ELB service.
-
- -

The following example shows how to use a secret.

-
apiVersion: v1
-kind: Secret
-metadata:
-  name: mysecret
-type: Opaque
-data:
-  username: ******  # The value must be Base64-encoded.
-  password: ******  # The value must be Base64-encoded.
-

When a secret is used in a pod, the pod and secret must be in the same cluster and namespace.
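
The Base64-encoded values above can be generated on any Linux host. For example, for sample plaintext values:

# -n prevents a trailing newline from being included in the encoded value.
echo -n 'my-username' | base64
echo -n 'my-password' | base64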

-
-

Configuring the Data Volume of a Pod

A secret can be used as a file in a pod. As shown in the following example, the username and password of the mysecret secret are saved in the /etc/foo directory as files.
apiVersion: v1
-kind: Pod
-metadata:
-  name: mypod
-spec:
-  containers:
-  - name: mypod
-    image: redis
-    volumeMounts:
-    - name: foo
-      mountPath: "/etc/foo"
-      readOnly: true
-  volumes:
-  - name: foo
-    secret:
-      secretName: mysecret
-
-
In addition, you can specify the directory and permission to access a secret. The username is then available in the /etc/foo/my-group/my-username file in the container.
apiVersion: v1
-kind: Pod
-metadata:
-  name: mypod
-spec:
-  containers:
-  - name: mypod
-    image: redis
-    volumeMounts:
-    - name: foo
-      mountPath: "/etc/foo"
-  volumes:
-  - name: foo
-    secret:
-      secretName: mysecret
-      items:
-      - key: username
-        path: my-group/my-username
-        mode: 511
-
-

To mount a secret to a data volume, you can also perform operations on the CCE console. When creating a workload, set advanced settings for the container, choose Data Storage > Local Volume, click Add Local Volume, and select Secret. For details, see Secret.

-
-

Setting Environment Variables of a Pod

A secret can be used as environment variables of a pod. As shown in the following example, the username and password of the mysecret secret are defined as environment variables of the pod.
apiVersion: v1
-kind: Pod
-metadata:
-  name: secret-env-pod
-spec:
-  containers:
-  - name: mycontainer
-    image: redis
-    env:
-      - name: SECRET_USERNAME
-        valueFrom:
-          secretKeyRef:
-            name: mysecret
-            key: username
-      - name: SECRET_PASSWORD
-        valueFrom:
-          secretKeyRef:
-            name: mysecret
-            key: password
-  restartPolicy: Never
-
-
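
Once the pod above is running, you can quickly confirm that the variables were injected; the pod name secret-env-pod is taken from the example:

# Print the environment variables injected from the secret.
kubectl exec secret-env-pod -- env | grep SECRET_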
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0018.html b/docs/cce/umn/cce_01_0018.html deleted file mode 100644 index a5f57dc6..00000000 --- a/docs/cce/umn/cce_01_0018.html +++ /dev/null @@ -1,217 +0,0 @@ - - -

Container Logs

-

Scenario

CCE allows you to configure policies for periodically collecting, managing, and analyzing workload logs, preventing logs from growing too large.

-

CCE works with AOM to collect workload logs. When a node is created, the ICAgent (the DaemonSet named icagent in the kube-system namespace of the cluster) of AOM is installed by default. After the ICAgent collects workload logs and reports them to AOM, you can view workload logs on the CCE or AOM console.
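
You can confirm that the ICAgent is running in your cluster by querying its DaemonSet:

# The DESIRED and READY counts should match the number of nodes.
kubectl get daemonset icagent -n kube-system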

-
  • By default, the ICAgent collects the standard outputs of containers. You do not need to perform any configuration.
  • You can also configure the path for storing container logs when creating a workload so that the ICAgent collects logs from this path.
    You can select either of the following modes for container logs:
    • HostPath: The host path is mounted to the specified container path (mount path). In the node host path, you can view the container logs output into the mount path.
    • EmptyDir: The temporary path of the node is mounted to the specified path (mount path). Log data that exists in the temporary path but is not reported by the collector to AOM will disappear after the pod is deleted.
    -
    -
-
-

Precautions

The ICAgent only collects *.log, *.trace, and *.out text log files.

-
-

Setting the Path for Storing Container Logs

  1. When creating a workload on the CCE console, add a container and expand Log Policies.
  2. In the Log Policies area, click Add Log Policy. Configure parameters in the log policy. The following uses Nginx as an example.

    Figure 1 Adding a log policy
    -

  3. Set Storage Type to Host Path or Container Path.

    -

    Table 1 Configuring log policies

    Parameter

    -

    Description

    -

    Storage Type

    -
    • Host Path: In HostPath mode, the host path is mounted to the specified container path (mount path). In the node host path, you can view the container logs output into the mount path.
    • Container Path: In EmptyDir mode, the temporary path of the node is mounted to the specified path (mount path). Log data that exists in the temporary path but is not reported by the collector to AOM will disappear after the pod is deleted.
    -

    Add Container Path

    -

    *Host Path

    -

    Enter the host path, for example, /var/paas/sys/log/nginx.

    -

    Container Path

    -
    Container path (for example, /tmp) to which the storage resources will be mounted.
    NOTICE:
    • Do not mount storage to a system directory such as / or /var/run; this may cause container errors. You are advised to mount the volume to an empty directory. If the directory is not empty, ensure that it contains no files that affect container startup. Otherwise, such files will be replaced, the container will fail to start, and the workload will fail to be created.
    • When the container is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.
    • AOM collects only the first 20 log files that have been modified recently. It collects files from 2 levels of subdirectories by default.
    • AOM only collects .log, .trace, and .out text log files in mounting paths.
    • For details about how to set permissions for mount points in a container, see Configure a Security Context for a Pod or Container.
    -
    -
    -

    Extended Host Path

    -

    This parameter is mandatory only if Storage Type is set to Host Path.

    -

    Extended host paths contain pod IDs or container names to distinguish different containers into which the host path is mounted.

    -

    A level-3 directory is added to the original volume directory or subdirectory, making it easy to obtain the files output by a single pod.

    -
    • None: No extended path is configured.
    • PodUID: ID of a pod.
    • PodName: name of a pod.
    • PodUID/ContainerName: pod ID and container name.
    • PodName/ContainerName: pod name and container name.
    -

    Log Dumping

    -

    Log dump refers to rolling log files on a local host.

    -
    • Enabled: AOM scans log files every minute. When a log file exceeds 50 MB, it is dumped immediately. A new .zip file is generated in the directory where the log file is located. For a log file, AOM stores only the latest 20 .zip files. When the number of .zip files exceeds 20, earlier .zip files will be deleted. After the dump is complete, the log file in AOM will be cleared.
    • Disabled: AOM does not dump log files.
    -
    NOTE:
    • Log file rolling of AOM is implemented in the copytruncate mode. Before enabling log dumping, ensure that log files are written in the append mode. Otherwise, file holes may occur.
    • Currently, mainstream log components such as Log4j and Logback support log file rolling. If your log files already support rolling, skip the configuration. Otherwise, conflicts may occur.
    • You are advised to configure log file rolling for your own services to flexibly control the size and number of rolled files.
    -
    -

    Multi-line Log

    -

    Some program logs (for example, Java program logs) contain messages that occupy multiple lines. By default, the log collection system collects logs line by line. If you want such a message to be displayed as a single log entry, enable the multi-line log function and use the log time or regular pattern mode. When a line matches the preset time format or regular expression, it is treated as the start of a new log message, and the following lines are treated as part of that message until the next match.

    -

    Split Mode

    -
    • Log Time: Enter a time wildcard. For example, if the time in the log is 2017-01-01 23:59:59, the wildcard is YYYY-MM-DD hh:mm:ss.
    • Regular Pattern: Enter a regular expression.
    -
    -
    -

  4. Click OK.
-
-

Using kubectl to Set the Container Log Storage Path

You can set the container log storage path by defining a YAML file.

-

In the following example, an emptyDir volume is mounted to /var/log/nginx so that the ICAgent collects logs from /var/log/nginx. The policy field is a CCE-specific extension that allows the ICAgent to identify and collect logs.

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: testlog
-  namespace: default
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: testlog
-  template:
-    metadata:
-      labels:
-        app: testlog
-    spec:
-      containers:
-        - image: 'nginx:alpine'
-          name: container-0
-          resources:
-            requests:
-              cpu: 250m
-              memory: 512Mi
-            limits:
-              cpu: 250m
-              memory: 512Mi
-          volumeMounts:
-            - name: vol-log
-              mountPath: /var/log/nginx
-              policy:
-                logs:
-                  rotate: ''
-      volumes:
-        - emptyDir: {}
-          name: vol-log
-      imagePullSecrets:
-        - name: default-secret
-

The following shows how to use the HostPath mode. Compared with the EmptyDir mode, the volume type is changed to hostPath, and a host path must be configured for this hostPath volume. In the following example, /tmp/log on the host is mounted to /var/log/nginx. In this way, the ICAgent collects logs from /var/log/nginx without deleting them from /tmp/log.

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: testlog
-  namespace: default
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: testlog
-  template:
-    metadata:
-      labels:
-        app: testlog
-    spec:
-      containers:
-        - image: 'nginx:alpine'
-          name: container-0
-          resources:
-            requests:
-              cpu: 250m
-              memory: 512Mi
-            limits:
-              cpu: 250m
-              memory: 512Mi
-          volumeMounts:
-            - name: vol-log
-              mountPath: /var/log/nginx
-              readOnly: false
-              extendPathMode: PodUID
-              policy:
-                logs:
-                  rotate: Hourly
-                  annotations:
-                    format: ''
-      volumes:
-        - hostPath:
-            path: /tmp/log
-          name: vol-log
-      imagePullSecrets:
-        - name: default-secret
- -
Table 2 Parameter description

Parameter

-

Explanation

-

Description

-

extendPathMode

-

Extended host path

-

Extended host paths contain pod IDs or container names to distinguish different containers into which the host path is mounted.

-

A level-3 directory is added to the original volume directory or subdirectory, making it easy to obtain the files output by a single pod.

-
  • None: No extended path is configured.
  • PodUID: ID of a pod.
  • PodName: name of a pod.
  • PodUID/ContainerName: pod ID and container name.
  • PodName/ContainerName: pod name and container name.
-

policy.logs.rotate

-

Log dumping

-

Log dump refers to rolling log files on a local host.

-
  • Enabled: AOM scans log files every minute. When a log file exceeds 50 MB, it is dumped immediately. A new .zip file is generated in the directory where the log file is located. For a log file, AOM stores only the latest 20 .zip files. When the number of .zip files exceeds 20, earlier .zip files will be deleted. After the dump is complete, the log file in AOM will be cleared.
  • Disabled: AOM does not dump log files.
-
NOTE:
  • Log file rolling of AOM is implemented in the copytruncate mode. Before enabling log dumping, ensure that log files are written in the append mode. Otherwise, file holes may occur.
  • Currently, mainstream log components such as Log4j and Logback support log file rolling. If your log files already support rolling, skip the configuration. Otherwise, conflicts may occur.
  • You are advised to configure log file rolling for your own services to flexibly control the size and number of rolled files.
-
-

policy.logs.annotations.format

-

Multi-line log matching

-

Some program logs (for example, Java program logs) contain messages that occupy multiple lines. By default, the log collection system collects logs line by line. If you want such a message to be displayed as a single log entry, enable the multi-line log function and use the log time or regular pattern mode. When a line matches the preset time format or regular expression, it is treated as the start of a new log message, and the following lines are treated as part of that message until the next match.

-

The format is as follows:

-
{
-    "multi": {
-        "mode": "time",
-        "value": "YYYY-MM-DD hh:mm:ss"
-    }
-}
-

multi indicates the multi-line mode.

-
  • time: log time. Enter a time wildcard. For example, if the time in the log is 2017-01-01 23:59:59, the wildcard is YYYY-MM-DD hh:mm:ss.
  • regular: regular pattern. Enter a regular expression (a sample configuration is shown below).
-
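
For the regular pattern mode, the value field carries the regular expression instead of a time wildcard. A sketch for logs whose messages start with a date (the pattern is only an illustration):

{
    "multi": {
        "mode": "regular",
        "value": "^\\d{4}-\\d{2}-\\d{2}"
    }
}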
-
-
-

Viewing Logs

After a log collection path is configured and the workload is created, the ICAgent collects log files from the configured path. The collection takes about 1 minute.

-

After the log collection is complete, go to the workload details page and click Logs in the upper right corner to view logs.

-

You can also view logs on the AOM console.

-

You can also run the kubectl logs command to view the standard output of a container.

-
# View logs of a specified pod.
-kubectl logs <pod_name>
-kubectl logs -f <pod_name> # Similar to tail -f
-
-# View logs of a specified container in a specified pod.
-kubectl logs <pod_name> -c <container_name>
-
-# View logs of a specified container in a specified pod and namespace (one-off query).
-kubectl logs <pod_name> -c <container_name> -n <namespace>
-# Continuously view logs in a specified namespace, similar to tail -f.
-kubectl logs -f <pod_name> -n <namespace>
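
If a workload has multiple pods, you can also query them together by label; app=testlog below matches the Deployment examples earlier in this section:

# View the last 100 log lines of every pod that carries the label.
kubectl logs -l app=testlog --tail=100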
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0019.html b/docs/cce/umn/cce_01_0019.html deleted file mode 100644 index 6567cb8c..00000000 --- a/docs/cce/umn/cce_01_0019.html +++ /dev/null @@ -1,11 +0,0 @@ - - -

Charts (Helm)

-
-
- -
- diff --git a/docs/cce/umn/cce_01_0020.html b/docs/cce/umn/cce_01_0020.html deleted file mode 100644 index 76ddc328..00000000 --- a/docs/cce/umn/cce_01_0020.html +++ /dev/null @@ -1,21 +0,0 @@ - - -

Networking

-
-
- -
- diff --git a/docs/cce/umn/cce_01_0023.html b/docs/cce/umn/cce_01_0023.html deleted file mode 100644 index 686278bc..00000000 --- a/docs/cce/umn/cce_01_0023.html +++ /dev/null @@ -1,28 +0,0 @@ - - -

kubectl Usage Guide

- -
- -
- diff --git a/docs/cce/umn/cce_01_0025.html b/docs/cce/umn/cce_01_0025.html deleted file mode 100644 index ce0be9fe..00000000 --- a/docs/cce/umn/cce_01_0025.html +++ /dev/null @@ -1,598 +0,0 @@ - - -

CCE Operations Supported by CTS

-

CTS is available only in certain regions.

-
- -
Table 1 CCE operations supported by CTS

Operation

-

Resource Type

-

Event Name

-

Creating an agency

-

Cluster

-

createUserAgencies

-

Creating a cluster

-

Cluster

-

createCluster

-

Updating the description of a cluster

-

Cluster

-

updateCluster

-

Upgrading a cluster

-

Cluster

-

clusterUpgrade

-

Deleting a cluster

-

Cluster

-

claimCluster/deleteCluster

-

Downloading a cluster certificate

-

Cluster

-

getClusterCertByUID

-

Binding and unbinding an EIP

-

Cluster

-

operateMasterEIP

-

Waking up a cluster and resetting node management (V2)

-

Cluster

-

operateCluster

-

Hibernating a cluster (V3)

-

Cluster

-

hibernateCluster

-

Waking up a cluster (V3)

-

Cluster

-

awakeCluster

-

Changing the specifications of a cluster

-

Cluster

-

resizeCluster

-

Modifying configurations of a cluster

-

Cluster

-

updateConfiguration

-

Creating a node pool

-

Node pool

-

createNodePool

-

Updating a node pool

-

Node pool

-

updateNodePool

-

Deleting a node pool

-

Node pool

-

claimNodePool

-

Migrating a node pool

-

Node pool

-

migrateNodepool

-

Modifying node pool configurations

-

Node pool

-

updateConfiguration

-

Creating a node

-

Node

-

createNode

-

Deleting all the nodes from a specified cluster

-

Node

-

deleteAllHosts

-

Deleting a single node

-

Node

-

deleteOneHost/claimOneHost

-

Updating the description of a node

-

Node

-

updateNode

-

Creating an add-on instance

-

Add-on instance

-

createAddonInstance

-

Deleting an add-on instance

-

Add-on instance

-

deleteAddonInstance

-

Uploading a chart

-

Chart

-

uploadChart

-

Updating a chart

-

Chart

-

updateChart

-

Deleting a chart

-

Chart

-

deleteChart

-

Creating a release

-

Release

-

createRelease

-

Upgrading a release

-

Release

-

updateRelease

-

Deleting a release

-

Release

-

deleteRelease

-

Creating a ConfigMap

-

Kubernetes resource

-

createConfigmaps

-

Creating a DaemonSet

-

Kubernetes resource

-

createDaemonsets

-

Creating a Deployment

-

Kubernetes resource

-

createDeployments

-

Creating an event

-

Kubernetes resource

-

createEvents

-

Creating an Ingress

-

Kubernetes resource

-

createIngresses

-

Creating a job

-

Kubernetes resource

-

createJobs

-

Creating a namespace

-

Kubernetes resource

-

createNamespaces

-

Creating a node

-

Kubernetes resource

-

createNodes

-

Creating a PersistentVolumeClaim

-

Kubernetes resource

-

createPersistentvolumeclaims

-

Creating a pod

-

Kubernetes resource

-

createPods

-

Creating a replica set

-

Kubernetes resource

-

createReplicasets

-

Creating a resource quota

-

Kubernetes resource

-

createResourcequotas

-

Creating a secret

-

Kubernetes resource

-

createSecrets

-

Creating a service

-

Kubernetes resource

-

createServices

-

Creating a StatefulSet

-

Kubernetes resource

-

createStatefulsets

-

Creating a volume

-

Kubernetes resource

-

createVolumes

-

Deleting a ConfigMap

-

Kubernetes resource

-

deleteConfigmaps

-

Deleting a DaemonSet

-

Kubernetes resource

-

deleteDaemonsets

-

Deleting a Deployment

-

Kubernetes resource

-

deleteDeployments

-

Deleting an event

-

Kubernetes resource

-

deleteEvents

-

Deleting an Ingress

-

Kubernetes resource

-

deleteIngresses

-

Deleting a job

-

Kubernetes resource

-

deleteJobs

-

Deleting a namespace

-

Kubernetes resource

-

deleteNamespaces

-

Deleting a node

-

Kubernetes resource

-

deleteNodes

-

Deleting a Pod

-

Kubernetes resource

-

deletePods

-

Deleting a replica set

-

Kubernetes resource

-

deleteReplicasets

-

Deleting a resource quota

-

Kubernetes resource

-

deleteResourcequotas

-

Deleting a secret

-

Kubernetes resource

-

deleteSecrets

-

Deleting a service

-

Kubernetes resource

-

deleteServices

-

Deleting a StatefulSet

-

Kubernetes resource

-

deleteStatefulsets

-

Deleting volumes

-

Kubernetes resource

-

deleteVolumes

-

Replacing a specified ConfigMap

-

Kubernetes resource

-

updateConfigmaps

-

Replacing a specified DaemonSet

-

Kubernetes resource

-

updateDaemonsets

-

Replacing a specified Deployment

-

Kubernetes resource

-

updateDeployments

-

Replacing a specified event

-

Kubernetes resource

-

updateEvents

-

Replacing a specified ingress

-

Kubernetes resource

-

updateIngresses

-

Replacing a specified job

-

Kubernetes resource

-

updateJobs

-

Replacing a specified namespace

-

Kubernetes resource

-

updateNamespaces

-

Replacing a specified node

-

Kubernetes resource

-

updateNodes

-

Replacing a specified PersistentVolumeClaim

-

Kubernetes resource

-

updatePersistentvolumeclaims

-

Replacing a specified pod

-

Kubernetes resource

-

updatePods

-

Replacing a specified replica set

-

Kubernetes resource

-

updateReplicasets

-

Replacing a specified resource quota

-

Kubernetes resource

-

updateResourcequotas

-

Replacing a specified secret

-

Kubernetes resource

-

updateSecrets

-

Replacing a specified service

-

Kubernetes resource

-

updateServices

-

Replacing a specified StatefulSet

-

Kubernetes resource

-

updateStatefulsets

-

Replacing the specified status

-

Kubernetes resource

-

updateStatus

-

Uploading a chart

-

Kubernetes resource

-

uploadChart

-

Updating a component template

-

Kubernetes resource

-

updateChart

-

Deleting a chart

-

Kubernetes resource

-

deleteChart

-

Creating a template application

-

Kubernetes resource

-

createRelease

-

Updating a template application

-

Kubernetes resource

-

updateRelease

-

Deleting a template application

-

Kubernetes resource

-

deleteRelease

-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0026.html b/docs/cce/umn/cce_01_0026.html deleted file mode 100644 index 9b0b379a..00000000 --- a/docs/cce/umn/cce_01_0026.html +++ /dev/null @@ -1,21 +0,0 @@ - - -

Querying CTS Logs

-

Scenario

After you enable CTS, the system starts recording operations on CCE resources. Operation records of the last 7 days can be viewed on the CTS management console.

-
-

Procedure

  1. Log in to the management console.
  2. Click the region selector in the upper left corner and select a region.
  3. Choose Service List from the main menu. Choose Management & Deployment > Cloud Trace Service.
  4. In the navigation pane of the CTS console, choose Cloud Trace Service > Trace List.
  5. On the Trace List page, query operation records based on the search criteria. Currently, the trace list supports trace query based on the combination of the following search criteria:

    • Trace Source, Resource Type, and Search By

      Select the search criteria from the drop-down lists. Select CCE from the Trace Source drop-down list.

      -

      If you select Trace name from the Search By drop-down list, specify the trace name.

      -

      If you select Resource ID from the Search By drop-down list, select or enter a specific resource ID.

      -

      If you select Resource name from the Search By drop-down list, select or enter a specific resource name.

      -
    • Operator: Select a specific operator (at user level rather than account level).
    • Trace Status: Set this parameter to any of the following values: All trace statuses, normal, warning, and incident.
    • Time range: You can query traces generated during any time range in the last seven days.
    -

  6. Click the expand icon on the left of a trace to expand its details, as shown below.

    Figure 1 Expanding trace details
    -

  7. Click View Trace in the Operation column. The trace details are displayed.

    Figure 2 Viewing event details
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0027.html b/docs/cce/umn/cce_01_0027.html deleted file mode 100644 index 5c0cd79e..00000000 --- a/docs/cce/umn/cce_01_0027.html +++ /dev/null @@ -1,31 +0,0 @@ - - -

Clusters

-
-
- -
- diff --git a/docs/cce/umn/cce_01_0028.html b/docs/cce/umn/cce_01_0028.html deleted file mode 100644 index 161b419f..00000000 --- a/docs/cce/umn/cce_01_0028.html +++ /dev/null @@ -1,233 +0,0 @@ - - -

Creating a CCE Cluster

-

On the CCE console, you can easily create Kubernetes clusters. Kubernetes can manage container clusters at scale. A cluster manages a group of node resources.

-

In CCE, you can create a CCE cluster to manage VMs as nodes. By using high-performance network models, hybrid clusters provide a multi-scenario, secure, and stable runtime environment for containers.

-

Notes and Constraints

  • During the node creation, software packages are downloaded from OBS using the domain name. You need to use a private DNS server to resolve the OBS domain name, and configure the subnet where the node resides with a private DNS server address. When you create a subnet, the private DNS server is used by default. If you change the subnet DNS, ensure that the DNS server in use can resolve the OBS domain name.
  • You can create a maximum of 50 clusters in a single region.
  • After a cluster is created, the following items cannot be changed:
    • Number of master nodes in the cluster.
    • AZ of a master node.
    • Network configuration of the cluster, such as the VPC, subnet, container CIDR block, Service CIDR block, and kube-proxy (forwarding) settings.
    • Network model. For example, change the tunnel network to the VPC network.
    -
-
-

Procedure

  1. Log in to the CCE console. On the Dashboard page, click Create Cluster. Alternatively, choose Resource Management > Clusters in the navigation pane and click Create next to CCE Cluster.
  2. Set cluster parameters by referring to Table 1. Pay attention to the parameters marked with an asterisk (*).

    -

    Table 1 Parameters for creating a cluster

    Parameter

    -

    Description

    -

    Region

    -

    Select a region near you to ensure the lowest latency possible.

    -

    *Cluster Name

    -

    Name of the new cluster, which cannot be changed after the cluster is created.

    -

    A cluster name contains 4 to 128 characters starting with a letter and not ending with a hyphen (-). Only lowercase letters, digits, and hyphens (-) are allowed.

    -

    Version

    -

    Kubernetes community baseline version. The latest version is recommended.

    -

    If a Beta version is available, you can use it for trial. However, it is not recommended for commercial use.

    -

    Management Scale

    -

    Maximum number of worker nodes that can be managed by the master nodes of the current cluster. You can select 50 nodes, 200 nodes, or 1,000 nodes for your cluster, or 2,000 nodes if you are buying a cluster of v1.15.11 or later.

    -

    If you select 1000 nodes, the master nodes of the cluster can manage a maximum of 1000 worker nodes. The configuration fee varies depending on the specifications of master nodes for different management scales.

    -

    Number of master nodes

    -

    3: Three master nodes will be created to make the cluster highly available. If a master node is faulty, the cluster can still be available without affecting service functions. Click Change. In the dialog box displayed, you can configure the following parameters:

    -

    Disaster recovery level

    -
    • AZ: Master nodes are deployed in different AZs for disaster recovery.
    • Fault domain: Master nodes are deployed in different failure domains in the same AZ for disaster recovery. This option is displayed only when the environment supports failure domains.
    • Host computer: Master nodes are deployed on different hosts in the same AZ for disaster recovery.
    • Customize: You can select different locations to deploy different master nodes. In the fault domain mode, master nodes must be in the same AZ.
    -

    1: Only one master node is created in the cluster, which cannot ensure SLA for the cluster. Single-master clusters (non-HA clusters) are not recommended for commercial scenarios. Click Change. In the AZ Settings dialog box, select an AZ for the master node.

    -
    NOTE:
    • You are advised to create multiple master nodes to improve the cluster DR capability in commercial scenarios.
    • The multi-master mode cannot be changed after the cluster is created. A single-master cluster cannot be upgraded to a multi-master cluster. For a single-master cluster, if a master node is faulty, services will be affected.
    • To ensure reliability, the multi-master mode is enabled by default for a cluster with 1,000 or more nodes.
    -
    -

    *VPC

    -

    VPC where the cluster is located. The value cannot be changed after the cluster is created.

    -

    A VPC provides a secure and logically isolated network environment.

    -

    If no VPC is available, click Create a VPC to create a VPC. After the VPC is created, click the refresh icon.

    -

    *Subnet

    -

    Subnet where the node VM runs. The value cannot be changed after the cluster is created.

    -

    A subnet provides dedicated network resources that are logically isolated from other networks for network security.

    -

    If no subnet is available, click Create Subnet to create a subnet. After the subnet is created, click the refresh icon. For details about the relationship between VPCs, subnets, and clusters, see Cluster Overview.

    -

    During the node creation, software packages are downloaded from OBS using the domain name. You need to use a private DNS server to resolve the OBS domain name, and configure the subnet where the node resides with a private DNS server address. When you create a subnet, the private DNS server is used by default. If you change the subnet DNS, ensure that the DNS server in use can resolve the OBS domain name.

    -

    The selected subnet cannot be changed after the cluster is created.

    -

    Network Model

    -

    After a cluster is created, the network model cannot be changed. Exercise caution when selecting a network model. For details about how to select a network model, see Overview.

    -

    VPC network

    -

    In this network model, each node occupies one VPC route. The number of VPC routes supported by the current region and the number of container IP addresses that can be allocated to each node (that is, the maximum number of pods that can be created) are displayed on the console.

    -
    • The container network uses VPC routes to integrate with the underlying network. This network model is applicable to performance-intensive scenarios. However, each node occupies one VPC route, and the maximum number of nodes allowed in a cluster depends on the VPC route quota.
    • Each node is assigned a CIDR block of a fixed size. VPC networks are free from packet encapsulation overheads and outperform container tunnel networks. In addition, as VPC routing includes routes to node IP addresses and the container CIDR block, container pods in the cluster can be directly accessed from outside the cluster.
      NOTE:
      • In the VPC network model, extended CIDR blocks and network policies are not supported.
      • When creating multiple clusters using the VPC network model in one VPC, select a CIDR block for each cluster that does not overlap with the VPC address or other container CIDR blocks.
      -
      -
    -

    Tunnel network

    -
    • The container network is an overlay tunnel network on top of a VPC network and uses the VXLAN technology. This network model is applicable when there are no high performance requirements.
    • VXLAN encapsulates Ethernet packets as UDP packets for tunnel transmission. Though at some cost of performance, the tunnel encapsulation enables higher interoperability and compatibility with advanced features (such as network policy-based isolation), meeting the requirements of most applications.
    -

    Container Network Segment

    -

    An IP address range that can be allocated to container pods. After the cluster is created, the value cannot be changed.

    -
    • If Automatically select is deselected, enter a CIDR block manually. If the CIDR block you specify conflicts with a subnet CIDR block, the system prompts you to select another CIDR block. The recommended CIDR blocks are 10.0.0.0/8-18, 172.16.0.0/16-18, and 192.168.0.0/16-18.

      If different clusters share a container CIDR block, an IP address conflict will occur and access to applications may fail.

      -
    • If Automatically select is selected, the system automatically assigns a CIDR block that does not conflict with any subnet CIDR block.
    -

    The mask of the container CIDR block must be appropriate. It determines the number of available nodes in a cluster. A too small mask value will cause the cluster to soon fall short of nodes. After the mask is set, the estimated maximum number of containers supported by the current CIDR block will be displayed.

    -

    Service Network Segment

    -

    An IP address range that can be allocated to Kubernetes Services. After the cluster is created, the value cannot be changed. The Service CIDR block cannot conflict with the created route. If they conflict, select another CIDR block.

    -
    • Default: The default CIDR block 10.247.0.0/16 will be used.
    • Custom: Manually set a CIDR block and mask based on service requirements. The mask determines the maximum number of Service IP addresses available in the cluster.
    -

    Authorization Mode

    -

    RBAC is selected by default and cannot be deselected.

    -

    After RBAC is enabled, IAM users access resources in the cluster according to fine-grained permissions policies. For details, see Namespace Permissions (Kubernetes RBAC-based).

    -

    Authentication Mode

    -

    The authentication mechanism controls user permission on resources in a cluster.

    -

    The X.509-based authentication mode is enabled by default. X.509 is a commonly used certificate format.

    -

    If you want to perform permission control on the cluster, select Enhanced authentication. The cluster will identify users based on the header of the request for authentication.

    -

    You need to upload your own CA certificate, client certificate, and client certificate private key (for details about how to create a certificate, see Certificates), and select I have confirmed that the uploaded certificates are valid.

    -
    CAUTION:
    • Upload a file smaller than 1 MB. The CA certificate and client certificate can be in .crt or .cer format. The private key of the client certificate can only be uploaded unencrypted.
    • The validity period of the client certificate must be longer than five years.
    • The uploaded CA certificate is used for both the authentication proxy and the kube-apiserver aggregation layer configuration. If the certificate is invalid, the cluster cannot be created.
    -
    -

    Cluster Description

    -

    Optional. Enter the description of the new container cluster.

    -

    Advanced Settings

    -

    Click Advanced Settings to expand the details page. The following functions are supported (unsupported functions in current AZs are hidden):

    -

    Service Forwarding Mode

    -
    • iptables: Traditional kube-proxy uses iptables rules to implement Service load balancing. In this mode, too many iptables rules will be generated when many Services are deployed. In addition, non-incremental updates will cause a latency and even obvious performance issues in the case of heavy service traffic.
    • ipvs: optimized kube-proxy mode to achieve higher throughput and faster speed, ideal for large-sized clusters. This mode supports incremental updates and can keep connections uninterrupted during Service updates.

      In this mode, when the ingress and Service use the same ELB instance, the ingress cannot be accessed from the nodes and containers in the cluster.

      -
    -
    NOTE:
    • ipvs provides better scalability and performance for large clusters.
    • Compared with iptables, ipvs supports more complex load balancing algorithms such as least load first (LLF) and weighted least connections (WLC).
    • ipvs supports server health checking and connection retries.
    -
    -

    CPU Policy

    -

    This parameter is displayed only for clusters of v1.13.10-r0 and later.

    -
    • On: Exclusive CPU cores can be allocated to workload pods. Select On if your workload is sensitive to latency in CPU cache and scheduling.
    • Off: Exclusive CPU cores will not be allocated to workload pods. Select Off if you want a large pool of shareable CPU cores.
    -

    For details about CPU management policies, see Feature Highlight: CPU Manager.

    -

    After CPU Policy is enabled, workloads cannot be started or created on nodes after the node specifications are changed.

    -

    Open EIP

    -

    An independent public IP address that is reachable from public networks. Select an EIP that has not been bound to any node. A cluster's EIP is preset in the cluster's certificate. Do not delete the EIP after the cluster has been created. Otherwise, two-way authentication will fail.

    -
    • Do not configure: The cluster's master node will not have an EIP.
    • Configure now: If no EIP is available for selection, create one.
    -
    -
    -

  3. Click Next: Create Node and set the following parameters.

    • Create Node
      • Create now: Create a node when creating a cluster. Currently, only VM nodes are supported. If a node fails to be created, the cluster will be rolled back.
      • Create later: No node will be created. Only an empty cluster will be created.
      -
    • Current Region: geographic location of the nodes to be created.
    • AZ: Set this parameter based on the site requirements. An AZ is a physical region where resources use independent power supply and networks. AZs are physically isolated but interconnected through an internal network.

      You are advised to deploy worker nodes in different AZs after the cluster is created to make your workloads more reliable. When creating a cluster, you can deploy nodes only in one AZ.

      -
    • Node Type
      • VM node: A VM node will be created in the cluster.
      -
    • Node Name: Enter a node name. A node name contains 1 to 56 characters starting with a lowercase letter and not ending with a hyphen (-). Only lowercase letters, digits, and hyphens (-) are allowed.
    • Specifications: Select node specifications that best fit your business needs.
      • General-purpose: provides a balance of computing, memory, and network resources. It is a good choice for many applications, such as web servers, workload development, workload testing, and small-scale databases.
      • Memory-optimized: provides higher memory capacity than general-purpose nodes and is suitable for relational databases, NoSQL, and other workloads that are both memory-intensive and data-intensive.
      • GPU-accelerated: provides powerful floating-point computing and is suitable for real-time, highly concurrent massive computing. Graphical processing units (GPUs) of P series are suitable for deep learning, scientific computing, and CAE. GPUs of G series are suitable for 3D animation rendering and CAD. GPU-accelerated nodes can be created only in clusters of v1.11 or later. GPU-accelerated nodes are available only in certain regions.
      • General computing-plus: provides stable performance and exclusive resources to enterprise-class workloads with high and stable computing performance.
      • Disk-intensive: supports local disk storage and provides high network performance. It is designed for workloads requiring high throughput and data switching, such as big data workloads.
      -

      To ensure node stability, CCE automatically reserves some resources to run necessary system components. For details, see Formula for Calculating the Reserved Resources of a Node.

      -
    • OS: Select an OS for the node to be created. -

      Reinstalling the OS or modifying OS configurations could make the node unavailable. Exercise caution when performing these operations.

      -
    • System Disk: Set the system disk space of the worker node. The value ranges from 40 GB to 1,024 GB. The default value is 40 GB.

      By default, system disks support Common I/O (SATA), High I/O (SAS), and Ultra-high I/O (SSD) EVS disks.

      -
      Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption function. This function is available only in certain regions.
      • Encryption is not selected by default.
      • After you select Encryption, you can select an existing key in the displayed Encryption Setting dialog box. If no key is available, click the link next to the drop-down box to create a key. After the key is created, click the refresh icon.
      -
      -
    • Data Disk: Set the data disk space of the worker node. The value ranges from 100 GB to 32,768 GB. The default value is 100 GB. The EVS disk types provided for the data disk are the same as those for the system disk.

      If the data disk is uninstalled or damaged, the Docker service becomes abnormal and the node becomes unavailable. You are advised not to delete the data disk.

      -
      -
      • LVM: If this option is selected, CCE data disks are managed by the Logical Volume Manager (LVM). On this condition, you can adjust the disk space allocation for different resources. This option is selected for the first disk by default and cannot be unselected. You can choose to enable or disable LVM for new data disks.
        • This option is selected by default, indicating that LVM management is enabled.
        • You can deselect the check box to disable LVM management.
          • Disk space of the data disks managed by LVM will be allocated according to the ratio you set.
          • When creating a node in a cluster of v1.13.10 or later, if LVM is not selected for a data disk, follow instructions in Adding a Second Data Disk to a Node in a CCE Cluster to fill in the pre-installation script and format the data disk. Otherwise, the data disk will still be managed by LVM.
          • When creating a node in a cluster earlier than v1.13.10, you must format the data disks that are not managed by LVM. Otherwise, either these data disks or the first data disk will be managed by LVM.
          -
          -
        -
      • Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption function.
        This function is supported only for clusters of v1.13.10 or later in certain regions, and is not displayed for clusters of v1.13.10 or earlier.
        • Encryption is not selected by default.
        • After you select Encryption, you can select an existing key in the displayed Encryption Setting dialog box. If no key is available, click the link next to the drop-down box to create a key. After the key is created, click the refresh icon.
        -
        -
      • Add Data Disk: Currently, a maximum of two data disks can be attached to a node. After the node is created, you can go to the ECS console to attach more data disks. This function is available only to clusters of certain versions.
      • Data disk space allocation: Click to specify the resource ratio for Kubernetes Space and User Space. Disk space of the data disks managed by LVM will be allocated according to the ratio you set. This function is available only to clusters of certain versions.
        • Kubernetes Space: You can specify the ratio of the data disk space for storing Docker and kubelet resources. Docker resources include the Docker working directory, Docker images, and image metadata. kubelet resources include pod configuration files, secrets, and emptyDirs.

          The Docker space cannot be less than 10%, and the space size cannot be less than 60 GB. The kubelet space cannot be less than 10%.

          -

          The Docker space size is determined by your service requirements. For details, see Data Disk Space Allocation.

          -
        • User Space: You can set the ratio of the disk space that is not allocated to Kubernetes resources and the path to which the user space is mounted.

          Note that the mount path cannot be /, /home/paas, /var/paas, /var/lib, /var/script, /var/log, /mnt/paas, or /opt/cloud, and cannot conflict with the system directories (such as bin, lib, home, root, boot, dev, etc, lost+found, mnt, proc, sbin, srv, tmp, var, media, opt, selinux, sys, and usr). Otherwise, the system or node installation will fail.

          -
          -
        -
      -
      If the cluster version is v1.13.10-r0 or later and the node specification is Disk-intensive, the following options are displayed for data disks:
      • EVS: Parameters are the same as those when the node type is not Disk-intensive. For details, see Data Disk above.
      • Local disk: Local disks may break down and do not ensure data reliability. It is recommended that you store service data in EVS disks, which are more reliable than local disks.
        Local disk parameters are as follows:
        • Disk Mode: If the node type is disk-intensive, the supported disk mode is HDD.
        • Read/Write Mode: When multiple local disks exist, you can set the read/write mode. The serial and sequential modes are supported. Sequential indicates that data is read and written in linear mode. When a disk is used up, the next disk is used. Serial indicates that data is read and written in striping mode, allowing multiple local disks to be read and written at the same time.
        • Kubernetes Space: You can specify the ratio of the data disk space for storing Docker and kubelet resources. Docker resources include the Docker working directory, Docker images, and image metadata. kubelet resources include pod configuration files, secrets, and emptyDirs.
        • User Space: You can set the ratio of the disk space that is not allocated to Kubernetes resources and the path to which the user space is mounted.
        -
        -
      -
      • The ratio of disk space allocated to the Kubernetes space and user space must be equal to 100% in total. You can click to refresh the data after you have modified the ratio.
      • By default, disks run in the direct-lvm mode. If data disks are removed, the loop-lvm mode will be used and this will impair system stability.
      -
      -
      -
    • VPC: A VPC where the current cluster is located. This parameter cannot be changed and is displayed only for clusters of v1.13.10-r0 or later.
    • Subnet: A subnet improves network security by providing exclusive network resources that are isolated from other networks. You can select any subnet in the cluster VPC. Cluster nodes can belong to different subnets.

      During the node creation, software packages are downloaded from OBS using the domain name. You need to use a private DNS server to resolve the OBS domain name, and configure the subnet where the node resides with a private DNS server address. When you create a subnet, the private DNS server is used by default. If you change the subnet DNS, ensure that the DNS server in use can resolve the OBS domain name.

      -

      -
    • EIP: an independent public IP address. If the nodes to be created require public network access, select Automatically assign or Use existing.
      An EIP bound to the node allows public network access. EIP bandwidth can be modified at any time. An ECS without a bound EIP cannot access the Internet or be accessed by public networks.
      • Do not use: A node without an EIP cannot be accessed from public networks. It can be used only as a cloud server for deploying services or clusters on a private network.
      • Automatically assign: An EIP with specified configurations is automatically assigned to each node. If the number of EIPs is smaller than the number of nodes, the EIPs are randomly bound to the nodes.

        Configure the EIP specifications, billing factor, bandwidth type, and bandwidth size as required. When creating an ECS, ensure that the elastic IP address quota is sufficient.

        -
      • Use existing: Existing EIPs are assigned to the nodes to be created.
      -

      By default, VPC's SNAT feature is disabled for CCE. If SNAT is enabled, you do not need to use EIPs to access public networks. For details about SNAT, see Custom Policies.

      -
      -
      -
    • Login Mode:
      • Key pair: Select the key pair used to log in to the node. You can select a shared key.

        A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create a key pair.

        -

        When creating a node using a key pair, IAM users can select only the key pairs created by themselves, regardless of whether these users are in the same group. For example, user B cannot use the key pair created by user A to create a node, and the key pair is not displayed in the drop-down list on the CCE console.

        -
        -
      -
    • Advanced ECS Settings (optional): Click to show advanced ECS settings.
      • ECS Group: An ECS group logically groups ECSs. The ECSs in the same ECS group comply with the same policy associated with the ECS group.
        • Anti-affinity: ECSs in an ECS group are deployed on different physical hosts to improve service reliability.
        -

        Select an existing ECS group, or click Create ECS Group to create one. After the ECS group is created, click the refresh button.

        -
      • Resource Tags: By adding tags to resources, you can classify resources.

        You can create predefined tags in Tag Management Service (TMS). Predefined tags are visible to all service resources that support the tagging function. You can use predefined tags to improve tag creation and migration efficiency.

        -

        CCE will automatically create the "CCE-Dynamic-Provisioning-Node=node id" tag. A maximum of 5 tags can be added.

        -
      • Agency: An agency is created by a tenant administrator on the IAM console. By creating an agency, you can share your cloud server resources with another account, or entrust a more professional person or team to manage your resources. To authorize an ECS or BMS to call cloud services, select Cloud service as the agency type, click Select, and then select ECS BMS.
      • Pre-installation Script: Enter a maximum of 1,000 characters.

        The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed. The script is usually used to format data disks (a minimal example is provided after this step).

        -
      • Post-installation Script: Enter a maximum of 1,000 characters.

        The script will be executed after Kubernetes software is installed and will not affect the installation. The script is usually used to modify Docker parameters.

        -
      • Subnet IP Address: Select Automatically assign IP address (recommended) or Manually assigning IP addresses.
      -
    • Advanced Kubernetes Settings: (Optional) Click to show advanced cluster settings.
      • Max Pods: maximum number of pods that can be created on a node, including the system's default pods. If the cluster uses the VPC network model, the maximum value is determined by the number of IP addresses that can be allocated to containers on each node.

        This limit prevents the node from being overloaded by managing too many pods. For details, see Maximum Number of Pods That Can Be Created on a Node.

        -
      • Maximum Data Space per Container: maximum data space that can be used by a container. The value ranges from 10 GB to 500 GB. If the value of this field is larger than the data disk space allocated to Docker resources, the latter will override the value specified here. Typically, 90% of the data disk space is allocated to Docker resources. This parameter is displayed only for clusters of v1.13.10-r0 and later.
      -
    • Nodes: The value cannot exceed the management scale you select when configuring cluster parameters. Set this parameter based on service requirements and the remaining quota displayed on the page. Click to view the factors that affect the number of nodes to be added (depending on the factor with the minimum value).
    -
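
    The following is a minimal sketch of a pre-installation script that formats and mounts an extra data disk, as mentioned in Pre-installation Script above. The device name /dev/vdb and mount point /data are assumptions and must match the disk actually attached to your node; for the authoritative procedure, see Adding a Second Data Disk to a Node in a CCE Cluster.

    #!/bin/bash
    # Format the additional raw data disk and mount it persistently.
    mkfs.ext4 /dev/vdb
    mkdir -p /data
    mount /dev/vdb /data
    echo '/dev/vdb /data ext4 defaults 0 0' >> /etc/fstab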

  4. Click Next: Install Add-on, and select the add-ons to be installed in the Install Add-on step.

    System resource add-ons must be installed. Advanced functional add-ons are optional.

    -

    You can also install all add-ons after the cluster is created. To do so, choose Add-ons in the navigation pane of the CCE console and select the add-on you will install. For details, see Add-ons.

    -

  5. Click Next: Confirm. Read the product instructions and select I am aware of the above limitations. Confirm the configured parameters, specifications, and fees.
  6. Click Submit.

    It takes about 6 to 10 minutes to create a cluster. You can click Back to Cluster List to perform other operations on the cluster or click Go to Cluster Events to view the cluster details. If the cluster status is Available, the cluster is successfully created.

    -

-
-

Related Operations

-
  • Create a namespace. You can create multiple namespaces in a cluster and organize resources in the cluster into different namespaces. These namespaces serve as logical groups and can be managed separately. For more information about how to create a namespace for a cluster, see Namespaces.
  • Create a workload. Once the cluster is created, you can use an image to create an application that can be accessed from public networks. For details, see Creating a Deployment or Creating a StatefulSet.
  • Click the cluster name to view cluster details. -
    Table 2 Cluster details

    Tab

    -

    Description

    -

    Cluster Details

    -

    View the details and operating status of the cluster.

    -

    Monitoring

    -

    You can view the CPU and memory allocation rates of all nodes in the cluster (that is, the maximum allocated amount), as well as the CPU usage, memory usage, and specifications of the master node(s).

    -

    Events

    -
    • View cluster events on the Events tab page.
    • Set search criteria. For example, you can set the time segment or enter an event name to view corresponding events.
    -

    Auto Scaling

    -

    You can configure auto scaling to add or reduce worker nodes in a cluster to meet service requirements. For details, see Setting Cluster Auto Scaling.

    -

    Clusters of v1.17 do not support auto scaling using AOM. You can use node pools for auto scaling. For details, see Node Pool Overview.

    -

    kubectl

    -

    To access a Kubernetes cluster from a PC, you need to use the Kubernetes command line tool kubectl. For details, see Connecting to a Cluster Using kubectl.

    -
    -
    -
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0030.html b/docs/cce/umn/cce_01_0030.html deleted file mode 100644 index ece577f5..00000000 --- a/docs/cce/umn/cce_01_0030.html +++ /dev/null @@ -1,17 +0,0 @@ - - -

Namespaces

-
-
- -
- diff --git a/docs/cce/umn/cce_01_0031.html b/docs/cce/umn/cce_01_0031.html deleted file mode 100644 index 7891cf88..00000000 --- a/docs/cce/umn/cce_01_0031.html +++ /dev/null @@ -1,19 +0,0 @@ - - -

Managing a Cluster

-
-
- - - -
- diff --git a/docs/cce/umn/cce_01_0033.html b/docs/cce/umn/cce_01_0033.html deleted file mode 100644 index 6aeabcec..00000000 --- a/docs/cce/umn/cce_01_0033.html +++ /dev/null @@ -1,76 +0,0 @@ - - -

Creating a Node

-

Scenario

A node is a virtual or physical machine that provides computing resources. Sufficient nodes must be available in your project to ensure that operations, such as creating workloads, can be performed.

-
-

Prerequisites

  • At least one cluster is available. For details on how to create a cluster, see Creating a CCE Cluster.
  • A key pair has been created. The key pair will be used for identity authentication upon remote node login.
-
-

Notes and Constraints

  • During the node creation, software packages are downloaded from OBS using the domain name. You need to use a private DNS server to resolve the OBS domain name, and configure the subnet where the node resides with a private DNS server address. When you create a subnet, the private DNS server is used by default. If you change the subnet DNS, ensure that the DNS server in use can resolve the OBS domain name.
  • Only KVM nodes can be created. Even if non-KVM nodes are created, they cannot be used.
  • Once a node is created, its AZ cannot be changed.
  • CCE supports GPUs through an add-on named gpu-beta. You need to install this add-on to use GPU-enabled nodes in your cluster.
-
-

Procedure

  1. Log in to the CCE console. Use either of the following methods to add a node:

    • In the navigation pane, choose Resource Management > Nodes. Select the cluster to which the node will belong and click Create Node on the upper part of the node list page.
    • In the navigation pane, choose Resource Management > Clusters. In the card view of the cluster to which you will add nodes, click Create Node.
    -

  2. Select a region and an AZ.

    • Current Region: geographic location of the nodes to be created.
    • AZ: Set this parameter based on the site requirements. An AZ is a physical region where resources use independent power supply and networks. AZs are physically isolated but interconnected through an internal network.

      You are advised to deploy worker nodes in different AZs after the cluster is created to make your workloads more reliable. When creating a cluster, you can deploy nodes only in one AZ.

      -
    -

  3. Configure node parameters.

    • Node Type
      • VM node: A VM node will be created in the cluster.
      -
    • Node Name: Enter a node name. A node name contains 1 to 56 characters starting with a lowercase letter and not ending with a hyphen (-). Only lowercase letters, digits, and hyphens (-) are allowed.
    • Specifications: Select node specifications that best fit your business needs.
      • General-purpose: provides a balance of computing, memory, and network resources. It is a good choice for many applications, such as web servers, workload development, workload testing, and small-scale databases.
      • Memory-optimized: provides higher memory capacity than general-purpose nodes and is suitable for relational databases, NoSQL, and other workloads that are both memory-intensive and data-intensive.
      • GPU-accelerated: provides powerful floating-point computing and is suitable for real-time, highly concurrent massive computing. Graphical processing units (GPUs) of P series are suitable for deep learning, scientific computing, and CAE. GPUs of G series are suitable for 3D animation rendering and CAD. GPU-accelerated nodes can be created only in clusters of v1.11 or later. GPU-accelerated nodes are available only in certain regions.
      • General computing-plus: provides stable performance and exclusive resources to enterprise-class workloads with high and stable computing performance.
      • Disk-intensive: supports local disk storage and provides high network performance. It is designed for workloads requiring high throughput and data switching, such as big data workloads.
      -

      To ensure node stability, CCE automatically reserves some resources to run necessary system components. For details, see Formula for Calculating the Reserved Resources of a Node.

      -
    • OS: Select an OS for the node to be created. -

      Reinstalling the OS or modifying OS configurations could make the node unavailable. Exercise caution when performing these operations.

      -
    • System Disk: Set the system disk space of the worker node. The value ranges from 40 GB to 1,024 GB. The default value is 40 GB.

      By default, system disks support Common I/O (SATA), High I/O (SAS), and Ultra-high I/O (SSD) EVS disks.

      -
      Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption function. This function is available only in certain regions.
      • Encryption is not selected by default.
      • After you select Encryption, you can select an existing key in the displayed Encryption Setting dialog box. If no key is available, click the link next to the drop-down box to create a key. After the key is created, click the refresh icon.
      -
      -
    • Data Disk: Set the data disk space of the worker node. The value ranges from 100 GB to 32,768 GB. The default value is 100 GB. The EVS disk types provided for the data disk are the same as those for the system disk.

      If the data disk is detached or damaged, the Docker service becomes abnormal and the node becomes unavailable. You are advised not to delete the data disk.

      -
      -
      • LVM: If this option is selected, CCE data disks are managed by the Logical Volume Manager (LVM). On this condition, you can adjust the disk space allocation for different resources. This option is selected for the first disk by default and cannot be unselected. You can choose to enable or disable LVM for new data disks.
        • This option is selected by default, indicating that LVM management is enabled.
        • You can deselect the check box to disable LVM management.
          • Disk space of the data disks managed by LVM will be allocated according to the ratio you set.
          • When creating a node in a cluster of v1.13.10 or later, if LVM is not selected for a data disk, follow instructions in Adding a Second Data Disk to a Node in a CCE Cluster to fill in the pre-installation script and format the data disk. Otherwise, the data disk will still be managed by LVM.
          • When creating a node in a cluster earlier than v1.13.10, you must format the data disks that are not managed by LVM. Otherwise, either these data disks or the first data disk will be managed by LVM.
          -
          -
        -
      • Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption function.
        This function is supported only for clusters of v1.13.10 or later in certain regions, and is not displayed for clusters of v1.13.10 or earlier.
        • Encryption is not selected by default.
        • After you select Encryption, you can select an existing key in the displayed Encryption Setting dialog box. If no key is available, click the link next to the drop-down box to create a key. After the key is created, click the refresh icon.
        -
        -
      • Add Data Disk: Currently, a maximum of two data disks can be attached to a node. After the node is created, you can go to the ECS console to attach more data disks. This function is available only to clusters of certain versions.
      • Data disk space allocation: Click to specify the resource ratio for Kubernetes Space and User Space. Disk space of the data disks managed by LVM will be allocated according to the ratio you set. This function is available only to clusters of certain versions.
        • Kubernetes Space: You can specify the ratio of the data disk space for storing Docker and kubelet resources. Docker resources include the Docker working directory, Docker images, and image metadata. kubelet resources include pod configuration files, secrets, and emptyDirs.

          The Docker space cannot be less than 10%, and the space size cannot be less than 60 GB. The kubelet space cannot be less than 10%.

          -

          The Docker space size is determined by your service requirements. For details, see Data Disk Space Allocation.

          -
        • User Space: You can set the ratio of the disk space that is not allocated to Kubernetes resources and the path to which the user space is mounted.

          Note that the mount path cannot be /, /home/paas, /var/paas, /var/lib, /var/script, /var/log, /mnt/paas, or /opt/cloud, and cannot conflict with the system directories (such as bin, lib, home, root, boot, dev, etc, lost+found, mnt, proc, sbin, srv, tmp, var, media, opt, selinux, sys, and usr). Otherwise, the system or node installation will fail.

          -
          -
        -
      -
      If the cluster version is v1.13.10-r0 or later and the node specification is Disk-intensive, the following options are displayed for data disks:
      • EVS: Parameters are the same as those when the node type is not Disk-intensive. For details, see Data Disk above.
      • Local disk: Local disks may break down and do not ensure data reliability. It is recommended that you store service data in EVS disks, which are more reliable than local disks.
        Local disk parameters are as follows:
        • Disk Mode: If the node type is disk-intensive, the supported disk mode is HDD.
        • Read/Write Mode: When multiple local disks exist, you can set the read/write mode. The serial and sequential modes are supported. Sequential indicates that data is read and written in linear mode. When a disk is used up, the next disk is used. Serial indicates that data is read and written in striping mode, allowing multiple local disks to be read and written at the same time.
        • Kubernetes Space: You can specify the ratio of the data disk space for storing Docker and kubelet resources. Docker resources include the Docker working directory, Docker images, and image metadata. kubelet resources include pod configuration files, secrets, and emptyDirs.
        • User Space: You can set the ratio of the disk space that is not allocated to Kubernetes resources and the path to which the user space is mounted.
        -
        -
      -
      • The ratio of disk space allocated to the Kubernetes space and user space must be equal to 100% in total. You can click to refresh the data after you have modified the ratio.
      • By default, disks run in the direct-lvm mode. If data disks are removed, the loop-lvm mode will be used and this will impair system stability.
      -
      -
      -
    • VPC: A VPC where the current cluster is located. This parameter cannot be changed and is displayed only for clusters of v1.13.10-r0 or later.
    • Subnet: A subnet improves network security by providing exclusive network resources that are isolated from other networks. You can select any subnet in the cluster VPC. Cluster nodes can belong to different subnets.

      During the node creation, software packages are downloaded from OBS using the domain name. You need to use a private DNS server to resolve the OBS domain name, and configure the subnet where the node resides with a private DNS server address. When you create a subnet, the private DNS server is used by default. If you change the subnet DNS, ensure that the DNS server in use can resolve the OBS domain name.

      -

      -
    -

  4. EIP: an independent public IP address. If the nodes to be created require public network access, select Automatically assign or Use existing.

    An EIP bound to the node allows public network access. EIP bandwidth can be modified at any time. An ECS without a bound EIP cannot access the Internet or be accessed by public networks.
    • Do not use: A node without an EIP cannot be accessed from public networks. It can be used only as a cloud server for deploying services or clusters on a private network.
    • Automatically assign: An EIP with specified configurations is automatically assigned to each node. If the number of EIPs is smaller than the number of nodes, the EIPs are randomly bound to the nodes.

      Configure the EIP specifications, billing factor, bandwidth type, and bandwidth size as required. When creating an ECS, ensure that the elastic IP address quota is sufficient.

      -
    • Use existing: Existing EIPs are assigned to the nodes to be created.
    -

    By default, VPC's SNAT feature is disabled for CCE. If SNAT is enabled, you do not need to use EIPs to access public networks. For details about SNAT, see Custom Policies.

    -
    -
    -

  5. Login Mode:

    • Key pair: Select the key pair used to log in to the node. You can select a shared key.

      A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create a key pair.

      -

      When creating a node using a key pair, IAM users can select only the key pairs created by themselves, regardless of whether these users are in the same group. For example, user B cannot use the key pair created by user A to create a node, and the key pair is not displayed in the drop-down list on the CCE console.

      -
      -
    -

  6. Advanced ECS Settings (optional): Click to show advanced ECS settings.

    • ECS Group: An ECS group logically groups ECSs. The ECSs in the same ECS group comply with the same policy associated with the ECS group.
      • Anti-affinity: ECSs in an ECS group are deployed on different physical hosts to improve service reliability.
      -

      Select an existing ECS group, or click Create ECS Group to create one. After the ECS group is created, click the refresh button.

      -
    • Resource Tags: By adding tags to resources, you can classify resources.

      You can create predefined tags in Tag Management Service (TMS). Predefined tags are visible to all service resources that support the tagging function. You can use predefined tags to improve tag creation and migration efficiency.

      -

      CCE will automatically create the "CCE-Dynamic-Provisioning-Node=node id" tag. A maximum of 5 tags can be added.

      -
    • Agency: An agency is created by a tenant administrator on the IAM console. By creating an agency, you can share your cloud server resources with another account, or entrust a more professional person or team to manage your resources. To authorize an ECS or BMS to call cloud services, select Cloud service as the agency type, click Select, and then select ECS BMS.
    • Pre-installation Script: Enter a maximum of 1,000 characters.

      The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed. The script is usually used to format data disks.

      -
    • Post-installation Script: Enter a maximum of 1,000 characters.

      The script will be executed after Kubernetes software is installed and will not affect the installation. The script is usually used to modify Docker parameters.

      -
    • Subnet IP Address: Select Automatically assign IP address (recommended) or Manually assign IP address.
    -

  7. Advanced Kubernetes Settings: (Optional) Click to show advanced cluster settings.

    • Max Pods: maximum number of pods that can be created on a node, including the system's default pods. If the cluster uses the VPC network model, the maximum value is determined by the number of IP addresses that can be allocated to containers on each node.

      This limit prevents the node from being overloaded by managing too many pods. For details, see Maximum Number of Pods That Can Be Created on a Node.

      -
    • Maximum Data Space per Container: maximum data space that can be used by a container. The value ranges from 10 GB to 500 GB. If the value of this field is larger than the data disk space allocated to Docker resources, the latter will override the value specified here. Typically, 90% of the data disk space is allocated to Docker resources. This parameter is displayed only for clusters of v1.13.10-r0 and later.
    -

  8. Nodes: The value cannot exceed the management scale you select when configuring cluster parameters. Set this parameter based on service requirements and the remaining quota displayed on the page. Click to view the factors that affect the number of nodes to be added (depending on the factor with the minimum value).
  9. Click Next: Confirm. After confirming that the configuration is correct, click Submit.

    The node list page is displayed. If the node status is Available, the node is added successfully. It takes about 6 to 10 minutes to create a node.
    • Do not delete the security groups and related rules automatically configured during cluster creation. Otherwise, the cluster will exhibit unexpected behavior.
    -
    -
    -

  10. Click Back to Node List. The node has been created successfully if it changes to the Available state.

    The allocatable resources are calculated based on the resource request value (Request), which indicates the upper limit of resources that can be requested by pods on this node, but does not indicate the actual available resources of the node.

    -

    The calculation formula is as follows:

    -
    • Allocatable CPUs = Total CPUs – Requested CPUs of all pods – Reserved CPUs for other resources
    • Allocatable memory = Total memory – Requested memory of all pods – Reserved memory for other resources
    -
    -
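
    For example, on a hypothetical worker node with 8 CPU cores and 16 GiB of memory, where the pods already scheduled request a total of 2 cores and 4 GiB and roughly 1 core and 2.5 GiB are reserved for system components, the allocatable resources would be:

    Allocatable CPUs = 8 – 2 – 1 = 5 cores
    Allocatable memory = 16 GiB – 4 GiB – 2.5 GiB = 9.5 GiB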

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0035.html b/docs/cce/umn/cce_01_0035.html deleted file mode 100644 index 578ca003..00000000 --- a/docs/cce/umn/cce_01_0035.html +++ /dev/null @@ -1,15 +0,0 @@ - - -

Node Pools

-
-
- -
- diff --git a/docs/cce/umn/cce_01_0036.html b/docs/cce/umn/cce_01_0036.html deleted file mode 100644 index 553e85b1..00000000 --- a/docs/cce/umn/cce_01_0036.html +++ /dev/null @@ -1,18 +0,0 @@ - - -

Stopping a Node

-

Scenario

After a node in the cluster is stopped, services on the node are also stopped. Before stopping a node, ensure that discontinuity of the services on the node will not result in adverse impacts.

-
-

Notes and Constraints

  • Deleting a node will lead to pod migration, which may affect services. Therefore, delete nodes during off-peak hours.
  • Unexpected risks may occur during node deletion. Back up related data in advance.
  • While the node is being deleted, the backend will set the node to the unschedulable state.
  • Only worker nodes can be stopped.
-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Nodes.
  2. In the node list, click the name of the node to be stopped.
  3. On the node details page displayed, click the node name to go to the ECS details page.

    Figure 1 Nodes details page
    -

  4. In the upper right corner of the ECS details page, click Stop. In the Stop ECS dialog box, click Yes.

    Figure 2 ECS details page
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0042.html b/docs/cce/umn/cce_01_0042.html deleted file mode 100644 index 08ca90ec..00000000 --- a/docs/cce/umn/cce_01_0042.html +++ /dev/null @@ -1,28 +0,0 @@ - - -

Storage (CSI)

-

-
-
- -
- diff --git a/docs/cce/umn/cce_01_0044.html b/docs/cce/umn/cce_01_0044.html deleted file mode 100644 index 77545b16..00000000 --- a/docs/cce/umn/cce_01_0044.html +++ /dev/null @@ -1,18 +0,0 @@ - - -

EVS Volumes

-

-
-
- - - -
- diff --git a/docs/cce/umn/cce_01_0045.html b/docs/cce/umn/cce_01_0045.html deleted file mode 100644 index d6f81ab3..00000000 --- a/docs/cce/umn/cce_01_0045.html +++ /dev/null @@ -1,19 +0,0 @@ - - -

Configuration Center

-
-
- -
- diff --git a/docs/cce/umn/cce_01_0046.html b/docs/cce/umn/cce_01_0046.html deleted file mode 100644 index eef0c613..00000000 --- a/docs/cce/umn/cce_01_0046.html +++ /dev/null @@ -1,29 +0,0 @@ - - -

Workloads

-
-
- -
- diff --git a/docs/cce/umn/cce_01_0047.html b/docs/cce/umn/cce_01_0047.html deleted file mode 100644 index 0d933869..00000000 --- a/docs/cce/umn/cce_01_0047.html +++ /dev/null @@ -1,294 +0,0 @@ - - -

Creating a Deployment

-

Scenario

Deployments are workloads (for example, Nginx) that do not store any data or status. You can create Deployments on the CCE console or by running kubectl commands.

-
-

Prerequisites

  • Before creating a containerized workload, you must have an available cluster. For details on how to create a cluster, see Creating a CCE Cluster.
  • To enable public access to a workload, ensure that an EIP or load balancer has been bound to at least one node in the cluster.

    If a pod has multiple containers, ensure that the ports used by the containers do not conflict with each other. Otherwise, creating the Deployment will fail.

    -
    -
-
-

Using the CCE Console

CCE provides multiple methods for creating a workload. You can use any of the following methods:
  • Use an image in Third-Party Images. You do not need to upload any image before using it.
  • Use an image that you have uploaded to SWR.
  • Use a shared image to create a workload. Specifically, other tenants share an image with you by using the SWR service.
  • Use a YAML file to create a workload. You can click Create YAML on the right of the Configure Advanced Settings page when creating a Deployment. For details about YAML, see Table 3. After the YAML file is written, click Create to create a workload.

    Settings in the YAML file are synchronized with those on the console. You can edit the YAML file on the console to create a workload. For example:

    -
    • If you enter a workload name on the console, the name will automatically appear in the YAML file.
    • If you add an image on the console, the image will be automatically added to the YAML file.
    -

    When you click Create YAML on the right of the console, do not create multiple YAML files in the YAML definition pane displayed. You need to create them one by one. Otherwise, an error will be reported during the creation.

    -
    -
-
-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments. On the page displayed, click Create Deployment. Set basic workload parameters as described in Table 1. The parameters marked with an asterisk (*) are mandatory.

    -

    Table 1 Basic workload parameters
    • * Workload Name: Name of the workload to be created. The name must be unique. Enter 4 to 63 characters starting with a letter and ending with a letter or digit. Only lowercase letters, digits, and hyphens (-) are allowed.
    • * Cluster Name: Cluster to which the workload belongs.
    • * Namespace: In a single cluster, data in different namespaces is isolated from each other. This enables applications to share the services of the same cluster without interfering with each other. If no namespace is set, the default namespace is used.
    • * Instances: Number of pods in the workload. A workload can have one or more pods. The default value is 2 and can be set to 1. Each workload pod consists of the same containers. Configuring multiple pods for a workload ensures that the workload can still run properly even if a pod is faulty. If only one pod is used, a node or pod exception may cause service exceptions.
    • * Container runtime: Select a container runtime, which cannot be changed after creation. This parameter is available only for CCE Turbo clusters.
      • runc: Common containers will run on the node.
      • kata: Secure containers will be used and the workload can run only on nodes that use the secure runtime.
      For details about common containers and secure containers, see Secure Containers and Common Containers.
    • Time Zone Synchronization: If this parameter is enabled, the container and the node use the same time zone.
      NOTICE: After time zone synchronization is enabled, disks of the hostPath type will be automatically added and listed in the Data Storage > Local Volume area. Do not modify or delete the disks.
    • Description: Description of the workload.

  2. Click Next: Add Container.

    1. Click Add Container and select the image to be deployed.
      • My Images: Create a workload using an image in the image repository you created.
      • Third-Party Images: Create a workload using an image from any third-party image repository. When you create a workload using a third-party image, ensure that the node where the workload is running can access public networks. For details on how to create a workload using a third-party image, see Using a Third-Party Image.
        • If your image repository does not require authentication, set Secret Authentication to No, enter an image pull address, and then click OK.
        • If your image repository must be authenticated (account and password), you need to create a secret and then use a third-party image. For details, see Using a Third-Party Image.
        -
      • Shared Images: Create a workload using an image shared by another tenant through the SWR service.
      -
    2. Configure basic image information.

      A workload is an abstract model of a group of pods. One pod can encapsulate one or more containers. You can click Add Container in the upper right corner to add multiple container images and set them separately.

      Table 2 Image parameters
      • Image Name: Name of the image. You can click Change Image to update it.
      • *Image Version: Select the image tag to be deployed.
      • *Container Name: Name of the container. You can modify it.
      • Privileged Container: Programs in a privileged container have certain privileges. If Privileged Container is On, the container is granted superuser permissions. For example, privileged containers can manipulate network devices on the host machine and modify kernel parameters.
      • Container Resources:
        • CPU
          • Request: minimum number of CPU cores required by a container. The default value is 0.25 cores.
          • Limit: maximum number of CPU cores available for a container. Do not leave Limit unspecified. Otherwise, intensive use of container resources will occur and your workload may exhibit unexpected behavior.
        • Memory
          • Request: minimum amount of memory required by a container. The default value is 512 MiB.
          • Limit: maximum amount of memory available for a container. When memory usage exceeds the specified memory limit, the container will be terminated.
        For more information about Request and Limit, see Setting Container Specifications.
        • GPU (configurable only when the cluster contains GPU nodes): percentage of GPU resources reserved for a container. Select Use and set the percentage. For example, if this parameter is set to 10%, the container is allowed to use 10% of GPU resources. If you do not select Use or set this parameter to 0, no GPU resources can be used.
        • GPU/Graphics Card: The workload's pods will be scheduled to the node with the specified GPU. If Any GPU type is selected, the container uses a random GPU in the node. If you select a specific GPU, the container uses this GPU accordingly.
    3. Lifecycle: Commands for starting and running containers can be set. -
    4. Health Check: CCE provides two types of probes: liveness probe and readiness probe. They are used to determine whether containers and user services are running properly. For more information, see Setting Health Check for a Container.
      • Liveness Probe: used to restart the unhealthy container.
      • Readiness Probe: used to change the container to the unready state when detecting that the container is unhealthy. In this way, service traffic will not be directed to the container.
      -
    5. Environment Variables: Environment variables can be added to a container. In general, environment variables are used to set parameters.
      On the Environment Variables tab page, click Add Environment Variable. Currently, three types of environment variables are supported:
      • Added manually: Set Variable Name and Variable Value/Reference.
      • Added from Secret: Set Variable Name and select the desired secret name and data. A secret must be created in advance. For details, see Creating a Secret.
      • Added from ConfigMap: Set Variable Name and select the desired ConfigMap name and data. A ConfigMap must be created in advance. For details, see Creating a ConfigMap.

        To edit an environment variable that has been set, click Edit. To delete an environment variable that has been set, click Delete.

        -
        -
      -
      -
    6. Data Storage: Data storage can be mounted to containers for persistent storage and high disk I/O. Local volume and cloud storage are supported. For details, see Storage (CSI).

      Currently, cloud storage cannot be mounted to secure (Kata) containers in a CCE Turbo cluster.

      -
      -
    7. Security Context: Container permissions can be configured to protect CCE and other containers from being affected.

      Enter the user ID to set container permissions and prevent systems and other containers from being affected.

      -
    8. Log Policies: Log collection policies and log directory can be configured to collect container logs for unified management and analysis. For details, see Container Logs.
    -
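
    The Container Resources, Health Check, Environment Variables, and Security Context settings above map to fields in the pod template. The following is only a minimal sketch of such a Deployment; the ConfigMap example-config, the probe path and port, and the user ID 1000 are placeholder assumptions, not values required by CCE.

    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx-example              # hypothetical workload name
    spec:
      replicas: 2
      selector:
        matchLabels:
          app: nginx-example
      template:
        metadata:
          labels:
            app: nginx-example
        spec:
          containers:
          - name: container-0
            image: nginx               # if you use an image in My Images, obtain the image path from SWR
            resources:                 # Container Resources: Request and Limit
              requests:
                cpu: 250m
                memory: 512Mi
              limits:
                cpu: 500m
                memory: 1Gi
            env:                       # Environment Variables: added from a ConfigMap (assumed to exist)
            - name: APP_MODE
              valueFrom:
                configMapKeyRef:
                  name: example-config # hypothetical ConfigMap name
                  key: app-mode
            livenessProbe:             # Liveness Probe: restarts the container when it is unhealthy
              httpGet:
                path: /
                port: 80
              initialDelaySeconds: 10
              periodSeconds: 10
            readinessProbe:            # Readiness Probe: keeps traffic away from a container that is not ready
              httpGet:
                path: /
                port: 80
              periodSeconds: 5
            securityContext:           # Security Context: run the container as a non-root user ID
              runAsUser: 1000
          imagePullSecrets:
          - name: default-secret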

  3. Click Next: Set Application Access. Then, click Add Service and set the workload access type.

    If your workload will be reachable to other workloads or public networks, add a Service to define the workload access type.

    -

    The workload access type determines the network attributes of the workload. Workloads with different access types can provide different network capabilities. For details, see Overview.

    -

  4. Click Next: Configure Advanced Settings to configure advanced policies.

    • Upgrade Mode: You can specify the upgrade mode of a Deployment, including Rolling upgrade and In-place upgrade.
      • Rolling upgrade: Old pods are gradually replaced with new ones. During the upgrade, service traffic is evenly distributed to both pods to ensure service continuity.
        • Maximum Number of Unavailable Pods: maximum number of unavailable pods allowed in a rolling upgrade. If the number is equal to the total number of pods, services may be interrupted. Minimum number of alive pods = Total pods – Maximum number of unavailable pods
        -
      • In-place upgrade: Old pods are deleted before new pods are created. Services will be interrupted during an in-place upgrade.
      -
    • Graceful Deletion: A time window can be set for workload deletion and reserved for executing commands in the pre-stop phase in the lifecycle. If workload processes are not terminated after the time window elapses, the workload will be forcibly deleted.
      • Graceful Time Window (s): Set a time window (0–9999s) for pre-stop commands to finish execution before a workload is deleted. The default value is 30s.
      • Scale Order: Choose Prioritize new pods or Prioritize old pods based on service requirements. Prioritize new pods indicates that new pods will be first deleted when a scale-in is triggered.
      -
    • Migration Policy: When the node where a workload's pods are located is unavailable for the specified amount of time, the pods will be rescheduled to other available nodes.
      • Migration Time Window (s): Set a time window for migration. The default value is 300s.
      -
    • Scheduling Policies: You can combine static global scheduling policies or dynamic runtime scheduling policies as required. For details, see Scheduling Policy Overview.
    • Advanced Pod Settings
      • Pod Label: The built-in app label is specified when the workload is created. It is used to set affinity and anti-affinity scheduling and cannot be modified. You can click Add Label to add labels.
      -
      Figure 1 Advanced pod settings
      -
    • Client DNS Configuration: A CCE cluster has a built-in DNS add-on (CoreDNS) to provide domain name resolution for workloads in the cluster.
      • DNS Policy
        • ClusterFirst: The default DNS configuration overrides the Nameserver and DNS Search Domain configurations of the client.
        • None: Only the Nameserver and DNS Search Domain configurations are used for domain name resolution.
        • Default: The pod inherits the DNS configuration from the node on which the pod runs.
        -
      • Nameserver: You can configure a domain name server for a user-defined domain name. The value is one or a group of DNS IP addresses, for example, 1.2.3.4.
      • DNS Search Domain: a search list for host-name lookup. When a domain name cannot be resolved, DNS queries will be attempted combining the domain name with each domain in the search list in turn until a match is found or all domains in the search list are tried.
      • Timeout (s): amount of time the resolver will wait for a response from a remote name server before retrying the query on a different name server. Set it based on the site requirements.
      • ndots: threshold for the number of dots that must appear in a domain name before an initial absolute query will be made. If a domain name has ndots or more than ndots dots, the name is a fully qualified domain name (FQDN) and will be tried first as an absolute name. If a domain name has less than ndots dots, the operating system will look up the name in a list of search domain names.
      -
    -
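
    The Client DNS Configuration options above correspond to the dnsPolicy and dnsConfig fields of the pod template. The following fragment is only a sketch; the name server 1.2.3.4, the search domain example.com, and the option values are placeholders.

      dnsPolicy: "None"                # use only the settings defined in dnsConfig
      dnsConfig:
        nameservers:
        - 1.2.3.4                      # placeholder DNS server address
        searches:
        - example.com                  # placeholder search domain
        options:
        - name: ndots
          value: "2"
        - name: timeout
          value: "3"

    This fragment belongs under spec.template.spec of the workload.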

  5. After the preceding configurations are complete, click Create. On the page displayed, click Return to Workload List to view the workload status.

    If the workload is in the Running state, it has been successfully created.

    -

    Workload status is not updated in real time. Click in the upper right corner or press F5 to refresh the page.

    -

  6. To access the workload in a browser, go to the workload list on the Deployments page. Copy the corresponding External Access Address and paste it into the address box in the browser.

    • External access addresses are available only if the Deployment access type is set to NodePort and an EIP is assigned to any node in the cluster, or if the Deployment access type is set to LoadBalancer (ELB).
    • If the workload list contains more than 500 records, the Kubernetes pagination mechanism will be used. Specifically, you can only go to the first page or the next page, but cannot go to the previous page. In addition, if resources are divided into discrete pages, the total number of resources displayed is the maximum number of resources that can be queried at a time, not the actual total number of resources.
    -
    -

-
-

Using kubectl

The following procedure uses Nginx as an example to describe how to create a workload using kubectl.

-
  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Create and edit the nginx-deployment.yaml file. nginx-deployment.yaml is an example file name. You can rename it as required.

    vi nginx-deployment.yaml

    -

    The following is an example YAML file. For more information about Deployments, see Kubernetes documentation.

    -
    apiVersion: apps/v1
    -kind: Deployment
    -metadata:
    -  name: nginx
    -spec:
    -  replicas: 1
    -  selector:
    -    matchLabels:
    -      app: nginx
    -  strategy:
    -    type: RollingUpdate
    -  template:
    -    metadata:
    -      labels:
    -        app: nginx
    -    spec:
    -      containers:
    -      - image: nginx    # If you use an image in My Images, obtain the image path from SWR.
    -        imagePullPolicy: Always
    -        name: nginx
    -      imagePullSecrets:
    -      - name: default-secret
    -

    For details about these parameters, see Table 3.

    Table 3 Deployment YAML parameters
    • apiVersion: API version. Set this parameter based on the cluster version: for clusters of v1.17 or later, the apiVersion of Deployments is apps/v1; for clusters of v1.15 or earlier, it is extensions/v1beta1. (Mandatory)
    • kind: Type of the created object. (Mandatory)
    • metadata: Metadata of the resource object. (Mandatory)
      • name: Name of the Deployment. (Mandatory)
    • spec: Detailed description of the Deployment. (Mandatory)
      • replicas: Number of pods. (Mandatory)
      • selector: Determines the pods that can be managed by the Deployment. (Mandatory)
      • strategy: Upgrade mode. Possible values are RollingUpdate and ReplaceUpdate. By default, rolling update is used. (Optional)
      • template: Detailed description of the created pod. (Mandatory)
        • metadata: Metadata of the pod. (Mandatory)
          • labels: metadata.labels specifies the container labels. (Optional)
        • spec.containers: (Mandatory)
          • image (mandatory): Name of the container image.
          • imagePullPolicy (optional): Policy for obtaining an image. The options include Always (attempting to download images each time), Never (only using local images), and IfNotPresent (using local images if they are available; downloading images if local images are unavailable). The default value is Always.
          • name (mandatory): Container name.
        • imagePullSecrets: Name of the secret used during image pulling. If a private image is used, this parameter is mandatory. To pull an image from the Software Repository for Container (SWR), set this parameter to default-secret. To pull an image from a third-party image repository, set this parameter to the name of the created secret. (Optional)
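
    As a sketch of how the strategy parameter relates to the console's rolling upgrade settings, the fragment below limits how many pods may be unavailable or added during an upgrade; the values are examples only.

      strategy:
        type: RollingUpdate
        rollingUpdate:
          maxUnavailable: 1            # at most one pod may be unavailable during the upgrade
          maxSurge: 1                  # at most one extra pod may be created above the desired count

    This fragment belongs under spec of the Deployment.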

  3. Create a Deployment.

    kubectl create -f nginx-deployment.yaml

    -

    If the following information is displayed, the Deployment is being created.

    -
    deployment "nginx" created
    -

  4. Query the Deployment status.

    kubectl get deployment

    -

    If the following information is displayed, the Deployment is running.

    -
    NAME           READY     UP-TO-DATE   AVAILABLE   AGE 
    -nginx          1/1       1            1           4m5s
    -

    Parameter description

    -
    • NAME: name of the Deployment
    • READY: number of ready pod replicas/number of expected pod replicas
    • UP-TO-DATE: number of pod replicas that have been updated to the latest desired state
    • AVAILABLE: number of pod replicas available to users
    • AGE: how long the Deployment has been running
    -

  5. If the Deployment will be accessed through a ClusterIP or NodePort Service, add the corresponding Service. For details, see Networking.
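
    The following is a minimal sketch of a NodePort Service for the nginx Deployment above; the port values are examples, and the selector reuses the app: nginx label from nginx-deployment.yaml.

    apiVersion: v1
    kind: Service
    metadata:
      name: nginx
    spec:
      type: NodePort
      selector:
        app: nginx                     # matches the label defined in nginx-deployment.yaml
      ports:
      - port: 80                       # Service port inside the cluster
        targetPort: 80                 # container port that nginx listens on
        protocol: TCP

    Save it as, for example, nginx-service.yaml and create it with kubectl create -f nginx-service.yaml.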
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0048.html b/docs/cce/umn/cce_01_0048.html deleted file mode 100644 index 18d3e23c..00000000 --- a/docs/cce/umn/cce_01_0048.html +++ /dev/null @@ -1,224 +0,0 @@ - - -

Creating a StatefulSet

-

Scenario

StatefulSets are a type of workload whose data or status is stored while they are running. For example, MySQL is a StatefulSet because it needs to store new data.

-

A container can be migrated between different hosts, but data is not stored on the hosts. To store StatefulSet data persistently, attach HA storage volumes provided by CCE to the container.

-
-

Prerequisites

  • Before creating a workload, you must have an available cluster. For details on how to create a cluster, see Creating a CCE Cluster.
  • To enable public access to a workload, ensure that an EIP or load balancer has been bound to at least one node in the cluster.

    If a pod has multiple containers, ensure that the ports used by the containers do not conflict with each other. Otherwise, creating the StatefulSet will fail.

    -
    -
-
-

Using the CCE Console

CCE provides multiple methods for creating a workload. You can use any of the following methods:
  1. Use an image in Third-Party Images. You do not need to upload any image before using it.
  2. Use an image that you have uploaded to SWR.
  3. Use a shared image to create a workload. Specifically, other tenants share an image with you by using the SWR service.
  4. Use a YAML file to create a workload. You can click Create YAML on the right of the Create StatefulSet page. For details about YAML, see Using kubectl. After the YAML file is written, click Create to create a workload.

    Settings in the YAML file are synchronized with those on the console. You can edit the YAML file on the console to create a workload. For example:

    -
    • If you enter a workload name on the console, the name will automatically appear in the YAML file.
    • If you add an image on the console, the image will be automatically added to the YAML file.
    -

    When you click Create YAML on the right of the console, do not create multiple YAML files in the YAML definition pane displayed. You need to create them one by one. Otherwise, an error will be reported during the creation.

    -
    -
-
-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > StatefulSets. On the displayed page, click Create StatefulSet. Set basic workload parameters as described in Table 1. The parameters marked with an asterisk (*) are mandatory.

    -

    Table 1 Basic workload parameters
    • * Workload Name: Name of a workload, which must be unique. Enter 4 to 52 characters starting with a lowercase letter and ending with a letter or digit. Only lowercase letters, digits, and hyphens (-) are allowed.
    • * Cluster Name: Cluster to which the workload belongs.
    • * Namespace: In a single cluster, data in different namespaces is isolated from each other. This enables applications to share the services of the same cluster without interfering with each other. If no namespace is set, the default namespace is used.
    • * Instances: Number of pods in a workload. A workload can have one or more pods. The default value is 2. You can customize the value, for example, setting it to 1. Each workload pod consists of the same containers. You can configure multiple pods for a workload to ensure high reliability. For such a workload, if one pod is faulty, the workload can still run properly. If only one pod is used, a node or pod exception may cause service exceptions.
    • Time Zone Synchronization: If this parameter is enabled, the container and the node use the same time zone.
      NOTICE: After time zone synchronization is enabled, disks of the hostPath type will be automatically added and listed in the Data Storage > Local Volume area. Do not modify or delete the disks.
    • Description: Description of the workload.

  2. Click Next: Add Container.

    1. Click Add Container and select the image to be deployed.
      • My Images: Create a workload using an image in the image repository you created.
      • Third-Party Images: Create a workload using an image from any third-party image repository. When you create a workload using a third-party image, ensure that the node where the workload is running can access public networks. For details on how to create a workload using a third-party image, see Using a Third-Party Image.
        • If your image repository does not require authentication, set Secret Authentication to No, enter an image pull address, and then click OK.
        • If your image repository must be authenticated (account and password), you need to create a secret and then use a third-party image. For details, see Using a Third-Party Image.
        -
      • Shared Images: Create a workload using an image shared by another tenant through the SWR service.
      -
    2. Configure basic image information.
      A workload is an abstract model of a group of pods. One pod can encapsulate one or more containers. You can click Add Container in the upper right corner to add multiple container images and set them separately. -
      Table 2 Image parameters
      • Image Name: Name of the image. You can click Change Image to update it.
      • *Image Version: Select the image tag to be deployed.
      • *Container Name: Name of the container. You can modify it.
      • Privileged Container: Programs in a privileged container have certain privileges. If Privileged Container is On, the container is granted superuser permissions. For example, privileged containers can manipulate network devices on the host machine and modify kernel parameters.
      • Container Resources:
        • CPU
          • Request: minimum number of CPU cores required by a container. The default value is 0.25 cores.
          • Limit: maximum number of CPU cores available for a container. Do not leave Limit unspecified. Otherwise, intensive use of container resources will occur and your workload may exhibit unexpected behavior.
        • Memory
          • Request: minimum amount of memory required by a container. The default value is 512 MiB.
          • Limit: maximum amount of memory available for a container. When memory usage exceeds the specified memory limit, the container will be terminated.
        For more information about Request and Limit, see Setting Container Specifications.
        • GPU (configurable only when the cluster contains GPU nodes): percentage of GPU resources reserved for a container. Select Use and set the percentage. For example, if this parameter is set to 10%, the container is allowed to use 10% of GPU resources. If you do not select Use or set this parameter to 0, no GPU resources can be used.
        • GPU/Graphics Card: The workload's pods will be scheduled to the node with the specified GPU. If Any GPU type is selected, the container uses a random GPU in the node. If you select a specific GPU, the container uses this GPU accordingly.
    3. Lifecycle: Commands for starting and running containers can be set. -
    4. Health Check: CCE provides two types of probes: liveness probe and readiness probe. They are used to determine whether containers and user services are running properly. For more information, see Setting Health Check for a Container.
      • Liveness Probe: used to restart the unhealthy container.
      • Readiness Probe: used to change the container to the unready state when detecting that the container is unhealthy. In this way, service traffic will not be directed to the container.
      -
    5. Environment Variables: Environment variables can be added to a container. In general, environment variables are used to set parameters.
      On the Environment Variables tab page, click Add Environment Variable. Currently, three types of environment variables are supported:
      • Added manually: Set Variable Name and Variable Value/Reference.
      • Added from Secret: Set Variable Name and select the desired secret name and data. A secret must be created in advance. For details, see Creating a Secret.
      • Added from ConfigMap: Set Variable Name and select the desired ConfigMap name and data. A ConfigMap must be created in advance. For details, see Creating a ConfigMap.

        To edit an environment variable that has been set, click Edit. To delete an environment variable that has been set, click Delete.

        -
        -
      -
      -
    6. Data Storage: Data storage can be mounted to containers for persistent storage and high disk I/O. Local volume and cloud storage are supported. For details, see Storage (CSI).

      You can add data storage volumes only when creating a StatefulSet.

      -
      -
    7. Security Context: Container permissions can be configured to protect CCE and other containers from being affected.

      Enter the user ID to set container permissions and prevent systems and other containers from being affected.

      -
    8. Log Policies: Log collection policies and log directory can be configured to collect container logs for unified management and analysis. For details, see Container Logs.
    -
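
    Because each StatefulSet pod usually needs its own persistent volume (see the Data Storage setting above), storage is typically declared through volumeClaimTemplates. The following is only a sketch; the workload name, mount path, and the StorageClass name csi-disk are assumptions and must be replaced with values valid in your cluster.

    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: example                    # hypothetical workload name
    spec:
      replicas: 2
      serviceName: example-svc         # headless Service name (see Table 3)
      selector:
        matchLabels:
          app: example
      template:
        metadata:
          labels:
            app: example
        spec:
          containers:
          - name: container-0
            image: nginx               # placeholder image
            volumeMounts:
            - name: data
              mountPath: /var/lib/data # hypothetical mount path
          imagePullSecrets:
          - name: default-secret
      volumeClaimTemplates:            # one PVC is created from this template for each pod
      - metadata:
          name: data
        spec:
          accessModes:
          - ReadWriteOnce
          storageClassName: csi-disk   # assumption: an EVS StorageClass with this name exists
          resources:
            requests:
              storage: 10Gi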

  3. Click Next: Set Application Access and set Headless Service and workload access type.

    Table 3 describes the parameters in the Headless Service area. -
    Table 3 Parameter description
    • Service Name: Name of the Service corresponding to the workload for mutual access between pods. This Service is used for internal discovery of pods, and does not require an independent IP address or load balancing.
    • Port Name: Name of the container port. You are advised to enter a name that indicates the function of the port.
    • Container Port: Listening port inside the container.

    Click Add Service and set the workload access type.

    -

    If your workload will be reachable to other workloads or public networks, add a Service to define the workload access type.

    -

    The workload access type determines the network attributes of the workload. Workloads with different access types can provide different network capabilities. For details, see Overview.

    -

  4. Click Next: Configure Advanced Settings.

    • Upgrade Policy: Only Rolling upgrade is supported.

      During a rolling upgrade, old pods are gradually replaced with new ones, and service traffic is evenly distributed to both pods to ensure service continuity.

      -
    • Pod Management Policy: There are two types of policies: ordered and parallel.

      Ordered: The StatefulSet will deploy, delete, or scale pods in order and one by one (the StatefulSet waits until each pod is ready before continuing). This is the default policy.

      -

      Parallel: The StatefulSet will create pods in parallel to match the desired scale without waiting, and will delete all pods at once.

      -
    • Graceful Deletion: A time window can be set for workload deletion and reserved for executing commands in the pre-stop phase in the lifecycle. If workload processes are not terminated after the time window elapses, the workload will be forcibly deleted.
      • Graceful Time Window (s): Set a time window (0–9999s) for pre-stop commands to finish execution before a workload is deleted. The default value is 30s.
      • Scale Order: Choose Prioritize new pods or Prioritize old pods based on service requirements. Prioritize new pods indicates that new pods will be first deleted when a scale-in is triggered.
      -
    • Scheduling Policies: You can combine static global scheduling policies or dynamic runtime scheduling policies as required. For details, see Scheduling Policy Overview.
    • Advanced Pod Settings
      • Pod Label: The built-in app label is specified when the workload is created. It is used to set affinity and anti-affinity scheduling and cannot be modified. You can click Add Label to add labels.
      -
      Figure 1 Advanced pod settings
      -
    • Client DNS Configuration: A CCE cluster has a built-in DNS add-on (CoreDNS) to provide domain name resolution for workloads in the cluster.
      • DNS Policy
        • ClusterFirst: The default DNS configuration overrides the Nameserver and DNS Search Domain configurations of the client.
        • None: Only the Nameserver and DNS Search Domain configurations are used for domain name resolution.
        • Default: The pod inherits the DNS configuration from the node on which the pod runs.
        -
      • Nameserver: You can configure a domain name server for a user-defined domain name. The value is one or a group of DNS IP addresses, for example, 1.2.3.4.
      • DNS Search Domain: a search list for host-name lookup. When a domain name cannot be resolved, DNS queries will be attempted combining the domain name with each domain in the search list in turn until a match is found or all domains in the search list are tried.
      • Timeout (s): amount of time the resolver will wait for a response from a remote name server before retrying the query on a different name server. Set it based on the site requirements.
      • ndots: threshold for the number of dots that must appear in a domain name before an initial absolute query will be made. If a domain name has ndots or more than ndots dots, the name is a fully qualified domain name (FQDN) and will be tried first as an absolute name. If a domain name has less than ndots dots, the operating system will look up the name in a list of search domain names.
      -
    -
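
    The Pod Management Policy and upgrade policy above correspond to the podManagementPolicy and updateStrategy fields of a StatefulSet. A minimal fragment, with the parallel policy chosen only as an example:

      podManagementPolicy: Parallel    # or OrderedReady (the default ordered policy)
      updateStrategy:
        type: RollingUpdate

    This fragment belongs under spec of the StatefulSet.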

  5. Click Create and then Back to StatefulSet List. If the workload is in the Running state, it has been successfully created. If the workload status is not updated, click in the upper right corner or press F5 to refresh the page.

    • When a node is unavailable, pods become Unready. In this case, you need to manually delete the pods of the StatefulSet so that the pods can be migrated to a normal node.
    • If the workload list contains more than 500 records, the Kubernetes pagination mechanism will be used. Specifically, you can only go to the first page or the next page, but cannot go to the previous page. In addition, if resources are divided into discrete pages, the total number of resources displayed is the maximum number of resources that can be queried at a time, not the actual total number of resources.
    -
    -

-
-

Using kubectl

The following procedure uses an etcd workload as an example to describe how to create a workload using kubectl.

-
  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Create and edit the etcd-statefulset.yaml file.

    etcd-statefulset.yaml is an example file name, and you can change it as required.

    -

    vi etcd-statefulset.yaml

    -

    The following provides an example of the file contents. For more information on StatefulSet, see the Kubernetes documentation.

    -
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: etcd
    spec:
      replicas: 2
      selector:
        matchLabels:
          app: etcd
      serviceName: etcd-svc
      template:
        metadata:
          labels:
            app: etcd
        spec:
          containers:
          - env:
            - name: PAAS_APP_NAME
              value: tesyhhj
            - name: PAAS_NAMESPACE
              value: default
            - name: PAAS_PROJECT_ID
              value: 9632fae707ce4416a0ab1e3e121fe555
            image: etcd # If you use an image in My Images, obtain the image path from SWR.
            imagePullPolicy: IfNotPresent
            name: container-0
      updateStrategy:
        type: RollingUpdate
    -

    vi etcd-headless.yaml

    -
    apiVersion: v1
    kind: Service
    metadata:
      labels:
        app: etcd
      name: etcd-svc
    spec:
      clusterIP: None
      ports:
      - name: etcd-svc
        port: 3120
        protocol: TCP
        targetPort: 3120
      selector:
        app: etcd
      sessionAffinity: None
      type: ClusterIP
    -

  3. Create a workload and the corresponding headless service.

    kubectl create -f etcd-statefulset.yaml

    -

    If the following information is displayed, the StatefulSet has been successfully created.

    -
    statefulset.apps/etcd created
    -

    kubectl create -f etcd-headless.yaml

    -

    If the following information is displayed, the headless service has been successfully created.

    -
    service/etcd-svc created
    -

  4. If the workload will be accessed through a ClusterIP or NodePort Service, set the corresponding workload access type. For details, see Networking.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0051.html b/docs/cce/umn/cce_01_0051.html deleted file mode 100644 index cbc5181d..00000000 --- a/docs/cce/umn/cce_01_0051.html +++ /dev/null @@ -1,29 +0,0 @@ - - -

Scheduling Policy Overview

-

Custom Scheduling Policies

You can configure node affinity, workload affinity, and workload anti-affinity in custom scheduling policies.

- -

Custom scheduling policies depend on node labels and pod labels. You can use default labels or customize labels as required.

-
-
-

Simple Scheduling Policies

A simple scheduling policy allows you to configure affinity between workloads and AZs, between workloads and nodes, and between workloads.

-
-
  • Workload-AZ affinity: Multiple AZ-based scheduling policies (including affinity and anti-affinity policies) can be configured. However, scheduling is performed as long as one of the scheduling policies is met. -
  • Workload-node affinity: Multiple node-based scheduling policies (including affinity and anti-affinity scheduling) can be configured. However, scheduling is performed as long as one of the scheduling policies is met. For example, if a cluster contains nodes A, B, and C and two scheduling policies are set (one policy defines node A as an affinity node and the other policy defines node B as an anti-affinity node), then the workload can be scheduled to any node other than B. -
  • Workload-workload affinity: Multiple workload-based scheduling policies can be configured, but the labels in these policies must belong to the same workload.
    • Affinity between workloads: For details, see Workload-Workload Affinity. You can deploy workloads on the same node to reduce consumption of network resources.
      Figure 1 shows an example of affinity deployment, in which all workloads are deployed on the same node.
      Figure 1 Affinity between workloads
      -
      -
    • Anti-affinity between workloads: For details, see Workload-Workload Anti-Affinity. Preventing multiple instances of the same workload from being deployed on the same node reduces the impact of system breakdowns. Anti-affinity deployment is also recommended for workloads that may interfere with each other.
      Figure 2 shows an example of anti-affinity deployment, in which four workloads are deployed on four different nodes.
      Figure 2 Anti-affinity between workloads
      -
      -
    -
-

When setting workload-workload affinity and workload-node affinity, ensure that the affinity relationships do not contradict each other; otherwise, workload deployment will fail.

-

For example, Workload 3 will fail to be deployed when the following conditions are met:

-
  • Anti-affinity is configured for Workload 1 and Workload 2. Workload 1 is deployed on Node A and Workload 2 is deployed on Node B.
  • Affinity is configured between Workload 2 and Workload 3, but the target node on which Workload 3 is to be deployed is Node C or Node A.
-
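As a hedged illustration of how such rules look in YAML, the following sketch expresses workload-workload anti-affinity for a Deployment. The workload name, label values, and topology key are assumptions used only for illustration, not values taken from this document:

    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: workload-1               # Hypothetical workload name.
    spec:
      replicas: 2
      selector:
        matchLabels:
          app: workload-1
      template:
        metadata:
          labels:
            app: workload-1
        spec:
          affinity:
            podAntiAffinity:         # Do not place two pods with the same app label on one node.
              requiredDuringSchedulingIgnoredDuringExecution:
              - labelSelector:
                  matchLabels:
                    app: workload-1
                topologyKey: kubernetes.io/hostname
          containers:
          - name: container-0
            image: nginx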
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0053.html b/docs/cce/umn/cce_01_0053.html deleted file mode 100644 index 9a3fd38b..00000000 --- a/docs/cce/umn/cce_01_0053.html +++ /dev/null @@ -1,231 +0,0 @@ - - -

Using Local Disks as Storage Volumes

-

You can mount a file directory of the host where a container is located to a specified container path (the hostPath mode in Kubernetes) for persistent data storage. Alternatively, you can leave the source path empty (the emptyDir mode in Kubernetes), and a temporary directory of the host will be mounted to the mount point of the container for temporary storage.

-

Using Local Volumes

CCE supports four types of local volumes.

-
-
  • hostPath: mounts a file directory of the host where the container is located to the specified mount point of the container. For example, if the container needs to access /etc/hosts, you can use a hostPath volume to map /etc/hosts.
  • emptyDir: stores data temporarily. An emptyDir volume is first created when a pod is assigned to a node, and exists as long as that pod is running on that node. When the pod is terminated, the emptyDir volume is deleted and its data is permanently lost.
  • ConfigMap: A ConfigMap can be mounted as a volume, and all contents stored in its key are mounted onto the specified container directory. A ConfigMap is a type of resource that stores configuration information required by a workload. Its content is user-defined. For details about how to create a ConfigMap, see Creating a ConfigMap. For details about how to use a ConfigMap, see Using a ConfigMap.
  • Secret: You can store sensitive information such as passwords, in secrets and mount them as files for use by pods. A secret is a type of resource that holds sensitive data, such as authentication and key information. All content is user-defined. For details about how to create a secret, see Creating a Secret. For details about how to use a secret, see Using a Secret.
-

The following describes how to mount these four types of volumes.

-

hostPath

You can mount a path on the host to a specified container path. A hostPath volume is usually used to store workload logs persistently or by workloads that need to access internal data structures of the Docker engine on the host.

-
-
  1. Log in to the CCE console.
  2. When creating a workload, click Data Storage in the Container Settings. Click the Local Volumes tab and click .
  3. Set parameters for adding a local volume, as listed in Table 1.

    -

    - - - - - - - - - - - - - -
    Table 1 Setting parameters for mounting a hostPath volume

    Parameter

    -

    Description

    -

    Storage Type

    -

    Select HostPath.

    -

    Host Path

    -

    Path of the host to which the local volume is to be mounted, for example, /etc/hosts.

    -
    NOTE:

    Host Path cannot be set to the root directory /. Otherwise, the mounting fails. Mount paths can be as follows:

    -
    • /opt/xxxx (excluding /opt/cloud)
    • /mnt/xxxx (excluding /mnt/paas)
    • /tmp/xxx
    • /var/xxx (excluding key directories such as /var/lib, /var/script, and /var/paas)
    • /xxxx (It cannot conflict with the system directory, such as bin, lib, home, root, boot, dev, etc, lost+found, mnt, proc, sbin, srv, tmp, var, media, opt, selinux, sys, and usr.)
    -

    Do not set this parameter to /home/paas, /var/paas, /var/lib, /var/script, /mnt/paas, or /opt/cloud. Otherwise, the system or node installation will fail.

    -
    -

    Add Container Path

    -

    Configure the following parameters:

    -
    1. subPath: Enter a subpath, for example, tmp.

      A subpath is used to mount a local disk so that the same data volume is used in a single pod. If this parameter is left blank, the root path is used by default.

      -
    2. Container Path: Enter the path of the container, for example, /tmp.
      This parameter indicates the container path to which a data volume will be mounted. Do not mount the volume to a system directory such as / or /var/run; this action may cause container errors. You are advised to mount the container to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
      NOTICE:

      When the container is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.

      -
      -
      -
    3. Permission
      • Read-only: You can only read the data volumes mounted to the path.
      • Read/Write: You can modify the data volumes mounted to the path. Newly written data is not migrated if the container is migrated, which may cause data loss.
      -
    -

    Click Add Container Path to add multiple settings. Then, click OK.

    -
    -
    -

-

emptyDir

emptyDir applies to temporary data storage, disaster recovery, and runtime data sharing. It will be deleted upon deletion or transfer of workload pods.

-
-
  1. Log in to the CCE console.
  2. When creating a workload, click Data Storage in the Container Settings. Click the Local Volumes tab and click .
  3. Set the local volume type to emptyDir and set parameters for adding a local volume, as described in Table 2.

    -

    - - - - - - - - - - - - - -
    Table 2 Setting parameters for mounting an emptyDir volume

    Parameter

    -

    Description

    -

    Storage Type

    -

    Select emptyDir.

    -

    Medium

    -
    • Default: Data is stored in hard disks, which is applicable to a large amount of data with low requirements on reading and writing efficiency.
    • Memory: Selecting this option can improve the running speed, but the storage capacity is subject to the memory size. This mode applies to scenarios where the amount of data is small and high read/write efficiency is required.
    -
    NOTE:
    • If you select Memory, any files you write will count against your container's memory limit. Pay attention to the memory quota. If the memory usage exceeds the threshold, OOM may occur.
    • If Memory is selected, the size of an emptyDir volume is 50% of the pod specifications and cannot be changed.
    • If Memory is not selected, emptyDir volumes will not occupy the system memory.
    -
    -

    Add Container Path

    -

    Configure the following parameters:

    -
    1. subPath: Enter a subpath, for example, tmp.

      A subpath is used to mount a local disk so that the same data volume is used in a single pod. If this parameter is left blank, the root path is used by default.

      -
    2. Container Path: Enter the path of the container, for example, /tmp.
      This parameter indicates the container path to which a data volume will be mounted. Do not mount the volume to a system directory such as / or /var/run; this action may cause container errors. You are advised to mount the container to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
      NOTICE:

      When the container is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.

      -
      -
      -
    3. Permission
      • Read-only: You can only read the data volumes mounted to the path.
      • Read/Write: You can modify the data volumes mounted to the path. Newly written data is not migrated if the container is migrated, which may cause data loss.
      -
    -

    Click Add Container Path to add multiple settings. Then, click OK.

    -
    -
    -

-
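For reference, an emptyDir volume expressed in YAML might look like the sketch below; the pod name and mount path are placeholders, and the Memory medium is shown only to illustrate the option described in Table 2:

    apiVersion: v1
    kind: Pod
    metadata:
      name: emptydir-example       # Hypothetical pod name.
    spec:
      containers:
      - name: container-0
        image: nginx
        volumeMounts:
        - name: cache-volume
          mountPath: /tmp          # Container path to which the volume is mounted.
      volumes:
      - name: cache-volume
        emptyDir:
          medium: Memory           # Omit this field to store data on disk (the Default medium).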

ConfigMap

The data stored in a ConfigMap can be referenced in a volume of type ConfigMap. You can mount such a volume to a specified container path. The platform supports the separation of workload codes and configuration files. ConfigMap volumes are used to store workload configuration parameters. Before that, you need to create ConfigMaps in advance. For details, see Creating a ConfigMap.

-
-
  1. Log in to the CCE console.
  2. When creating a workload, click Data Storage in the Container Settings. Click the Local Volumes tab and click .
  3. Set the local volume type to ConfigMap and set parameters for adding a local volume, as shown in Table 3.

    -

    - - - - - - - - - - - - - -
    Table 3 Setting parameters for mounting a ConfigMap volume

    Parameter

    -

    Description

    -

    Storage Type

    -

    Select ConfigMap.

    -

    Option

    -

    Select the desired ConfigMap name.

    -

    A ConfigMap must be created in advance. For details, see Creating a ConfigMap.

    -

    Add Container Path

    -

    Configure the following parameters:

    -
    1. subPath: Enter a subpath, for example, tmp.

      A subpath is used to mount a local disk so that the same data volume is used in a single pod. If this parameter is left blank, the root path is used by default.

      -
    2. Container Path: Enter the path of the container, for example, /tmp.
      This parameter indicates the container path to which a data volume will be mounted. Do not mount the volume to a system directory such as / or /var/run; this action may cause container errors. You are advised to mount the container to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
      NOTICE:

      When the container is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.

      -
      -
      -
    3. Set the permission to Read-only. Data volumes in the path are read-only.
    -

    Click Add Container Path to add multiple settings. Then, click OK.

    -
    -
    -

-
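A hedged YAML sketch of mounting a ConfigMap as a volume follows; the pod name, mount path, and ConfigMap name cce-configmap are assumptions, and the ConfigMap must be created in advance:

    apiVersion: v1
    kind: Pod
    metadata:
      name: configmap-example      # Hypothetical pod name.
    spec:
      containers:
      - name: container-0
        image: nginx
        volumeMounts:
        - name: config-volume
          mountPath: /etc/config   # Each key in the ConfigMap becomes a file in this directory.
          readOnly: true           # ConfigMap volumes are mounted read-only, as described above.
      volumes:
      - name: config-volume
        configMap:
          name: cce-configmap      # Assumed ConfigMap name; must exist in the same namespace.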

Secret

You can mount a secret as a volume to the specified container path. Contents in a secret are user-defined. Before that, you need to create a secret. For details, see Creating a Secret.

-
-
  1. Log in to the CCE console.
  2. When creating a workload, click Data Storage in the Container Settings. Click the Local Volumes tab and click .
  3. Set the local volume type to Secret and set parameters for adding a local volume, as shown in Table 4.

    -

    - - - - - - - - - - - - - -
    Table 4 Setting parameters for mounting a secret volume

    Parameter

    -

    Description

    -

    Storage Type

    -

    Select Secret.

    -

    Secret

    -

    Select the desired secret name.

    -

    A secret must be created in advance. For details, see Creating a Secret.

    -

    Add Container Path

    -

    Configure the following parameters:

    -
    1. subPath: Enter a subpath, for example, tmp.

      A subpath is used to mount a local disk so that the same data volume is used in a single pod. If this parameter is left blank, the root path is used by default.

      -
    2. Container Path: Enter the path of the container, for example, /tmp.
      This parameter indicates the container path to which a data volume will be mounted. Do not mount the volume to a system directory such as / or /var/run; this action may cause container errors. You are advised to mount the container to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
      NOTICE:

      When the container is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.

      -
      -
      -
    3. Set the permission to Read-only. Data volumes in the path are read-only.
    -

    Click Add Container Path to add multiple settings. Then, click OK.

    -
    -
    -

-
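Similarly, a hedged YAML sketch of mounting a secret as a volume; the pod name, mount path, and secret name mysecret are assumptions, and the secret must be created in advance:

    apiVersion: v1
    kind: Pod
    metadata:
      name: secret-example         # Hypothetical pod name.
    spec:
      containers:
      - name: container-0
        image: nginx
        volumeMounts:
        - name: secret-volume
          mountPath: /etc/secret   # Each key in the secret becomes a file in this directory.
          readOnly: true           # Secret volumes are mounted read-only, as described above.
      volumes:
      - name: secret-volume
        secret:
          secretName: mysecret     # Assumed secret name; must exist in the same namespace.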

Mounting a hostPath Volume Using kubectl

You can use kubectl to mount a file directory of the host where the container is located to a specified mount path of the container.

-
  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the hostPath-pod-example.yaml file, which is used to create a pod.

    touch hostPath-pod-example.yaml

    -

    vi hostPath-pod-example.yaml

    -

    Mount the hostPath volume for the Deployment. The following is an example:

    -
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: hostpath-pod-example
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: hostpath-pod-example
      template:
        metadata:
          labels:
            app: hostpath-pod-example
        spec:
          containers:
          - image: nginx
            name: container-0
            volumeMounts:
            - mountPath: /tmp
              name: hostpath-example
          imagePullSecrets:
            - name: default-secret
          restartPolicy: Always
          volumes:
          - name: hostpath-example
            hostPath:
              path: /tmp/test
    - -
    - - - - - - - - - - -
    Table 5 Local disk storage dependency parameters

    Parameter

    -

    Description

    -

    mountPath

    -

    Mount path of the container. In this example, the volume is mounted to the /tmp directory.

    -

    hostPath

    -

    Host path. In this example, the host path is /tmp/test.

    -
    -
    -

    spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

    -
    -

  3. Run the following command to create the pod:

    kubectl create -f hostPath-pod-example.yaml

    -

  4. Verify the mounting.

    1. Query the pod name of the workload (hostpath-pod-example is used as an example).
      kubectl get po|grep hostpath-pod-example
      -

      Expected outputs:

      -
      hostpath-pod-example-55c8d4dc59-md5d9   1/1     Running   0          35s
      -
    2. Create the test1 file in the container mount path /tmp.
      kubectl exec hostpath-pod-example-55c8d4dc59-md5d9 -- touch /tmp/test1
      -
    3. Verify that the file is created in the host path /tmp/test/.
      ll /tmp/test/
      -

      Expected outputs:

      -
      -rw-r--r--  1 root root    0 Jun  1 16:12 test1
      -
    4. Create the test2 file in the host path /tmp/test/.
      touch /tmp/test/test2
      -
    5. Verify that the file is created in the container mount path.
      kubectl exec hostpath-pod-example-55c8d4dc59-md5d9 -- ls -l /tmp
      -

      Expected outputs:

      -
      -rw-r--r-- 1 root root 0 Jun  1 08:12 test1
      -rw-r--r-- 1 root root 0 Jun  1 08:14 test2
      -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0057.html b/docs/cce/umn/cce_01_0057.html deleted file mode 100644 index 274f2512..00000000 --- a/docs/cce/umn/cce_01_0057.html +++ /dev/null @@ -1,144 +0,0 @@ - - -

Scaling a Workload

-
  • Auto scaling: You can set metric-based, scheduled, and periodic policies. After configuration, pods can be automatically added or deleted based on resource changes or the specified schedule.
  • Manual scaling: Pods are immediately added or deleted after the configuration is complete.
-

Scaling policy priority: If you do not manually adjust the number of pods, auto scaling policies will take effect for resource scheduling. If manual scaling is triggered, auto scaling policies will be temporarily invalid.

-
-

Auto Scaling - HPA

HPA policies can be used for auto scaling. You can view all policies or perform more operations in Auto Scaling.

-
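Under the hood, an HPA policy is a standard Kubernetes HorizontalPodAutoscaler object. The following is a minimal, hedged sketch targeting a Deployment; the policy name, workload name, replica range, and threshold are assumptions (on older cluster versions you may need the autoscaling/v2beta2 API instead of autoscaling/v2):

    apiVersion: autoscaling/v2
    kind: HorizontalPodAutoscaler
    metadata:
      name: hpa-example            # Hypothetical policy name.
    spec:
      scaleTargetRef:
        apiVersion: apps/v1
        kind: Deployment
        name: nginx                # Assumed workload to scale.
      minReplicas: 1
      maxReplicas: 10
      metrics:
      - type: Resource
        resource:
          name: cpu
          target:
            type: Utilization
            averageUtilization: 70 # Scale out when average CPU usage exceeds 70%.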
-

Auto Scaling - AOM

You can define auto scaling policies as required, which can intelligently adjust resources in response to service changes and data traffic spikes.

-

Auto scaling can be backed by Application Operations Management (AOM). However, AOM-based scaling policies are not supported in clusters of v1.17 and later.

-

Currently, CCE supports the following types of auto scaling policies:

-

Metric-based policy: After a workload is created, pods will be automatically scaled when the workload's CPU or memory usage exceeds or falls below a preset limit.

-

Scheduled policy: scaling at a specified time. Scheduled auto scaling is applicable to flash sales, premier shopping events, and other regular events that bring a high burst of traffic load.

-

Periodic policy: scaling at a specified time on a daily, weekly, or monthly basis. Periodic scheduling is applicable to scenarios where traffic changes periodically.

-
-
  • Metric-based policy: Supports auto scaling of a workload based on the CPU/memory usage.
    1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments or StatefulSets. In the same row as the target workload, choose More > Scaling.
    2. In the Auto Scaling area, click Add Scaling Policy.
    3. Set the policy parameters as listed in Table 1. -
      - - - - - - - - - - - - - - - - - - - - - - - - - -
      Table 1 Parameters for adding a metric-based policy

      Parameter

      -

      Description

      -

      Policy Name

      -

      Enter the name of the scaling policy.

      -

      The policy name must be 1 to 64 characters in length and start with a letter. Only letters, digits, underscores (_), and hyphens (-) are allowed.

      -

      Policy Type

      -

      Set this parameter to Metric-based policy.

      -

      The alarm policy is triggered based on historical data. The system checks whether the indicators set by the user in the monitoring window meet the triggering conditions every minute. If the triggering conditions are met for N consecutive periods, the system performs the action specified by the policy.

      -

      Metric

      -

      Set the metrics that describe the resource performance data or status.

      -
      • CPU Usage: CPU usage of the measured object. The value is the percentage of the used CPU cores to the total CPU cores.
      • Physical Memory Usage: percentage of the physical memory size used by the measured object to the physical memory size that the measured object has applied for.
      -

      Trigger Condition

      -

      The value can be higher (>) or lower (<) than a threshold. When the usage of the preceding metrics reaches the specified value, the scaling policy is triggered.

      -

      For example, if Metric is set to CPU Usage and this parameter is set to > 70%, the scaling policy is triggered when the CPU usage exceeds 70%.

      -

      Monitoring window

      -

      Size of the data aggregation window.

      -

      If the value is set to 60, metric statistics are collected every 60 seconds.

      -

      Threshold Crossings

      -

      Number of consecutive times that the threshold is reached within the monitoring window. The calculation cycle is fixed at one minute.

      -

      If the parameter is set to 3, the action is triggered if the threshold is reached for three consecutive measurement periods.

      -

      Action

      -

      Action executed after a policy is triggered. Two actions are available: add or reduce pods.

      -
      -
      -
    4. Click OK.
    5. In the Auto Scaling area, check that the policy has been started.

      When the trigger condition is met, the auto scaling policy starts automatically.

      -
    -
  • Scheduled policy: scaling at a specified time.
    1. In the Auto Scaling area, click Add Scaling Policy. Select Scheduled policy. -
      - - - - - - - - - - - - - - - - -
      Table 2 Parameters for adding a scheduled policy

      Parameter

      -

      Description

      -

      Policy Name

      -

      Enter the name of the scaling policy.

      -

      The policy name must be 1 to 64 characters in length and start with a letter. Only letters, digits, underscores (_), and hyphens (-) are allowed.

      -

      Policy Type

      -

      Set this parameter to Scheduled policy.

      -

      Trigger Time

      -

      Time at which the policy is enforced.

      -

      Action

      -

      Action executed after a policy is triggered. Three actions are available: add pods, reduce pods, and set the number of pods.

      -
      -
      -
    2. Click OK.
    3. In the Auto Scaling area, check that the policy has been started.

      When the trigger time is reached, you can see on the Pods tab page that the auto scaling policy has taken effect.

      -
    -
  • Periodic policy: scaling at a specified time on a daily, weekly, or monthly basis.
    1. In the Auto Scaling area, click Add Scaling Policy. Select Periodic policy. -
      - - - - - - - - - - - - - - - - -
      Table 3 Parameters for adding a periodic policy

      Parameter

      -

      Description

      -

      Policy Name

      -

      Enter the name of the scaling policy.

      -

      The policy name must be 1 to 64 characters in length and start with a letter. Only letters, digits, underscores (_), and hyphens (-) are allowed.

      -

      Policy Type

      -

      Set this parameter to Periodic policy.

      -

      Time Range

      -

      Specify the time for triggering the policy.

      -

      Action

      -

      Action executed after a policy is triggered. Three actions are available: add pods, reduce pods, and set the number of pods.

      -
      -
      -
    2. Click OK.
    3. In the Auto Scaling area, check that the policy has been started.

      When the trigger condition is met, the auto scaling policy starts automatically.

      -
    -
-

Manual Scaling

  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments or StatefulSets. In the same row as the target workload, choose More > Scaling.
  2. In the Manual Scaling area, click and change the number of pods to, for example, 3. Then, click Save. The scaling takes effect immediately.
  3. On the Pods tab page, check that a new pod is being created. When the pod status becomes Running, pod scaling is complete.
-
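If you have connected to the cluster with kubectl, manual scaling can also be done from the CLI; a minimal example (the workload name nginx is an assumption):

    kubectl scale deployment nginx --replicas=3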
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0059.html b/docs/cce/umn/cce_01_0059.html deleted file mode 100644 index 50e9139b..00000000 --- a/docs/cce/umn/cce_01_0059.html +++ /dev/null @@ -1,147 +0,0 @@ - - -

Network Policies

-

As the service logic becomes increasingly complex, many applications require network calls between modules. Traditional external firewalls or application-based firewalls cannot meet the requirements. Network policies are urgently needed between modules, service logic layers, or functional teams in a large cluster.

-

CCE has enhanced the Kubernetes-based network policy feature, allowing network isolation in a cluster by configuring network policies. This means that a firewall can be set between pods.

-

For example, to make a payment system accessible only to specified components for security purposes, you can configure network policies.

-

Notes and Constraints

  • Only clusters that use the tunnel network model support network policies.
  • Network isolation is not supported for IPv6 addresses.
  • Network policies do not support egress rules except for clusters of v1.23 or later.

    Egress rules are supported only in the following operating systems:

    -
    • EulerOS 2.9: kernel version 4.18.0-147.5.1.6.h541.eulerosv2r9.x86_64
    • CentOS 7.7: kernel version 3.10.0-1062.18.1.el7.x86_64
    • EulerOS 2.5: kernel version 3.10.0-862.14.1.5.h591.eulerosv2r7.x86_64
    -
  • If a cluster is upgraded to v1.23 in in-place mode, you cannot use egress rules because the node OS is not upgraded. In this case, reset the node.
-
-

Precautions

If no network policies have been configured for a workload, such as workload-1, other workloads in the same cluster can access workload-1.

-
-
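If you instead want to block all other workloads by default and then whitelist specific sources, a hedged sketch of a default-deny ingress policy is shown below. The pod label app=workload-1 is an assumption used only for illustration:

    apiVersion: networking.k8s.io/v1
    kind: NetworkPolicy
    metadata:
      name: deny-all-ingress
      namespace: default
    spec:
      podSelector:                 # Applies to pods with the app=workload-1 label (assumed label).
        matchLabels:
          app: workload-1
      policyTypes:
      - Ingress                    # With no ingress rules listed, all inbound traffic to the selected pods is denied.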

Using Ingress Rules

  • Using podSelector to specify the access scope
    apiVersion: networking.k8s.io/v1
    kind: NetworkPolicy
    metadata:
      name: test-network-policy
      namespace: default
    spec:
      podSelector:                  # The rule takes effect for pods with the role=db label.
        matchLabels:
          role: db
      ingress:                      # This is an ingress rule.
      - from:
        - podSelector:              # Only traffic from the pods with the role=frontend label is allowed.
            matchLabels:
              role: frontend
        ports:                      # Only TCP can be used to access port 6379.
        - protocol: TCP
          port: 6379
    -

    Diagram:

    -
    Figure 1 podSelector
    -
-
  • Using namespaceSelector to specify the access scope
    apiVersion: networking.k8s.io/v1
    kind: NetworkPolicy
    metadata:
      name: test-network-policy
    spec:
      podSelector:                  # The rule takes effect for pods with the role=db label.
        matchLabels:
          role: db
      ingress:                      # This is an ingress rule.
      - from:
        - namespaceSelector:        # Only traffic from the pods in the namespace with the "project=myproject" label is allowed.
            matchLabels:
              project: myproject
        ports:                      # Only TCP can be used to access port 6379.
        - protocol: TCP
          port: 6379
    -

    Figure 2 shows how namespaceSelector selects ingress sources.

    -
    Figure 2 namespaceSelector
    -
-
-

Using Egress Rules

Egress supports not only podSelector and namespaceSelector, but also ipBlock.

-

Only clusters of version 1.23 or later support egress rules. Currently, only EulerOS 2.5, EulerOS 2.9, and CentOS 7.7 nodes are supported.

-
-
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-client-a-via-except-cidr-egress-rule
  namespace: default
spec:
  policyTypes:                  # Must be specified for an egress rule.
    - Egress
  podSelector:                  # The rule takes effect for pods with the role=db label.
    matchLabels:
      role: db
  egress:                       # Egress rule
  - to:
    - ipBlock:
        cidr: 172.16.0.16/16    # Allow access to this CIDR block.
        except:
        - 172.16.0.40/32        # This CIDR block cannot be accessed. This value must fall within the range specified by cidr.
-

Diagram:

-
Figure 3 ipBlock
-

You can define ingress and egress in the same rule.

-
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: test-network-policy
  namespace: default
spec:
  policyTypes:
  - Ingress
  - Egress
  podSelector:                  # The rule takes effect for pods with the role=db label.
    matchLabels:
      role: db
  ingress:                      # Ingress rule
  - from:
    - podSelector:              # Only traffic from the pods with the "role=frontend" label is allowed.
        matchLabels:
          role: frontend
    ports:                      # Only TCP can be used to access port 6379.
    - protocol: TCP
      port: 6379
  egress:                       # Egress rule
  - to:
    - podSelector:              # Only pods with the role=web label can be accessed.
        matchLabels:
          role: web
-

Diagram:

-
Figure 4 Using both ingress and egress
-
-

Adding a Network Policy on the Console

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Network. On the Network Policies tab page, click Create Network Policy.

    • Network Policy Name: Specify a network policy name.
    • Cluster Name: Select a cluster to which the network policy belongs.
    • Namespace: Select a namespace in which the network policy is applied.
    • Workload

      Click Select Workload. In the dialog box displayed, select a workload for which the network policy is to be created, for example, workload-1. Then, click OK.

      -
    • Rules: Click Add Rule, set the parameters listed in Table 1, and click OK. -
      - - - - - - - - - - - - - - - - -
      Table 1 Parameters for adding a rule

      Parameter

      -

      Description

      -

      Direction

      -

      Only Inbound is supported, indicating that the whitelisted workloads access the current workload (workload-1 in this example).

      -

      Protocol

      -

      Select a protocol. Currently, the TCP and UDP protocols are supported. The ICMP protocol is not supported.

      -

      Destination Container Port

      -

      Specify a port on which the workload in the container image listens. For example, the Nginx application listens on port 80.

      -

      If no container port is specified, all ports can be accessed by default.

      -

      Whitelisted Workloads

      -

      Select other workloads that can access the current workload. These workloads will access the current workload at the destination container port.

      -
      • Namespace: All workloads in the selected namespace(s) are added to the whitelist. That is, all workloads in the namespace(s) can access workload-1.
      • Workload: The selected workloads can access workload-1. Only other workloads in the same namespace as workload-1 can be selected.
      -
      -
      -
    -

  2. Click Create.
  3. Repeat the preceding steps to add more network policies for the current workload when other ports need to be accessed by some workloads.

    After the network policies are created, only the specified workloads or workloads in the specified namespaces can access the current workload.

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0063.html b/docs/cce/umn/cce_01_0063.html deleted file mode 100644 index 46fcb364..00000000 --- a/docs/cce/umn/cce_01_0063.html +++ /dev/null @@ -1,25 +0,0 @@ - - -

Managing Node Scaling Policies

-

Scenario

After a node scaling policy is created, you can delete, edit, disable, enable, or clone the policy.

-
-

Viewing a Node Scaling Policy

You can view the associated node pool, rules, and scaling history of a node scaling policy and rectify faults according to the error information displayed.

-
  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Node Scaling tab page, click in front of the policy to be viewed.
  2. In the expanded area, the Associated Node Pool, Execution Rules, and Scaling Records tab pages are displayed. If the policy is abnormal, locate and rectify the fault based on the error information.

    You can also enable or disable auto scaling in Node Pools. Log in to the CCE console. In the navigation pane, choose Resource Management > Node Pools, and click Edit in the upper right corner of the node pool to be operated. In the Edit Node Pool dialog box displayed, you can enable Autoscaler and set the limits of the number of nodes.

    -
    -

-
-

Deleting a Node Scaling Policy

  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Node Scaling tab page, click Delete in the Operation column of the policy to be deleted.
  2. In the Delete Node Policy dialog box displayed, confirm whether to delete the policy.
  3. Enter DELETE in the text box.
  4. Click OK to delete the policy.
-
-

Editing a Node Scaling Policy

  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Node Scaling tab page, click Edit in the Operation column of the policy.
  2. On the Edit Node Scaling Policy page displayed, modify policy parameter values listed in Table 1.
  3. After the configuration is complete, click OK.
-
-

Cloning a Node Scaling Policy

  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Node Scaling tab page, click More > Clone in the Operation column of the policy.
  2. On the Create Node Scaling Policy page displayed, certain parameters have been cloned. Add or modify other policy parameters based on service requirements.
  3. Click Create Now to clone the policy. The cloned policy is displayed in the policy list on the Node Scaling tab page.
-
-

Enabling or Disabling a Node Scaling Policy

  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Node Scaling tab page, click More > Disable or Enable in the Operation column of the policy.
  2. In the dialog box displayed, confirm whether to disable or enable the node policy.
  3. Click Yes. The policy status is displayed in the node scaling list.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0064.html b/docs/cce/umn/cce_01_0064.html deleted file mode 100644 index 42705e9c..00000000 --- a/docs/cce/umn/cce_01_0064.html +++ /dev/null @@ -1,23 +0,0 @@ - - -

Add-ons

-
-
- -
- diff --git a/docs/cce/umn/cce_01_0066.html b/docs/cce/umn/cce_01_0066.html deleted file mode 100644 index 752fedd9..00000000 --- a/docs/cce/umn/cce_01_0066.html +++ /dev/null @@ -1,25 +0,0 @@ - - -

everest (System Resource Add-on, Mandatory)

-

Introduction

Everest is a cloud-native container storage system. Based on Container Storage Interface (CSI), clusters of Kubernetes v1.15 or later can interconnect with cloud storage services such as EVS, OBS, SFS, and SFS Turbo.

-

everest is a system resource add-on. It is installed by default when a cluster of Kubernetes v1.15 or later is created.

-
-

Notes and Constraints

  • If your cluster is upgraded from v1.13 to v1.15, storage-driver is replaced by everest (v1.1.6 or later) for container storage. The takeover does not affect the original storage functions. For details about CSI and FlexVolume, see Differences Between CSI and FlexVolume Plug-ins.
  • In version 1.2.0 of the everest add-on, key authentication is optimized when OBS is used. After the everest add-on is upgraded from a version earlier than 1.2.0, you need to restart all workloads that use OBS in the cluster. Otherwise, workloads may not be able to use OBS.
  • By default, this add-on is installed in clusters of v1.15 and later. For clusters of v1.13 and earlier, the storage-driver add-on is installed by default.
-
-

Installing the Add-on

This add-on has been installed by default. If it is uninstalled due to some reasons, you can reinstall it by performing the following steps:

-
  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Marketplace tab page, click Install Add-on under everest.
  2. On the Install Add-on page, select the cluster and the add-on version, and click Next: Configuration.
  3. Select Single or HA for Add-on Specifications, and click Install.

    After the add-on is installed, click Go Back to Previous Page. On the Add-on Instance tab page, select the corresponding cluster to view the running instance. This indicates that the add-on has been installed on each node in the cluster.

    -

-
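To double-check from the CLI that the add-on is running, you can list its pods; this assumes the everest components run in the kube-system namespace and have names containing "everest", which is the usual layout but not stated in this document:

    kubectl get pods -n kube-system | grep everest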
-

Upgrading the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Instance tab page, click Upgrade under everest.

    • If the Upgrade button is unavailable, the current add-on is already up-to-date and no upgrade is required.
    • When the upgrade is complete, the original everest version on cluster nodes will be replaced by the latest version.
    -
    -

  2. On the Basic Information page, select the add-on version and click Next.
  3. Select Single or HA for Add-on Specifications, and click Upgrade.
-
-

Uninstalling the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Instance tab page, click Uninstall under everest.
  2. In the dialog box displayed, click Yes to uninstall the add-on.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0068.html b/docs/cce/umn/cce_01_0068.html deleted file mode 100644 index 58318d38..00000000 --- a/docs/cce/umn/cce_01_0068.html +++ /dev/null @@ -1,68 +0,0 @@ - - -

CCE Kubernetes Release Notes

-

CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. To enable interoperability from one Kubernetes installation to the next, you must upgrade your Kubernetes clusters before the maintenance period ends.

-

After the latest Kubernetes version is released, CCE will provide you with a description of the changes in that version. For details, see Table 1.

- -
- - - - - - - - - - - - - - - - - - - - - -
Table 1 Cluster version differences

Source Version

-

Target Version

-

Description

-

v1.19

-

v1.21

-
-

v1.17

-

v1.19

-
-

v1.15

-

v1.17

-
-

v1.13

-

v1.15

-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0081.html b/docs/cce/umn/cce_01_0081.html deleted file mode 100644 index 5387f3ce..00000000 --- a/docs/cce/umn/cce_01_0081.html +++ /dev/null @@ -1,144 +0,0 @@ - - -

Node Pool Overview

-

Introduction

CCE introduces node pools to help you better manage nodes in Kubernetes clusters. A node pool contains one node or a group of nodes with identical configuration in a cluster.

-

You can create custom node pools on the CCE console. With node pools, you can quickly create, manage, and destroy nodes without affecting the cluster. All nodes in a custom node pool have identical parameters and node type. You cannot configure a single node in a node pool; any configuration changes affect all nodes in the node pool.

-

You can also use node pools for auto scaling.

-
  • When a pod in a cluster cannot be scheduled due to insufficient resources, scale-out can be automatically triggered.
  • When there is an idle node or a monitoring metric threshold is met, scale-in can be automatically triggered.
-

This section describes how node pools work in CCE and how to create and manage node pools.

-
-

Node Pool Architecture

Figure 1 Overall architecture of a node pool
-

Generally, all nodes in a node pool have the following same attributes:

-
  • Node OS
  • Startup parameters of Kubernetes components on a node
  • User-defined startup script of a node
  • K8S Labels and Taints
-

CCE provides the following extended attributes for node pools:

-
  • Node pool OS
  • Maximum number of pods on each node in a node pool
-
-

Description of DefaultPool

DefaultPool is not a real node pool. It only classifies nodes that are not in any node pool. These nodes are directly created on the console or by calling APIs. DefaultPool does not support any node pool functions, including scaling and parameter configuration. DefaultPool cannot be edited, deleted, expanded, or auto scaled, and nodes in it cannot be migrated.

-
-

Applicable Scenarios

When a large-scale cluster is required, you are advised to use node pools to manage nodes.

-

The following table describes multiple scenarios of large-scale cluster management and the functions of node pools in each scenario.

- -
- - - - - - - - - - - - - -
Table 1 Using node pools for different management scenarios

Scenario

-

Function

-

Multiple heterogeneous nodes (with different models and configurations) in the cluster

-

Nodes can be grouped into different pools for management.

-

Frequent node scaling required in a cluster

-

Node pools support auto scaling to dynamically add or reduce nodes.

-

Complex application scheduling rules in a cluster

-

Node pool tags can be used to quickly specify service scheduling rules.

-
-
-
-

Functions and Precautions

-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Function

-

Description

-

Notes

-

Creating a node pool

-

Add a node pool.

-

It is recommended that a cluster contain no more than 100 node pools.

-

Deleting a node pool

-

Deleting a node pool will delete nodes in the pool. Pods on these nodes will be automatically migrated to available nodes in other node pools.

-

If pods in the node pool have a specific node selector and none of the other nodes in the cluster satisfies the node selector, the pods will become unschedulable.

-

Enabling auto scaling for a node pool

-

After auto scaling is enabled, nodes will be automatically created or deleted in the node pool based on the cluster loads.

-

You are advised not to store important data on nodes in a node pool because after auto scaling, data cannot be restored as nodes may be deleted.

-

Disabling auto scaling for a node pool

-

After auto scaling is disabled, the number of nodes in a node pool will not automatically change with the cluster loads.

-

/

-

Adjusting the size of a node pool

-

The number of nodes in a node pool can be directly adjusted. If the number of nodes is reduced, nodes are randomly removed from the current node pool.

-

After auto scaling is enabled, you are not advised to manually adjust the node pool size.

-

Changing node pool configurations

-

You can modify the node pool name, node quantity, Kubernetes labels, taints, and resource tags.

-

The modified Kubernetes labels and taints will apply to all nodes in the node pool, which may cause pod re-scheduling. Therefore, exercise caution when performing this operation.

-

Adding an existing node to a node pool

-

Nodes that do not belong to the cluster can be added to a node pool. The following requirements must be met:

-
  • The node to be added and the CCE cluster are in the same VPC and subnet.
  • The node is not used by other clusters and has the same configurations (such as specifications and billing mode) as the node pool.
-

Unless required, you are not advised to add existing nodes. You are advised to create a node pool.

-

Removing a node from a node pool

-

Nodes in a node pool can be migrated to the default node pool of the same cluster.

-

Nodes in the default node pool cannot be migrated to other node pools, and nodes in a user-created node pool cannot be migrated to other user-created node pools.

-

Cloning a node pool

-

You can copy the configuration of an existing node pool to create a new node pool.

-

/

-

Setting Kubernetes parameters

-

You can configure core components with fine granularity.

-
  • This function is supported only for clusters of v1.15 and later. It is not displayed for versions earlier than v1.15.
  • The default node pool DefaultPool does not support this type of configuration.
-
-
-
-

Deploying a Workload in a Specified Node Pool

When creating a workload, you can constrain pods to run in a specified node pool.

-

For example, on the CCE console, you can set the affinity between the workload and the node on the Scheduling Policies tab page on the workload details page to forcibly deploy the workload to a specific node pool. In this way, the workload runs only on nodes in the node pool. If you need to better control where the workload is to be scheduled, you can use affinity or anti-affinity policies between workloads and nodes described in Scheduling Policy Overview.

-

For another example, you can rely on the container's resource requests so that workloads will run only on nodes that have enough allocatable resources to meet those requests.

-

If the workload definition file defines a container that requires four CPUs, the scheduler will not choose the nodes with two CPUs to run workloads.

-
-
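A hedged YAML sketch of pinning a workload to a node pool and of the resource-request behavior described above follows. The node label key/value (pool: gpu-pool), workload name, and CPU request are assumptions used only for illustration; use the actual Kubernetes labels configured on your node pool:

    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx-pool-example     # Hypothetical workload name.
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: nginx-pool-example
      template:
        metadata:
          labels:
            app: nginx-pool-example
        spec:
          nodeSelector:
            pool: gpu-pool         # Assumed Kubernetes label added to the nodes of the target node pool.
          containers:
          - name: container-0
            image: nginx
            resources:
              requests:
                cpu: "4"           # With this request, nodes that have only 2 CPUs will not be chosen.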

Related Operations

You can log in to the CCE console and refer to the following sections to perform operations on node pools:

- -
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0083.html b/docs/cce/umn/cce_01_0083.html deleted file mode 100644 index 8e8ce9a2..00000000 --- a/docs/cce/umn/cce_01_0083.html +++ /dev/null @@ -1,107 +0,0 @@ - - -

Managing Workload Scaling Policies

-

Scenario

After an HPA policy is created, you can update, clone, edit, and delete the policy, as well as edit the YAML file.

-
-

Checking an HPA Policy

You can view the rules, status, and events of an HPA policy and handle exceptions based on the error information displayed.

-
  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Workload Scaling tab page, click in front of the target policy.
  2. In the expanded area, you can view the Rules, Status, and Events tab pages. If the policy is abnormal, locate and rectify the fault based on the error information.

    You can also view the created HPA policy on the workload details page. Log in to the CCE console, choose Workloads > Deployments or Workloads > StatefulSets in the navigation pane, and choose More > Scaling in the Operation column. On the workload details page, click the Scaling tab. You can see the Auto Scaling-HPA pane, as well as the HPA policy you have configured on the Auto Scaling page.

    -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Table 1 Event types and names

    Event Type

    -

    Event Name

    -

    Description

    -

    Normal

    -

    SuccessfulRescale

    -

    The scaling is performed successfully.

    -

    Abnormal

    -

    InvalidTargetRange

    -

    Invalid target range.

    -

    InvalidSelector

    -

    Invalid selector.

    -

    FailedGetObjectMetric

    -

    Objects fail to be obtained.

    -

    FailedGetPodsMetric

    -

    Pods fail to be obtained.

    -

    FailedGetResourceMetric

    -

    Resources fail to be obtained.

    -

    FailedGetExternalMetric

    -

    External metrics fail to be obtained.

    -

    InvalidMetricSourceType

    -

    Invalid metric source type.

    -

    FailedConvertHPA

    -

    HPA conversion failed.

    -

    FailedGetScale

    -

    The scale fails to be obtained.

    -

    FailedComputeMetricsReplicas

    -

    Failed to calculate metric-defined replicas.

    -

    FailedGetScaleWindow

    -

    Failed to obtain ScaleWindow.

    -

    FailedRescale

    -

    Failed to scale the service.

    -
    -
    -

-
-
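You can also inspect the same status and events with kubectl if you have connected to the cluster; for example (hpa-example is an assumed policy name):

    kubectl get hpa hpa-example
    kubectl describe hpa hpa-example    # The Events section lists records such as SuccessfulRescale.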

Updating an HPA Policy

An HPA policy is used as an example.

-
  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Workload Scaling tab page, click Update in the Operation column of the policy to be updated.
  2. On the Update HPA Policy page displayed, set the policy parameters listed in Table 1.
  3. Click Update.
-
-

Cloning an HPA Policy

  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Workload Scaling tab page, click Clone in the Operation column of the target policy.
  2. For example, for an HPA policy, on the Create HPA Policy page, you can view that parameters such as Pod Range, Cooldown Period, and Rules have been cloned. Add or modify other policy parameters as needed.
  3. Click Create to complete policy cloning. On the Workload Scaling tab page, you can view the cloned policy in the policy list.
-
-

Editing the YAML File (HPA Policy)

  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Workload Scaling tab page, choose More > Edit YAML in the Operation column of the target policy.
  2. In the Edit YAML dialog box displayed, edit or download the YAML file.
  3. Click the close button in the upper right corner.
-
-

Deleting an HPA Policy

  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Workload Scaling tab page, choose More > Delete in the Operation column of the target policy.
  2. In the Delete HPA Policy dialog box displayed, confirm whether to delete the HPA policy.
  3. Click Yes to delete the policy.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0085.html b/docs/cce/umn/cce_01_0085.html deleted file mode 100644 index 984889cb..00000000 --- a/docs/cce/umn/cce_01_0085.html +++ /dev/null @@ -1,49 +0,0 @@ - - -

Controlling Cluster Permissions

-

Scenario

This section describes how to control permissions on resources in a cluster, for example, allow user A to read and write application data in a namespace, and user B to only read resource data in a cluster.

-
-

Procedure

  1. If you need to perform permission control on the cluster, select Enhanced authentication for Authentication Mode during cluster creation, upload your own CA certificate, client certificate, and client certificate private key (for details about how to create a certificate, see Certificates), and select I have confirmed that the uploaded certificates are valid. For details, see Table 1.

    • Upload a file smaller than 1 MB. The CA certificate and client certificate can be in .crt or .cer format. The private key of the client certificate can only be uploaded unencrypted.
    • The validity period of the client certificate must be longer than five years.
    • The uploaded CA certificate is used for both the authentication proxy and the kube-apiserver aggregation layer configuration. If the certificate is invalid, the cluster cannot be created.
    -
    -

  2. Create a role using kubectl.

    The following example shows how to create a role and allow the role to read all pods in the default namespace. For details about the parameters, see the official Kubernetes documentation.
    kind: Role
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      namespace: default
      name: pod-reader
    rules:
    - apiGroups: [""]
      resources: ["pods"]
      verbs: ["get", "watch", "list"]
    -
    -

  3. Bind the role to a user by using kubectl.

    In the following example, the RoleBinding assigns the role of pod-reader in the default namespace to user jane. This policy allows user jane to read all pods in the default namespace. For details about the parameters, see the official Kubernetes documentation.
    kind: RoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: read-pods
      namespace: default
    subjects:
    - kind: User
      name: jane   # User name
      apiGroup: rbac.authorization.k8s.io
    roleRef:
      kind: Role
      name: pod-reader    # Name of the role that is created
      apiGroup: rbac.authorization.k8s.io
    -
    -

  4. After a role is created and bound to a user, call a Kubernetes API by initiating an API request message where headers carry user information and the certificate uploaded during cluster creation. For example, to call the pod query API, run the following command:

    curl -k -H "X-Remote-User: jane" --cacert /root/tls-ca.crt --key /root/tls.key --cert /root/tls.crt https://192.168.23.5:5443/api/v1/namespaces/default/pods

    -

    If 200 is returned, user jane is authorized to read pods in the cluster's default namespace. If 403 is returned, user jane is not authorized to read pods in the cluster's default namespace.

    -

    To prevent the command execution failure, upload the certificate to the /root directory in advance.

    -
    -

    The parameter descriptions are as follows:

    -
    • X-Remote-User: jane: The request header is fixed at X-Remote-User, and jane is the username.
    • tls-ca.crt: CA root certificate uploaded during cluster creation.
    • tls.crt: client certificate that matches the CA root certificate uploaded during cluster creation.
    • tls.key: client key corresponding to the CA root certificate uploaded during cluster creation.
    • 192.168.23.5:5443: address for connecting to the cluster. To obtain the address, perform the following steps:

      Log in to the CCE console. In the navigation pane, choose Resource Management > Clusters. Click the name of the cluster to be connected and obtain the IP address and port number from Internal API Server Address on the cluster details page.

      -
      Figure 1 Obtaining the access address
      -
    -

    In addition, the X-Remote-Group header field, that is, the user group name, is supported. During role binding, a role can be bound to a group and carry user group information when you access the cluster.

    -
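    A hedged example of carrying group information in the request is shown below; it assumes a group named dev-group has been bound to a role through a RoleBinding (the group name is an assumption):

    curl -k -H "X-Remote-User: jane" -H "X-Remote-Group: dev-group" --cacert /root/tls-ca.crt --key /root/tls.key --cert /root/tls.crt https://192.168.23.5:5443/api/v1/namespaces/default/pods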

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0105.html b/docs/cce/umn/cce_01_0105.html deleted file mode 100644 index b702311d..00000000 --- a/docs/cce/umn/cce_01_0105.html +++ /dev/null @@ -1,209 +0,0 @@ - - -

Setting Container Lifecycle Parameters

-

Scenario

CCE provides callback functions for the lifecycle management of containerized applications. For example, if you want a container to perform a certain operation before stopping, you can register a hook function.

-

CCE provides the following lifecycle callback functions:

-
  • Start Command: executed to start a container. For details, see Setting Container Startup Commands.
  • Post-Start: executed immediately after a container is started. For details, see Post-Start Processing.
  • Pre-Stop: executed before a container is stopped. The pre-stop processing function helps you ensure that the services running on the pods can be completed in advance in the case of pod upgrade or deletion. For details, see Pre-Stop Processing.
-
-

Commands and Parameters Used to Run a Container

A Docker image has metadata that stores image information. If lifecycle commands and arguments are not set, CCE runs the default commands and arguments, that is, Docker instructions ENTRYPOINT and CMD, provided during image creation.

-

If the commands and arguments used to run a container are set during application creation, the default commands ENTRYPOINT and CMD are overwritten during image build. The rules are as follows:

Table 1 Commands and parameters used to run a container

  Image Entrypoint | Image CMD    | Command to Run a Container | Parameters to Run a Container | Command Executed
  [touch]          | [/root/test] | Not set                    | Not set                       | [touch /root/test]
  [touch]          | [/root/test] | [mkdir]                    | Not set                       | [mkdir]
  [touch]          | [/root/test] | Not set                    | [/opt/test]                   | [touch /opt/test]
  [touch]          | [/root/test] | [mkdir]                    | [/opt/test]                   | [mkdir /opt/test]
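For reference, the last row of the table maps to a pod spec like the following minimal sketch, where command overrides the image ENTRYPOINT and args overrides the image CMD. The pod name and image name are placeholders, assuming an image built with ENTRYPOINT ["touch"] and CMD ["/root/test"].

apiVersion: v1
kind: Pod
metadata:
  name: command-demo                 # Example pod name
spec:
  restartPolicy: Never
  containers:
  - name: demo
    image: <your-image>              # Assumed image with ENTRYPOINT ["touch"] and CMD ["/root/test"]
    command: ["mkdir"]               # Overrides the image ENTRYPOINT
    args: ["/opt/test"]              # Overrides the image CMD; the executed command is "mkdir /opt/test"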

Startup Commands

By default, the default command defined in the image is run when the container starts. To run a specific command or overwrite the default image settings, configure the container startup commands. For details, see Setting Container Startup Commands.

-
-

Post-Start Processing

  1. Log in to the CCE console. Expand Lifecycle when adding a container during workload creation.
  2. Set the post-start processing parameters, as listed in Table 2.

    -

    Table 2 Post-start processing parameters

    CLI

    Set commands to be executed in the container for post-start processing. The command format is Command Args[1] Args[2].... Command is a system command or a user-defined executable program. If no path is specified, an executable program in the default path will be selected. If multiple commands need to be executed, write the commands into a script for execution. Commands that are executed in the background or asynchronously are not supported.

    Example command:

    exec:
      command:
      - /install.sh
      - install_agent

    Enter /install.sh install_agent in the script. This command indicates that install.sh will be executed after the container is created successfully.

    HTTP request

    Send an HTTP request for post-start processing (an httpGet YAML sketch is provided after the Pre-Stop Processing section). The related parameters are described as follows:
    • Path: (optional) request URL.
    • Port: (mandatory) request port.
    • Host Address: (optional) IP address of the request. The default value is the IP address of the node where the container resides.

-
-

Pre-Stop Processing

  1. When creating a workload and adding a container, expand Lifecycle.
  2. Set the pre-stop parameters, as listed in Table 3.

    Table 3 Pre-stop parameters

    CLI

    Set commands to be executed in the container for pre-stop processing. The command format is Command Args[1] Args[2].... Command is a system command or a user-defined executable program. If no path is specified, an executable program in the default path will be selected. If multiple commands need to be executed, write the commands into a script for execution.

    Example command:

    exec:
      command:
      - /uninstall.sh
      - uninstall_agent

    Enter /uninstall.sh uninstall_agent in the script. This command indicates that the uninstall.sh script will be executed before the container completes its execution and stops running.

    HTTP request

    Send an HTTP request for pre-stop processing (an httpGet YAML sketch is provided after this section). The related parameters are described as follows:
    • Path: (optional) request URL.
    • Port: (mandatory) request port.
    • Host Address: (optional) IP address of the request. The default value is the IP address of the node where the container resides.

-
-
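For the HTTP request mode described in the tables above, the hook is expressed as an httpGet handler in YAML. The following is a minimal sketch of a container with HTTP-based post-start and pre-stop hooks; the paths, port, and image are example values only.

containers:
- name: nginx
  image: nginx
  lifecycle:
    postStart:
      httpGet:
        path: /post-start            # Example request URL
        port: 80                     # Request port
    preStop:
      httpGet:
        path: /pre-stop              # Example request URL
        port: 80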

Container Restart Policy

The restartPolicy field is used to specify the pod restart policy. The restart policy type can be Always, OnFailure, or Never. The default value is Always.

-

When restartPolicy is used, containers are restarted only through kubelet on the same node.

Restart Policy | Description
Always         | When a container fails, kubelet automatically restarts the container.
OnFailure      | When the container stops running and the exit code is not 0, kubelet automatically restarts the container.
Never          | kubelet does not restart the container regardless of the container running status.

Controllers that can manage pods include ReplicaSet controllers, Jobs, DaemonSets, and kubelet (for static pods).

-
  • ReplicaSet Controller and DaemonSet: The policy must be set to Always to ensure that containers run continuously.
  • Job: The policy can be set to OnFailure or Never to ensure that containers are not restarted after being executed. A minimal Job sketch is provided after this list.
  • kubelet (static pods): kubelet restarts a failed static pod regardless of the value of restartPolicy. In addition, no health check is performed on the pod.
-
-
-
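As a reference for the Job case above, the following minimal sketch sets restartPolicy to OnFailure so that the container is restarted only when it exits with a non-zero code. The Job name, image, and command are example values.

apiVersion: batch/v1
kind: Job
metadata:
  name: pi-job                        # Example Job name
spec:
  template:
    spec:
      restartPolicy: OnFailure        # Restart only on a non-zero exit code
      containers:
      - name: pi
        image: perl
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(100)"]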

Example YAML for Setting the Container Lifecycle

This section uses Nginx as an example to describe how to set the container lifecycle.

-
  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Create and edit the nginx-deployment.yaml file. nginx-deployment.yaml is an example file name, and you can change it as required.

    vi nginx-deployment.yaml

    -

    In the following configuration file, the postStart hook is defined to run the install.sh script using /bin/bash, and the preStop hook is defined to run the uninstall.sh script.

    -
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: nginx
      strategy:
        type: RollingUpdate
      template:
        metadata:
          labels:
            app: nginx
        spec:
          restartPolicy: Always               # Restart policy
          containers:
          - image: nginx
            command:                          # Startup command
            - sleep
            - "3600"
            imagePullPolicy: Always
            lifecycle:
              postStart:
                exec:
                  command:
                  - /bin/bash
                  - install.sh                # Post-start command
              preStop:
                exec:
                  command:
                  - /bin/bash
                  - uninstall.sh              # Pre-stop command
            name: nginx
          imagePullSecrets:
          - name: default-secret
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0107.html b/docs/cce/umn/cce_01_0107.html deleted file mode 100644 index 13076760..00000000 --- a/docs/cce/umn/cce_01_0107.html +++ /dev/null @@ -1,39 +0,0 @@ - - -

Connecting to a Cluster Using kubectl

-

Scenario

This section uses a CCE cluster as an example to describe how to connect to a CCE cluster using kubectl.

-
-

Permission Description

When you access a cluster using kubectl, CCE uses the kubeconfig.json file generated on the cluster for authentication. This file contains user information, based on which CCE determines which Kubernetes resources can be accessed by kubectl. The permissions recorded in a kubeconfig.json file vary from user to user.

-

For details about user permissions, see Cluster Permissions (IAM-based) and Namespace Permissions (Kubernetes RBAC-based).

-
-

Using kubectl

Background

-

To connect a client to a Kubernetes cluster, you can use kubectl. For details, see Install Tools.

-

Prerequisites

-
CCE allows you to access a cluster through a VPC network or a public network.
  • VPC internal access: Clusters in the same VPC can access each other.
  • Public network access: You need to prepare an ECS that can connect to a public network.
-
-

If public network access is used, the kube-apiserver of the cluster will be exposed to the public network and may be attacked. You are advised to configure Advanced Anti-DDoS for the EIP of the node where the kube-apiserver is located.

-
-

Downloading kubectl

-

You need to download kubectl and the configuration file, copy the file to your client, and configure kubectl. After the configuration is complete, you can use kubectl to access your Kubernetes clusters.

-

On the Kubernetes release page, click the corresponding link based on the cluster version, click Client Binaries, and download the corresponding platform software package.

-
Figure 1 Downloading kubectl
-

Installing and configuring kubectl

-
  1. Log in to the CCE console, click Resource Management > Clusters, and choose Command Line Tool > Kubectl under the cluster to be connected.
  2. On the Kubectl tab page of the cluster details page, connect to the cluster as prompted.

    • You can download the kubectl configuration file (kubeconfig.json) on the kubectl tab page. This file is used for user cluster authentication. If the file is leaked, your clusters may be attacked.
    • If two-way authentication is enabled for the current cluster and an EIP has been bound to the cluster, when the authentication fails (error message "x509: certificate is valid for..."), you need to bind the EIP again and download the kubeconfig.json file again.
    • By default, two-way authentication is disabled for domain names in the current cluster. You can run the kubectl config use-context externalTLSVerify command to enable two-way authentication. For details, see Two-Way Authentication for Domain Names. For a cluster that has been bound to an EIP, if the authentication fails (error message "x509: certificate is valid for...") when two-way authentication is used, you need to bind the EIP again and download kubeconfig.json again.
    • The Kubernetes permissions assigned by the configuration file downloaded by IAM users are the same as those assigned to the IAM users on the CCE console.
    • If the KUBECONFIG environment variable is configured in the Linux OS, kubectl preferentially loads the KUBECONFIG environment variable instead of $home/.kube/config.
    -
    -

-
-
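For reference, configuring kubectl on a Linux client after downloading the kubectl binary and the kubeconfig.json file typically looks like the following; the file locations are example values and may differ from what the console prompts.

chmod +x kubectl
mv -f kubectl /usr/local/bin
mkdir -p $HOME/.kube
mv -f kubeconfig.json $HOME/.kube/config
kubectl cluster-info                 # Verify that the cluster can be accessed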

Two-Way Authentication for Domain Names

Currently, CCE supports two-way authentication for domain names.

-
  • Two-way authentication is disabled for domain names by default. You can run the kubectl config use-context externalTLSVerify command to switch to the externalTLSVerify context to enable it.
  • When an EIP is bound to or unbound from a cluster, or a custom domain name is configured or updated, the cluster server certificate is updated to include the latest cluster access addresses (the EIP bound to the cluster and all custom domain names configured for the cluster).
  • The certificate is updated asynchronously, which takes about 5 to 10 minutes.
  • For a cluster that has been bound to an EIP, if the authentication fails (error message "x509: certificate is valid for...") when two-way authentication is used, bind the EIP again and download kubeconfig.json again.
  • If two-way authentication is not supported for the domain names, kubeconfig.json contains the "insecure-skip-tls-verify": true field, as shown in Figure 2. To use two-way authentication, download the kubeconfig.json file again and enable two-way authentication for the domain names.
    Figure 2 Two-way authentication disabled for domain names
    -
-
-

Common Issue (Error from server Forbidden)

When you use kubectl to create or query Kubernetes resources, the following output is returned:

-

# kubectl get deploy
Error from server (Forbidden): deployments.apps is forbidden: User "0c97ac3cb280f4d91fa7c0096739e1f8" cannot list resource "deployments" in API group "apps" in the namespace "default"

-

The cause is that the user does not have the permissions to operate the Kubernetes resources. For details about how to assign permissions, see Namespace Permissions (Kubernetes RBAC-based).

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0110.html b/docs/cce/umn/cce_01_0110.html deleted file mode 100644 index 704f4b5d..00000000 --- a/docs/cce/umn/cce_01_0110.html +++ /dev/null @@ -1,14 +0,0 @@ - - -

Monitoring and Logs

-

-
-
- -
- diff --git a/docs/cce/umn/cce_01_0111.html b/docs/cce/umn/cce_01_0111.html deleted file mode 100644 index c0f06569..00000000 --- a/docs/cce/umn/cce_01_0111.html +++ /dev/null @@ -1,23 +0,0 @@ - - - -

SFS Volumes

- -

-
- -
- - - -
- diff --git a/docs/cce/umn/cce_01_0112.html b/docs/cce/umn/cce_01_0112.html deleted file mode 100644 index 6469710c..00000000 --- a/docs/cce/umn/cce_01_0112.html +++ /dev/null @@ -1,50 +0,0 @@ - - -

Setting Health Check for a Container

-

Scenario

Health check regularly checks the health status of containers during container running. If the health check function is not configured, a pod cannot detect service exceptions or automatically restart the service to restore it. This will result in a situation where the pod status is normal but the service in the pod is abnormal.

-

CCE provides the following health check probes:

-
  • Liveness probe: checks whether a container is still alive. It is similar to the ps command that checks whether a process exists. If the liveness check of a container fails, the cluster restarts the container. If the liveness check is successful, no operation is executed.
  • Readiness probe: checks whether a container is ready to process user requests. If a container is detected as not ready, service traffic will not be directed to it. Some applications take a long time to start up before they can provide services, for example, because they need to load disk data or wait for an external module to start. In this case, the application process is running but cannot provide services yet. This probe addresses that situation. If the container readiness check fails, the cluster masks all requests sent to the container. If the container readiness check is successful, the container can be accessed.
-
-

Health Check Methods

  • HTTP request

    This health check mode is applicable to containers that provide HTTP/HTTPS services. The cluster periodically initiates an HTTP/HTTPS GET request to such containers. If the return code of the HTTP/HTTPS response is within 200–399, the probe is successful. Otherwise, the probe fails. In this health check mode, you must specify a container listening port and an HTTP/HTTPS request path.

    -

    For example, for a container that provides HTTP services, the HTTP check path is /health-check, the port is 80, and the host address is optional (which defaults to the container IP address). Here, 172.16.0.186 is used as an example, and we can get such a request: GET http://172.16.0.186:80/health-check. The cluster periodically initiates this request to the container.

    -
  • TCP port

    For a container that provides TCP communication services, the cluster periodically establishes a TCP connection to the container. If the connection is successful, the probe is successful. Otherwise, the probe fails. In this health check mode, you must specify a container listening port.

    -

    For example, if you have a Nginx container with service port 80, after you specify TCP port 80 for container listening, the cluster will periodically initiate a TCP connection to port 80 of the container. If the connection is successful, the probe is successful. Otherwise, the probe fails.

    -
  • CLI

    CLI is an efficient tool for health check. When using the CLI, you must specify an executable command in a container. The cluster periodically runs the command in the container. If the command exits with code 0, the health check is successful. Otherwise, the health check fails.

    -

    The CLI mode can be used to replace the HTTP request-based and TCP port-based health check.

    -
    • For a TCP port, you can write a program script to connect to a container port. If the connection is successful, the script returns 0. Otherwise, the script returns –1.
    • For an HTTP request, you can write a program script to run the wget command for a container.

      wget http://127.0.0.1:80/health-check

      -

      Check the return code of the response. If the return code is within 200–399, the script returns 0. Otherwise, the script returns –1.

      -
      • Put the program to be executed in the container image so that the program can be executed.
      • If the command to be executed is a shell script, do not directly specify the script as the command, but add a script parser. For example, if the script is /data/scripts/health_check.sh, you must specify sh /data/scripts/health_check.sh for command execution. The reason is that the cluster is not in the terminal environment when executing programs in a container.
      -
      -
    -
-
-
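The check methods above correspond to the httpGet, tcpSocket, and exec probe types in YAML. The following is a minimal sketch of a container that uses an HTTP liveness probe and a TCP readiness probe; the paths, ports, and delay values are example values only.

containers:
- name: nginx
  image: nginx
  livenessProbe:
    httpGet:
      path: /health-check          # HTTP check path
      port: 80                     # Container listening port
    initialDelaySeconds: 30        # Initial Delay (s)
    timeoutSeconds: 10             # Timeout (s)
  readinessProbe:
    tcpSocket:
      port: 80                     # TCP port to probe
    initialDelaySeconds: 10
    timeoutSeconds: 1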

Common Parameter Description

-
Table 1 Common parameter description

Initial Delay (s)

Check delay time, in seconds. Set this parameter according to the normal startup time of services.

For example, if this parameter is set to 30, the health check will be started 30 seconds after the container is started. The time is reserved for containerized services to start.

Timeout (s)

Timeout duration, in seconds.

For example, if this parameter is set to 10, the timeout wait time for performing a health check is 10s. If the wait time elapses, the health check is regarded as a failure. If the parameter is left blank or set to 0, the default timeout time is 1s.
-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0113.html b/docs/cce/umn/cce_01_0113.html deleted file mode 100644 index d557e687..00000000 --- a/docs/cce/umn/cce_01_0113.html +++ /dev/null @@ -1,28 +0,0 @@ - - -

Setting an Environment Variable

-

Scenario

An environment variable is a variable whose value can affect the way a running container will behave. You can modify environment variables even after workloads are deployed, increasing flexibility in workload configuration.

-

The function of setting environment variables on CCE is the same as that of specifying ENV in a Dockerfile.

-

CCE provides three ways to add environment variables: Manually add environment variables, import environment variables from a secret, and import environment variables from a configMap.

-

After a container is started, do not modify configurations in the container. If configurations in the container are modified (for example, passwords, certificates, and environment variables of a containerized application are added to the container), the configurations will be lost after the container restarts and container services will become abnormal. An example scenario of container restart is pod rescheduling due to node anomalies.

-

Configurations must be imported to a container as arguments. Otherwise, configurations will be lost after the container restarts.

-
-
-

Manually Adding Environment Variables

  1. When creating a workload, add a container image. Then, expand Environment Variables and click Add Environment Variables.
  2. Configure the following parameters as required:

    • Type: Set this to Added manually.
    • Variable Name: Enter a variable name, for example, demo.
    • Variable Value/Reference: Enter a variable value, for example, value.
    -
    Figure 1 Manually adding environment variables
    -

-
-

Importing Environment Variables from a Secret

  1. Create a secret first. For details, see Creating a Secret.
  2. When creating a workload, add a container image. Then, expand Environment Variables and click Add Environment Variables.
  3. Configure the following parameters as required:

    • Type: Set this to Added from Secret.
    • Variable Name: Enter a variable name.
    • Variable Value/Reference: Select the corresponding secret name and key.
    -
    Figure 2 Importing environment variables from a secret
    -

-
-

Importing Environment Variables from a ConfigMap

  1. Create a ConfigMap first. For details, see Creating a ConfigMap.
  2. When creating a workload, add a container image. Then, expand Environment Variables and click Add Environment Variables.
  3. Configure the following parameters as required:

    • Type: Set this to Added from ConfigMap.
    • Variable Name: Enter a variable name.
    • Variable Value/Reference: Select the corresponding ConfigMap name and key.
    -

-
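The three ways of adding environment variables map to the env field in the container spec. The following minimal sketch combines a manually added variable, a variable imported from a secret, and a variable imported from a ConfigMap; the secret, ConfigMap, and key names are example values only.

containers:
- name: demo
  image: nginx
  env:
  - name: demo                       # Manually added variable
    value: value
  - name: SECRET_VAR                 # Variable imported from a secret
    valueFrom:
      secretKeyRef:
        name: mysecret               # Example secret name
        key: password                # Example key in the secret
  - name: CONFIG_VAR                 # Variable imported from a ConfigMap
    valueFrom:
      configMapKeyRef:
        name: myconfigmap            # Example ConfigMap name
        key: app.mode                # Example key in the ConfigMap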
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0114.html b/docs/cce/umn/cce_01_0114.html deleted file mode 100644 index 4658db94..00000000 --- a/docs/cce/umn/cce_01_0114.html +++ /dev/null @@ -1,91 +0,0 @@ - - -

ENI LoadBalancer

-

Scenario

An ENI LoadBalancer Service uses a load balancer to route traffic directly to backend pods, which reduces latency and avoids performance loss for containerized applications.

-

External access requests are directly forwarded from a load balancer to pods. Internal access requests can be forwarded to a pod through a Service.

-

-
-

Notes and Constraints

  • ENI LoadBalancer is available only in certain regions.
  • Only dedicated load balancers are supported, and they must support layer-4 networking (TCP/UDP).
  • After a load balancer is created, its flavor cannot be changed. Therefore, in CCE, after you create a Service, you cannot connect the automatically created load balancer to other objects. If no load balancer is automatically created, you can connect any existing one to the Service.
  • The cluster version must be 1.17 or later.
  • ENI LoadBalancer Services can be created only for workloads (containers) bound with elastic network interfaces (ENIs).
-
-

Adding a Service When Creating a Workload

You can set the Service when creating a workload on the CCE console. An Nginx workload is used as an example.

-
  1. In the Set Application Access step of Creating a Deployment, Creating a StatefulSet, or Creating a DaemonSet, click Add Service and set the following parameters:

    • Access Type: Select ENI LoadBalancer (ELB). This option is available only if you have selected Attach ENI to Pod when specifying basic workload information during workload creation.
    • Service Name: Specify a Service name, which can be the same as the workload name.
    -

    ELB Configuration

    -
    • Elastic Load Balancer: Only dedicated load balancers are supported.

      Dedicated: Resources are exclusively used by a load balancer, so the performance of a load balancer is not affected by other load balancers. IPv6 is supported.

      -

      You can create public network or private network load balancers.

      -
      • Public network: You can select an existing public network load balancer or have the system automatically create a new one.
      • Private network: You can select an existing private network load balancer or have the system automatically create a new one.
      -
      The selected or created load balancer must be in the same VPC as the current cluster, and it must match the load balancer type (private or public network).
      • Enterprise Project: Select an enterprise project in which the load balancer is created.
      • Specifications: This field is displayed only when you select Public network and Automatically created for Elastic Load Balancer. You can modify the name, specifications, billing mode, and bandwidth of the load balancer.
      • Configure Dedicated Load Balancer
        • AZ: Dedicated load balancers can be deployed across AZs to provide higher reliability.
        • Subnet: subnet where the backend server of the load balancer is located.

          Load balancers occupy different numbers of subnet IP addresses based on their specifications. Therefore, you are not advised to use the subnet CIDR blocks of other resources (such as clusters and nodes) as the load balancer CIDR block.

          -
        • Specifications: Specifications determine the types of listeners that can be added to a load balancer. Select specifications that best fit your needs. For details, see Specifications of Dedicated Load Balancers.
        -
      • Algorithm Type: You can select Weighted round robin, Weighted least connections, or Source IP hash. The weight is dynamically adjusted based on the number of pods of the workload associated with the Service on each node.
        • Weighted round robin: Requests are forwarded to different servers based on their weights, which indicate server processing performance. Backend servers with higher weights receive proportionately more requests, whereas equal-weighted servers receive the same number of requests. This algorithm is often used for short connections, such as HTTP services.
        • Weighted least connections: In addition to the weight assigned to each server, the number of connections processed by each backend server is also considered. Requests are forwarded to the server with the lowest connections-to-weight ratio. Building on least connections, the weighted least connections algorithm assigns a weight to each server based on their processing performance. This algorithm is often used for persistent connections, such as database connections.
        • Source IP hash: The source IP address of each request is calculated using the hash algorithm to obtain a unique hash key, and all backend servers are numbered. The generated key allocates the client to a particular server. This allows requests from different clients to be routed based on source IP addresses and ensures that a client is directed to the same server as always. This algorithm applies to TCP connections without cookies.
        -
        -
      • Sticky Session: This function is disabled by default. You can select Based on source IP address. Listeners ensure session stickiness based on IP addresses. Requests from the same IP address will be forwarded to the same backend server.
      • Health Check: This function is enabled by default. Enabling it will perform health checks on your load balancer. For details about how to configure the ELB health check parameters, see Configuring a Health Check.
      -
      -
    • Port Settings
      • Protocol: protocol used by the Service.
      • Container Port: port defined in the container image and on which the workload listens. The Nginx application listens on port 80.
      • Access Port: port mapped to the container port at the load balancer's IP address. The workload can be accessed at <Load balancer's IP address>:<Access port>. The port number range is 1–65535.
      -
    -

  2. After the configuration is complete, click OK.
  3. On the workload creation page, click Next: Configure Advanced Settings. On the page displayed, click Create.
  4. After the workload is successfully created, choose Workloads > Deployments or Workloads > StatefulSets on the CCE console. Click the name of the workload to view its details. On the workload details page, click the Services tab and obtain the access address.
  5. Click the access address.
-
-

Adding a Service After Creating a Workload

You can set the Service after creating a workload. This has no impact on the workload status and takes effect immediately. The procedure is as follows:

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Network.
  2. On the Services tab page, click Create Service.

    The parameters are the same as those in Adding a Service When Creating a Workload.

    -

  3. Click Create. An ENI LoadBalancer Service will be added for the workload.
-
-

Using kubectl to Create a Service (Automatically Creating a Load Balancer)

An ENI LoadBalancer Service supports only dedicated ELBs. You do not need to specify NodePort when creating a Service.

-
apiVersion: v1
kind: Service
metadata:
    name: example
    annotations:
        kubernetes.io/elb.class: performance
        kubernetes.io/elb.autocreate:
          '
          {
              "type": "public",
              "bandwidth_name": "cce-bandwidth-1630813564682",
              "bandwidth_chargemode": "traffic",
              "bandwidth_size": 5,
              "bandwidth_sharetype": "PER",
              "eip_type": "5_bgp",
              "available_zone": [
                  "eu-de-01"
              ],
              "l7_flavor_name": "L7_flavor.elb.s2.medium",
              "l4_flavor_name": "L4_flavor.elb.s1.small"
          }
          '
spec:
    selector:
        app: example
    ports:
        -   name: cce-service-0
            targetPort: 80
            port: 8082
            protocol: TCP
    type: LoadBalancer
-

For details about the parameters, see Table 4.
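After saving a manifest like the one above (for example, as eni-lb-svc.yaml, an assumed file name), you can create the Service and check the load balancer address that is allocated:

kubectl create -f eni-lb-svc.yaml
kubectl get svc example              # The EXTERNAL-IP column shows the load balancer address once it is ready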

-
-

Using kubectl to Create a Service (Using an Existing Load Balancer)

When creating a Service using an existing load balancer, you only need to specify the ID of the load balancer.

-
apiVersion: v1
kind: Service
metadata:
    name: example
    annotations:
        kubernetes.io/elb.id: bcc44e84-d0b5-4192-8bec-b2ca55ce5025     # ID of the load balancer. Replace it with the actual value.
spec:
    selector:
        app: example
    ports:
        -   name: cce-service-0
            targetPort: 80
            port: 8082
            protocol: TCP
    type: LoadBalancer
-
-

ELB Forwarding

After an ENI LoadBalancer Service is created, you can view the listener forwarding rules of the load balancer on the ELB console.

-
Figure 1 ELB forwarding
-

You can find that a listener is created for the load balancer. The backend server address is the IP address of the pod, and the service port is the container port. This is because the pod uses an ENI or sub-ENI. When traffic passes through the load balancer, it directly forwards the traffic to the pod. This is the same as that described in Scenario.

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0120.html b/docs/cce/umn/cce_01_0120.html deleted file mode 100644 index 6070c05b..00000000 --- a/docs/cce/umn/cce_01_0120.html +++ /dev/null @@ -1,62 +0,0 @@ - - -

Performing Replace/Rolling Upgrade (v1.13 and Earlier)

-

Scenario

You can upgrade your clusters to a newer Kubernetes version on the CCE console.

-

Before the upgrade, learn which target versions your CCE clusters can be upgraded to, the available upgrade methods, and the impacts of the upgrade. For details, see Overview and Before You Start.

-
-

Precautions

  • If the coredns add-on needs to be upgraded during the cluster upgrade, ensure that the number of nodes is greater than or equal to the number of coredns instances and all coredns instances are running. Otherwise, the upgrade will fail. Before upgrading a cluster of v1.11 or v1.13, you need to upgrade the coredns add-on to the latest version available for the cluster.
  • When a cluster of v1.11 or earlier is upgraded to v1.13, the impacts on the cluster are as follows:
    • All cluster nodes will be restarted as their OSs are upgraded, which affects application running.
    • The cluster signing certificate mechanism is changed. As a result, the original cluster certificate becomes invalid. You need to obtain the certificate or kubeconfig file again after the cluster is upgraded.
    -
  • During the upgrade from one release of v1.13 to a later release of v1.13, applications in the cluster are interrupted for a short period of time only during the upgrade of network components.
  • During the upgrade from Kubernetes 1.9 to 1.11, the kube-dns of the cluster will be uninstalled and replaced with CoreDNS, which may cause loss of the cascading DNS configuration in the kube-dns or temporary interruption of the DNS service. Back up the DNS address configured in the kube-dns so you can configure the domain name in the CoreDNS again when domain name resolution is abnormal.
-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Clusters. In the cluster list, check the cluster version.
  2. Click More for the cluster you want to upgrade, and select Upgrade from the drop-down menu.

    • If your cluster version is up-to-date, the Upgrade button is grayed out.
    • If the cluster status is Unavailable, the upgrade flag in the upper right corner of the cluster card view will be grayed out. Check the cluster status by referring to Before You Start.
    -
    -

  3. In the displayed Pre-upgrade Check dialog box, click Check Now.
  4. The pre-upgrade check starts. While the pre-upgrade check is in progress, the cluster status will change to Pre-checking and new nodes/applications will not be able to be deployed on the cluster. However, existing nodes and applications will not be affected. It takes 3 to 5 minutes to complete the pre-upgrade check.
  5. When the status of the pre-upgrade check is Completed, click Upgrade.
  6. On the cluster upgrade page, review or configure basic information by referring to Table 1.

    -

    Table 1 Basic information

    Parameter

    -

    Description

    -

    Cluster Name

    -

    Review the name of the cluster to be upgraded.

    -

    Current Version

    -

    Review the version of the cluster to be upgraded.

    -

    Target Version

    -

    Review the target version after the upgrade.

    -

    Node Upgrade Policy

    -

    Replace (replace upgrade): Worker nodes will be reset. Their OSs will be reinstalled, and data on the system and data disks will be cleared. Exercise caution when performing this operation.

    -
    NOTE:
    • The lifecycle management function of the nodes and workloads in the cluster is unavailable.
    • APIs cannot be called temporarily.
    • Running workloads will be interrupted because nodes are reset during the upgrade.
    • Data in the system and data disks on the worker nodes will be cleared. Back up important data before resetting the nodes.
    • Data disks without LVM mounted to worker nodes need to be mounted again after the upgrade, and data on the disks will not be lost during the upgrade.
    • The EVS disk quota must be greater than 0.
    • The container IP addresses change, but the communication between containers is not affected.
    • Custom labels on the worker nodes will be cleared.
    • It takes about 20 minutes to upgrade a master node and about 30 to 120 minutes to upgrade worker nodes (about 3 minutes for each worker node), depending on the number of worker nodes and upgrade batches.
    -
    -

    Login Mode

    -
    • Key pair: Select the key pair used to log in to the node. You can select a shared key.

      A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create a key pair.

      -
    -
    -
    -
    -

  7. Click Next. In the dialog box displayed, click OK.
  8. Upgrade add-ons. If an add-on needs to be upgraded, a red dot is displayed. Click the Upgrade button in the lower left corner of the add-on card view. After the upgrade is complete, click Upgrade in the lower right corner of the page.

    • Master nodes will be upgraded first, and then the worker nodes will be upgraded concurrently. If there are a large number of worker nodes, they will be upgraded in different batches.
    • Select a proper time window for the upgrade to reduce impacts on services.
    • Clicking OK will start the upgrade immediately, and the upgrade cannot be canceled. Do not shut down or restart nodes during the upgrade.
    -
    -

  9. In the displayed Upgrade dialog box, read the information and click OK. Note that the cluster cannot be rolled back after the upgrade.
  10. Back to the cluster list, you can see that the cluster status is Upgrading. Wait until the upgrade is completed.

    After the upgrade is successful, you can view the cluster status and version on the cluster list or cluster details page.

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0125.html b/docs/cce/umn/cce_01_0125.html deleted file mode 100644 index 34d1cf91..00000000 --- a/docs/cce/umn/cce_01_0125.html +++ /dev/null @@ -1,23 +0,0 @@ - - - -

SFS Turbo Volumes

- -

-
- -
- - - -
- diff --git a/docs/cce/umn/cce_01_0127.html b/docs/cce/umn/cce_01_0127.html deleted file mode 100644 index af45efbc..00000000 --- a/docs/cce/umn/cce_01_0127.html +++ /dev/null @@ -1,28 +0,0 @@ - - -

storage-driver (System Resource Add-on, Mandatory)

-

Introduction

storage-driver functions as a standard Kubernetes FlexVolume plug-in to allow containers to use IaaS storage resources. By installing and upgrading storage-driver, you can quickly install and update cloud storage capabilities.

-

storage-driver is a system resource add-on. It is installed by default when a cluster of Kubernetes v1.13 or earlier is created.

-
-

Notes and Constraints

  • For clusters created in CCE, Kubernetes v1.15.11 is a transitional version in which the FlexVolume plug-in (storage-driver) is compatible with the CSI plug-in (everest). Clusters of v1.17 and later versions do not support FlexVolume any more. You need to use the everest add-on. For details about CSI and FlexVolume, see Differences Between CSI and FlexVolume Plug-ins.
  • The FlexVolume plug-in will be maintained by Kubernetes developers, but new functionality will only be added to CSI. You are advised not to create storage that connects to the FlexVolume plug-in (storage-driver) in CCE any more. Otherwise, the storage resources may not function normally.
  • This add-on can be installed only in clusters of v1.13 or earlier. By default, the everest add-on is installed when clusters of v1.15 or later are created.

    In a cluster of v1.13 or earlier, when an upgrade or bug fix is available for storage functionalities, you only need to install or upgrade the storage-driver add-on. Upgrading the cluster or creating a cluster is not required.

    -
    -
-
-

Installing the Add-on

This add-on has been installed by default. If it is uninstalled due to some reasons, you can reinstall it by performing the following steps:

-

If storage-driver is not installed in a cluster, perform the following steps to install it:

-
  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Marketplace tab page, click Install Add-on under storage-driver.
  2. On the Install Add-on page, select the cluster and the add-on version, and click Next: Configuration.
  3. Click Install to install the add-on. Note that the storage-driver has no configurable parameters and can be directly installed.

    After the add-on is installed, click Go Back to Previous Page. On the Add-on Instance tab page, select the corresponding cluster to view the running instance. This indicates that the add-on has been installed on each node in the cluster.

    -

-
-

Upgrading the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Instance tab page, select the target cluster and click Upgrade under storage-driver.

    • If the Upgrade button is unavailable, the current add-on is already up-to-date and no upgrade is required.
    • When the upgrade is complete, the original storage-driver version on cluster nodes will be replaced by the latest version.
    -
    -

  2. On the Basic Information page, select the add-on version and click Next.
  3. Click Upgrade to upgrade the storage-driver add-on. Note that the storage-driver has no configurable parameters and can be directly upgraded.
-
-

Uninstalling the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Instance tab page, select the target cluster and click Uninstall under storage-driver.
  2. In the dialog box displayed, click Yes to uninstall the add-on.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0129.html b/docs/cce/umn/cce_01_0129.html deleted file mode 100644 index 96a904ff..00000000 --- a/docs/cce/umn/cce_01_0129.html +++ /dev/null @@ -1,173 +0,0 @@ - - -

coredns (System Resource Add-on, Mandatory)

-

Introduction

The coredns add-on is a DNS server that provides domain name resolution services for Kubernetes clusters. coredns chains plug-ins to provide additional features.

-

coredns is open-source software and part of the CNCF. It provides a means for cloud services to discover each other in cloud-native deployments. Each of the plug-ins chained by coredns provides a particular DNS function. You can integrate coredns with only the plug-ins you need to make it fast, efficient, and flexible. When used in a Kubernetes cluster, coredns can automatically discover services in the cluster and provide domain name resolution for these services. By working with a cloud DNS server, coredns can resolve external domain names for workloads in a cluster.

-

coredns is a system resource add-on. It is installed by default when a cluster of Kubernetes v1.11 or later is created.

-

Kubernetes v1.11 and later versions use CoreDNS as the official default DNS server for all clusters.

-

CoreDNS official website: https://coredns.io/

-

Open source community: https://github.com/coredns/coredns

-
-

Notes and Constraints

When CoreDNS is running properly or being upgraded, ensure that the number of available nodes is greater than or equal to the number of CoreDNS instances and all CoreDNS instances are running. Otherwise, the upgrade will fail.

-
-

Installing the Add-on

This add-on has been installed by default. If it is uninstalled due to some reasons, you can reinstall it by performing the following steps:

-
  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Marketplace tab page, click Install Add-on under coredns.
  2. On the Install Add-on page, select the cluster and the add-on version, and click Next: Configuration.
  3. In the Configuration step, set the following parameters:

    -

    Table 1 coredns add-on parameters

    Parameter

    -

    Description

    -

    Add-on Specifications

    -

    Concurrent domain name resolution ability. Select add-on specifications that best fit your needs.

    -

    Instances

    -

    Number of pods that will be created to match the selected add-on specifications. The number cannot be modified.

    -

    Container

    -

    CPU and memory quotas of the container allowed for the selected add-on specifications. The quotas cannot be modified.

    -

    Notes

    -

    Add-on precautions. Read the precautions before you proceed with the step.

    -

    stub domain

    -

    A domain name server for a user-defined domain name. The format is a key-value pair. The key is a suffix of DNS domain name, and the value is one or more DNS IP addresses. For example, acme.local -- 1.2.3.4,6.7.8.9 means that DNS requests with the .acme.local suffix are forwarded to a DNS listening at 1.2.3.4,6.7.8.9.

    -
    -
    -

  4. After the preceding configurations are complete, click Install.

    After the add-on is installed, click Go Back to Previous Page. On the Add-on Instance tab page, select the corresponding cluster to view the running instance. This indicates that the add-on has been installed on each node in the cluster.

    -

-
-

Configuring the Stub Domain for CoreDNS

Cluster administrators can modify the ConfigMap for the CoreDNS Corefile to change how service discovery works. They can configure stub domains for CoreDNS using the proxy plug-in.

-

Assume that a cluster administrator has a Consul DNS server located at 10.150.0.1 and all Consul domain names have the suffix .consul.local.

-

To configure this Consul DNS server in CoreDNS, run the following command to edit the CoreDNS ConfigMap:

-

kubectl edit configmap coredns -n kube-system

-

Example configuration:

-
consul.local:5353 {
        errors
        cache 30
        proxy . 10.150.0.1
    }
-

In clusters of v1.15.11 and later, the modified ConfigMap is as follows:

-
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  selfLink: /api/v1/namespaces/kube-system/configmaps/coredns
  uid: 00cb8f29-62d7-4df8-a769-0a16237903c1
  resourceVersion: '2074614'
  creationTimestamp: '2021-04-07T03:52:42Z'
  labels:
    app: coredns
    k8s-app: coredns
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: CoreDNS
    release: cceaddon-coredns
data:
  Corefile: |-
    .:5353 {
        bind {$POD_IP}
        cache 30
        errors
        health {$POD_IP}:8080
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          pods insecure
          upstream /etc/resolv.conf
          fallthrough in-addr.arpa ip6.arpa
        }
        loadbalance round_robin
        prometheus {$POD_IP}:9153
        forward . /etc/resolv.conf
        reload
    }

    consul.local:5353 {
        errors
        cache 30
        proxy . 10.150.0.1
    }
-

In clusters earlier than v1.15.11, the modified ConfigMap is as follows:

-
apiVersion: v1
data:
  Corefile: |-
    .:5353 {
        cache 30
        errors
        health
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          pods insecure
          upstream /etc/resolv.conf
          fallthrough in-addr.arpa ip6.arpa
        }
        loadbalance round_robin
        prometheus 0.0.0.0:9153
        proxy . /etc/resolv.conf
        reload
    }

    consul.local:5353 {
        errors
        cache 30
        proxy . 10.150.0.1
    }
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
-
-

How Does Domain Name Resolution Work in Kubernetes?

DNS policies can be set on a per-pod basis. Currently, Kubernetes supports four types of DNS policies: Default, ClusterFirst, ClusterFirstWithHostNet, and None. For details, see https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/. These policies are specified in the dnsPolicy field of the pod spec.

-
-
  • Default: Pods inherit the name resolution configuration from the node that the pods run on. The custom upstream DNS server and the stub domain cannot be used together with this policy.
  • ClusterFirst: Any DNS query that does not match the configured cluster domain suffix, such as www.kubernetes.io, is forwarded to the upstream name server inherited from the node. Cluster administrators may have extra stub domains and upstream DNS servers configured.
  • ClusterFirstWithHostNet: For pods running with hostNetwork, set their DNS policy to ClusterFirstWithHostNet.
  • None: It allows a pod to ignore DNS settings from the Kubernetes environment. All DNS settings are supposed to be provided using the dnsConfig field in the pod spec, as shown in the sketch after this list.
-
  • Clusters of Kubernetes v1.10 and later support Default, ClusterFirst, ClusterFirstWithHostNet, and None. Clusters earlier than Kubernetes v1.10 support only Default, ClusterFirst, and ClusterFirstWithHostNet.
  • Default is not the default DNS policy. If dnsPolicy is not explicitly specified, ClusterFirst is used.
-
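As a reference for the None policy described above, the following minimal pod sketch supplies its own DNS settings through dnsConfig; the pod name, name server, and search domain are example values only.

apiVersion: v1
kind: Pod
metadata:
  name: dns-example                # Example pod name
spec:
  containers:
  - name: test
    image: nginx
  dnsPolicy: "None"                # Ignore DNS settings from the Kubernetes environment
  dnsConfig:
    nameservers:
    - 1.2.3.4                      # Example DNS server address
    searches:
    - ns1.svc.cluster.local        # Example search domain
    options:
    - name: ndots
      value: "2"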
-

Routing

-

Without stub domain configurations: Any query that does not match the configured cluster domain suffix, such as www.kubernetes.io, is forwarded to the upstream DNS server inherited from the node.

-

With stub domain configurations: If stub domains and upstream DNS servers are configured, DNS queries are routed according to the following flow:

-
  1. The query is first sent to the DNS caching layer in coredns.
  2. From the caching layer, the suffix of the request is examined and then the request is forwarded to the corresponding DNS:
    • Names with the cluster suffix, for example, .cluster.local: The request is sent to coredns.
    -
    • Names with the stub domain suffix, for example, .acme.local: The request is sent to the configured custom DNS resolver that listens, for example, on 1.2.3.4.
    • Names that do not match the suffix (for example, widget.com): The request is forwarded to the upstream DNS.
    -
-
Figure 1 Routing
-

Upgrading the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Instance tab page, click Upgrade under coredns.

    • If the Upgrade button is unavailable, the current add-on is already up-to-date and no upgrade is required.
    • During the upgrade, the previous configurations are lost and need to be specified again.
    • When the upgrade is complete, the original coredns version on cluster nodes will be replaced by the latest version. If an exception occurs during the upgrade, uninstall the add-on and then re-install it.
    -
    -

  2. On the Basic Information page, select the add-on version and click Next.
  3. Configure the parameters listed in Table 2. After the configuration is complete, click Upgrade to upgrade the coredns add-on.

    -

    Table 2 Parameters for installing coredns

    Parameter

    -

    Description

    -

    Add-on Specifications

    -

    Concurrent domain name resolution ability. Select add-on specifications that best fit your needs.

    -

    stub domain

    -

    A domain name server for a user-defined domain name. The format is a key-value pair. The key is a suffix of DNS domain name, and the value is one or more DNS IP addresses. For example, acme.local -- 1.2.3.4,6.7.8.9 means that DNS requests with the .acme.local suffix are forwarded to a DNS listening at 1.2.3.4,6.7.8.9.

    -
    -
    -

-
-

Uninstalling the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Instance tab page, click Uninstall under coredns.
  2. In the dialog box displayed, click Yes to uninstall the add-on.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0130.html b/docs/cce/umn/cce_01_0130.html deleted file mode 100644 index adf3daba..00000000 --- a/docs/cce/umn/cce_01_0130.html +++ /dev/null @@ -1,25 +0,0 @@ - - -

Configuring a Container

-
-
- - - -
- diff --git a/docs/cce/umn/cce_01_0139.html b/docs/cce/umn/cce_01_0139.html deleted file mode 100644 index 2b3031a0..00000000 --- a/docs/cce/umn/cce_01_0139.html +++ /dev/null @@ -1,186 +0,0 @@ - - -

Common kubectl Commands

-

Getting Started

get

-

The get command displays one or many resources of a cluster.

-

This command prints a table of the most important information about all resources, including cluster nodes, running pods, Deployments, and Services.

-

A cluster can have multiple namespaces. If no namespace is specified, this command will run with the --namespace=default flag.

-
-

Examples:

-

To list all pods with detailed information:

-
kubectl get po -o wide
-

To display pods in all namespaces:

-
kubectl get po --all-namespaces
-

To list labels of pods in all namespaces:

-
kubectl get po --show-labels
-

To list all namespaces of the node:

-
kubectl get namespace
-

To list information of other nodes, run this command with the -s flag. To list a specified type of resources, add the resource type to this command, for example, kubectl get svc, kubectl get nodes, and kubectl get deploy.

-
-

To list a pod with a specified name in YAML output format:

-
kubectl get po <podname> -o yaml
-

To list a pod with a specified name in JSON output format:

-
kubectl get po <podname> -o json
-
kubectl get po rc-nginx-2-btv4j -o=custom-columns=LABELS:.metadata.labels.app
-

LABELS indicates a comma separated list of user-defined column titles. metadata.labels.app indicates the data to be listed in either YAML or JSON output format.

-
-

create

-

The create command creates a cluster resource from a file or input.

-

If there is already a resource descriptor (a YAML or JSON file), you can create the resource from the file by running the following command:

-
kubectl create -f filename
-

expose

-

The expose command exposes a resource as a new Kubernetes service. Possible resources include a pod, Service, and Deployment.

-
kubectl expose deployment deployname --port=81 --type=NodePort --target-port=80 --name=service-name
-

The example command creates a Service of the NodePort type for the Deployment with the name specified in deployname. The Service will serve on port 81 specified in --port and connect to the containers on port 80 specified in --target-port. More specifically, the Service is reachable at <cluster-internal IP address>:<port> inside the cluster and at <node IP address>:<node port> from outside the cluster.

-
-

run

-

Examples:

-

To run a particular image in the cluster:

-
kubectl run deployname --image=nginx:latest
-

To run a particular image using a specified command:

-
kubectl run deployname --image=busybox --command -- ping baidu.com
-

set

-

The set command configures object resources.

-

Example:

-

To change the image of a deployment with the name specified in deployname to image 1.0:

-
kubectl set image deploy deployname containername=containername:1.0
-

edit

-

The edit command edits a resource from the default editor.

-

Examples:

-

To update a pod:

-
kubectl edit po po-nginx-btv4j
-

The example command yields the same effect as the following command:

-
kubectl get po po-nginx-btv4j -o yaml >> /tmp/nginx-tmp.yaml
vim /tmp/nginx-tmp.yaml
# make changes to /tmp/nginx-tmp.yaml here
kubectl replace -f /tmp/nginx-tmp.yaml
-

explain

-

The explain command views documents or reference documents.

-

Example:

-

To get documentation of pods:

-
kubectl explain pod
-

delete

-

The delete command deletes resources by resource name or label.

-

Example:

-

To delete a pod with minimal delay:

-
kubectl delete po podname --now 
-
kubectl delete -f nginx.yaml
kubectl delete deployment deployname
-
-

Deployment Commands

rolling-update*

-

rolling-update is a very important command. It updates a running service with zero downtime. Pods are incrementally replaced by new ones. One pod is updated at a time. The old pod is deleted only after the new pod is up. New pods must be distinct from old pods by name, version, and label. Otherwise, an error message will be reported.

-
kubectl rolling-update poname -f newfilename
kubectl rolling-update poname --image=image:v2
-

If any problem occurs during the rolling update, run the command with the --rollback flag to abort the rolling update and revert to the previous pod.

-
kubectl rolling-update poname --rollback
-

rollout

-

The rollout command manages the rollout of a resource.

-

Examples:

-

To check the rollout status of a particular deployment:

-
kubectl rollout status deployment/deployname
-

To view the rollout history of a particular deployment:

-
kubectl rollout history deployment/deployname
-

To roll back to the previous deployment: (by default, a resource is rolled back to the previous version)

-
kubectl rollout undo deployment/test-nginx
-

scale

-

The scale command sets a new size for a resource by adjusting the number of resource replicas.

-
kubectl scale deployment deployname --replicas=newnumber
-

autoscale

-

The autoscale command automatically chooses and sets the number of pods. This command specifies the range for the number of pod replicas maintained by a replication controller. If there are too many pods, the replication controller terminates the extra pods. If there are too few, the replication controller starts more pods.

-
kubectl autoscale deployment deployname --min=minnumber --max=maxnumber
-
-

Cluster Management Commands

cordon, drain, uncordon*

-

If a node to be upgraded is running many pods or is already down, perform the following steps to prepare the node for maintenance:

-
  1. Run the cordon command to mark a node as unschedulable. This means that new pods will not be scheduled onto the node.

    kubectl cordon nodename
    -

    Note: In CCE, nodename indicates the private network IP address of a node.

    -

  2. Run the drain command to smoothly migrate the running pods from the node to another node.

    kubectl drain nodename --ignore-daemonsets --delete-emptydir-data
    -

    --delete-emptydir-data allows pods that use emptyDir volumes to be drained; the emptyDir data will be deleted. (In kubectl versions earlier than v1.20, use the --delete-local-data flag instead.)

    -

  3. Perform maintenance operations on the node, such as upgrading the kernel and upgrading Docker.
  4. After node maintenance is completed, run the uncordon command to mark the node as schedulable.

    kubectl uncordon nodename
    -

-

cluster-info

-

To display the addresses of the master and the cluster services (add-ons) running in the cluster:

-
kubectl cluster-info
-

To dump current cluster information to stdout:

-
kubectl cluster-info dump
-

top*

-

The top command displays resource (CPU/memory/storage) usage. This command requires a metrics provider (such as metrics-server or, in earlier clusters, Heapster) to be correctly configured and working on the server.

-
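Example (a sketch; it assumes a metrics provider is available in the cluster, and the namespace name is illustrative):

kubectl top node
kubectl top pod -n kube-system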

taint*

-

The taint command updates the taints on one or more nodes.

-
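Example (a sketch; nodename and the key/value pair are placeholders):

kubectl taint nodes nodename key1=value1:NoSchedule        # Add a taint
kubectl taint nodes nodename key1=value1:NoSchedule-       # Remove the taint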

certificate*

-

The certificate command modifies the certificate resources.

-
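Example (a sketch; csrname is a placeholder for a CertificateSigningRequest name):

kubectl certificate approve csrname
kubectl certificate deny csrname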
-

Fault Diagnosis and Debugging Commands

describe

-

The describe command is similar to the get command. The difference is that the describe command shows details of a specific resource or group of resources, whereas the get command lists one or more resources in a cluster. The describe command does not support the -o flag. For resources of the same type, resource details are printed out in the same format.

-

Use the get command to list resources and obtain basic information. To check the status of a specific resource, for example, whether a pod is in the Running state, run the describe command to view more detailed status information.

-
kubectl describe po <podname>
-
-

logs

-

The logs command prints logs for a container in a pod or specified resource to stdout. To display logs in the tail -f mode, run this command with the -f flag.

-
kubectl logs -f podname
-

exec

-

The kubectl exec command is similar to the Docker exec command and executes a command in a container. If there are multiple containers in a pod, use the -c flag to choose a container.

-
kubectl exec -it podname -- bash
-kubectl exec -it podname -c containername -- bash
-

port-forward*

-

The port-forward command forwards one or more local ports to a pod.

-

Example:

-

To listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in the pod:

-
kubectl port-forward podname 5000 6000
-

proxy*

-

The proxy command creates a proxy server between localhost and the Kubernetes API server.

-

Example:

-

To enable the HTTP REST APIs on the master node:

-
kubectl proxy --accept-hosts='.*' --port=8001 --address='0.0.0.0'
-

cp

-

The cp command copies files and directories to and from containers.

-
kubectl cp filename podname:/tmp/filename          # Copy a local file to a pod
-kubectl cp podname:/tmp/filename filename          # Copy a file from a pod to the local host
-

auth*

-

The auth command inspects authorization.

-
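Example (a sketch; the verb, resource, and namespace are placeholders):

kubectl auth can-i create deployments --namespace default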

attach*

-

The attach command is similar to the logs -f command and attaches to a process that is already running inside an existing container. To exit, press Ctrl+C. If a pod contains multiple containers, to view the output of a specific container, use the -c containername flag following podname to specify the container.

-
kubectl attach podname -c containername
-
-

Advanced Commands

replace

-

The replace command updates or replaces an existing resource by attributes including the number of replicas, labels, image versions, and ports. You can directly modify the original YAML file and then run the replace command.

-
kubectl replace -f filename
-

Resource names cannot be updated.

-
-

apply*

-

The apply command provides stricter control on resource updating than the patch and edit commands. The apply command applies a configuration to a resource and maintains a set of configuration files in source control. Whenever a configuration file is updated, it is pushed to the server, and the kubectl apply command applies the latest configuration to the resource. Kubernetes compares the new configuration file with the original one and updates only the changed fields instead of the whole file. Any configuration not included in the file specified by the -f flag remains unchanged. Unlike the replace command, which deletes the resource and creates a new one, the apply command directly updates the original resource. Similar to a git operation, the apply command adds an annotation to the resource to mark the current apply.

-
kubectl apply -f filename
-

patch

-

If you want to modify attributes of a running resource without deleting it or using the replace command, use the patch command. The patch command updates fields of a resource using a strategic merge patch, a JSON merge patch, or a JSON patch. For example, to change a pod label from app=nginx1 to app=nginx2 while the pod is running, use the following command:

-
kubectl patch pod podname -p '{"metadata":{"labels":{"app":"nginx2"}}}'
-

convert*

-

The convert command converts configuration files between different API versions.

-
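Example (a sketch; the convert command is available only in older kubectl releases or as a separate plugin, and pod.yaml is a placeholder file):

kubectl convert -f pod.yaml --output-version v1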
-

Configuration Commands

label

-

The label command updates the labels on a resource.

-
kubectl label pods my-pod new-label=newlabel
-

annotate

-

The annotate command updates the annotations on a resource.

-
kubectl annotate pods my-pod icon-url=http://......
-

completion

-

The completion command provides shell autocompletion.

-
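Example (a sketch for the bash shell; other shells such as zsh are supported in a similar way):

source <(kubectl completion bash)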
-

Other Commands

api-versions

-

The api-versions command prints the supported API versions.

-
kubectl api-versions
-

api-resources

-

The api-resources command prints the supported API resources.

-
kubectl api-resources
-

config*

-

The config command modifies kubeconfig files. An example use case of this command is to configure authentication information in API calls.

-
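Examples (a sketch; contextname is a placeholder):

kubectl config view
kubectl config use-context contextname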

help

-

The help command displays help information about any command.

-
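Example:

kubectl help get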

version

-

The version command prints the client and server version information for the current context.

-
kubectl version
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0141.html b/docs/cce/umn/cce_01_0141.html deleted file mode 100644 index 8ee6eb75..00000000 --- a/docs/cce/umn/cce_01_0141.html +++ /dev/null @@ -1,37 +0,0 @@ - - -

gpu-beta

-

Introduction

gpu-beta is a device management add-on that supports GPUs in containers. It supports only NVIDIA Tesla drivers.

-
-

Notes and Constraints

  • This add-on is available only in certain regions.
  • This add-on can be installed only in CCE clusters of v1.11 or later.
  • If GPU nodes are used in the cluster, the gpu-beta add-on must be installed.
  • The driver to be downloaded must be a .run file.
  • Only Tesla drivers are supported, not GRID drivers.
-
  • If the download link is a public network address, for example, https://us.download.nvidia.com/tesla/396.37/NVIDIA-Linux-x86_64-396.37.run, bind an EIP to each GPU node. For details about how to obtain the driver link, see Obtaining the Driver Link from Public Network.
  • If the download link is an OBS URL, you do not need to bind an EIP to GPU nodes.
  • Ensure that the NVIDIA driver version matches the GPU node.
  • After the driver version is changed, restart the node for the change to take effect.
-
-
-

Installing the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Marketplace tab page, click Install Add-on under gpu-beta.
  2. On the Install Add-on page, select the cluster and the add-on version, and click Next: Configuration.
  3. In the Configuration step, enter the link to download the NVIDIA driver.
  4. Click Install.

    After the add-on is installed, click Go Back to Previous Page. On the Add-on Instance tab page, select the corresponding cluster to view the running instance. This indicates that the add-on has been installed on each GPU node in the cluster.

    -

-
-

Verifying the Add-on

After the add-on is installed, run the nvidia-smi command on the GPU node and the container that schedules GPU resources to verify the availability of the GPU device and driver.

-
GPU node:
cd /opt/cloud/cce/nvidia/bin && ./nvidia-smi
-
-

Container:

-
cd /usr/local/nvidia/bin && ./nvidia-smi
-

If GPU information is returned, the device is available and the add-on is successfully installed.

-

-
-

Obtaining the Driver Link from Public Network

  1. Log in to the CCE console.
  2. Click Create Node and select the GPU node to be created in the Specifications area. The GPU card model of the node is displayed in the lower part of the page.
  3. Visit https://www.nvidia.com/Download/Find.aspx?lang=en.
  4. Select the driver information on the NVIDIA Driver Downloads page, as shown in Figure 1. Operating System must be Linux 64-bit.

    Figure 1 Setting parameters
    -

  5. After confirming the driver information, click SEARCH. A page is displayed, showing the driver information, as shown in Figure 2. Click DOWNLOAD.

    Figure 2 Driver information
    -

  6. Obtain the driver link in either of the following ways:

    • Method 1: As shown in Figure 3, find url=/tesla/396.37/NVIDIA-Linux-x86_64-396.37.run in the browser address box. Then, supplement it to obtain the driver link https://us.download.nvidia.com/tesla/396.37/NVIDIA-Linux-x86_64-396.37.run. By using this method, you must bind an EIP to each GPU node.
    • Method 2: As shown in Figure 3, click AGREE & DOWNLOAD to download the driver. Then, upload the driver to OBS and record the OBS URL. By using this method, you do not need to bind an EIP to GPU nodes.
      Figure 3 Obtaining the link
      -
    -

-
-

Uninstalling the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Instance tab page, select the cluster and click Uninstall under gpu-beta.
  2. In the dialog box displayed, click Yes to uninstall the add-on.

    The driver is not uninstalled when the gpu-beta add-on is uninstalled. If you reinstall the driver, restart all GPU nodes.

    -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0142.html b/docs/cce/umn/cce_01_0142.html deleted file mode 100644 index fcc48061..00000000 --- a/docs/cce/umn/cce_01_0142.html +++ /dev/null @@ -1,151 +0,0 @@ - - -

NodePort

-

Scenario

A Service is exposed on each node's IP address at a static port (NodePort). A ClusterIP Service, to which the NodePort Service will route, is automatically created. By requesting <NodeIP>:<NodePort>, you can access a NodePort Service from outside the cluster.

-
Figure 1 NodePort access
-
-

Notes and Constraints

  • By default, a NodePort Service is accessed within a VPC. If you need to use an EIP to access a NodePort Service through public networks, bind an EIP to the node in the cluster in advance.
  • After a Service is created, if the affinity setting is switched from the cluster level to the node level, the connection tracing table will not be cleared. You are advised not to modify the Service affinity setting after the Service is created. If you need to modify it, create a Service again.
  • The service port of a NodePort Service created on the CCE console is the same as the configured container port.
  • CCE Turbo clusters support only cluster-level service affinity.
-
-

Adding a Service When Creating a Workload

You can set the access type when creating a workload on the CCE console. An Nginx workload is used as an example.

-
  1. In the Set Application Access step of Creating a Deployment, Creating a StatefulSet, or Creating a DaemonSet, click Add Service and set the following parameters:

    • Access Type: Select NodePort.

      If you want to use an EIP to access a NodePort Service through public networks, bind an EIP to the node in the cluster in advance.

      -
      -
    • Service Name: Specify a Service name, which can be the same as the workload name.
    • Service Affinity: For details, see externalTrafficPolicy (Service Affinity).
      • Cluster level: The IP addresses and access ports of all nodes in a cluster can access the workload associated with the Service. Service access will cause performance loss due to route redirection, and the source IP address of the client cannot be obtained.
      • Node level: Only the IP address and access port of the node where the workload is located can access the workload associated with the Service. Service access will not cause performance loss due to route redirection, and the source IP address of the client can be obtained.
      -
    • Port Settings
      • Protocol: protocol used by the Service.
      • Container Port: port on which the workload in the container image listens. The value ranges from 1 to 65535.
      • Access Port: node port (with a private IP address) to which the container port will be mapped. You are advised to select Automatically generated.
        • Automatically generated: The system automatically assigns a port number.
        • Specified port: You have to manually specify a fixed node port number in the range of 30000–32767. Ensure that the port is unique in a cluster.
        -
      -
    -

  2. After the configuration is complete, click OK.
  3. Click Next: Configure Advanced Settings. On the page displayed, click Create.
  4. Click View Deployment Details or View StatefulSet Details. On the Services tab page, obtain the access address, for example, 192.168.0.160:30358.
-
-

Adding a Service After Creating a Workload

You can set the Service after creating a workload. This has no impact on the workload status and takes effect immediately. The procedure is as follows:

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments. On the workload list, click the name of the workload for which you will create a Service.

    If the Service is associated with an ingress, the ingress is unavailable after the port information of the Service is updated. In this case, you need to delete and recreate the Service.

    -
    -

  2. On the Services tab page, click Add Service.
  3. On the Create Service page, select NodePort from the Access Type drop-down list.

    If you want to use an EIP to access a NodePort Service through public networks, bind an EIP to the node in the cluster in advance.

    -
    -

  4. Set node access parameters.

    • Service Name: Service name, which can be the same as the workload name.
    • Cluster Name: name of the cluster where the workload runs. The value is inherited from the workload creation page and cannot be changed.
    • Namespace: namespace where the workload is located. The value is inherited from the workload creation page and cannot be changed.
    • Workload: workload for which you want to add a Service. The value is inherited from the workload creation page and cannot be changed.
    • Service Affinity
      • Cluster level: The IP addresses and access ports of all nodes in a cluster can access the workload associated with the Service. Service access will cause performance loss due to route redirection, and the source IP address of the client cannot be obtained.
      • Node level: Only the IP address and access port of the node where the workload is located can access the workload associated with the Service. Service access will not cause performance loss due to route redirection, and the source IP address of the client can be obtained.
      -
    • Port Settings
      • Protocol: protocol used by the Service.
      • Container Port: port on which the workload in the container image listens. The Nginx workload listens on port 80.
      • Access Port: node port (with a private IP address) to which the container port will be mapped. You are advised to select Automatically generated.
        • Automatically generated: The system automatically assigns a port number.
        • Specified port: You have to manually specify a fixed node port number in the range of 30000–32767. Ensure that the port is unique in a cluster.
        -
      -
    -

  5. Click Create. A NodePort Service will be added for the workload.
-
-

Using kubectl

You can run kubectl commands to set the access type. This section uses an Nginx workload as an example to describe how to set a NodePort Service using kubectl.

-
  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Create and edit the nginx-deployment.yaml and nginx-nodeport-svc.yaml files.

    The file names are user-defined. nginx-deployment.yaml and nginx-nodeport-svc.yaml are merely example file names.

    -

    vi nginx-deployment.yaml

    -
    apiVersion: apps/v1
    -kind: Deployment
    -metadata:
    -  name: nginx
    -spec:
    -  replicas: 1
    -  selector:
    -    matchLabels:
    -      app: nginx
    -  template:
    -    metadata:
    -      labels:
    -        app: nginx
    -    spec:
    -      containers:
    -      - image: nginx:latest
    -        name: nginx
    -      imagePullSecrets:
    -      - name: default-secret
    -

    vi nginx-nodeport-svc.yaml

    -
    apiVersion: v1
    -kind: Service
    -metadata:
    -  labels:
    -    app: nginx
    -  name: nginx-nodeport
    -spec:
    -  ports:
    -  - name: service
    -    nodePort: 30000     # Node port. The value ranges from 30000 to 32767.
    -    port: 8080          # Port for accessing a Service.
    -    protocol: TCP       # Protocol used for accessing a Service. The value can be TCP or UDP.
    -    targetPort: 80      # Port used by a Service to access the target container. This port is closely related to the applications running in a container. In this example, the Nginx image uses port 80 by default.
    -  selector:             # Label selector. A Service selects a pod based on the label and forwards the requests for accessing the Service to the pod. In this example, select the pod with the app:nginx label.
    -    app: nginx
    -  type: NodePort        # Service type. NodePort indicates that the Service is accessed through a node port.
    -

  3. Create a workload.

    kubectl create -f nginx-deployment.yaml

    -

    If information similar to the following is displayed, the workload has been created.

    -
    deployment "nginx" created
    -

    kubectl get po

    -

    If information similar to the following is displayed, the workload is running.

    -
    NAME                     READY     STATUS             RESTARTS   AGE
    -nginx-2601814895-qhxqv   1/1       Running            0          9s
    -

  4. Create a Service.

    kubectl create -f nginx-nodeport-svc.yaml

    -

    If information similar to the following is displayed, the Service is being created.

    -
    service "nginx-nodeport" created
    -

    kubectl get svc

    -

    If information similar to the following is displayed, the Service has been created.

    -
    # kubectl get svc
    -NAME             TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE
    -kubernetes       ClusterIP   10.247.0.1     <none>        443/TCP          4d8h
    -nginx-nodeport   NodePort    10.247.30.40   <none>        8080:30000/TCP   18s
    -

  5. Access the Service.

    By default, a NodePort Service can be accessed by using Any node IP address:Node port.

    -

    The Service can be accessed from a node in another cluster in the same VPC or from another pod in the cluster. If a public IP address is bound to the node, you can also use the public IP address to access the Service. For example, create a container in the cluster and access the Service from the container by using Node IP address:Node port.

    -
    # kubectl get node -owide
    -NAME           STATUS   ROLES    AGE    INTERNAL-IP    EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION                CONTAINER-RUNTIME
    -10.100.0.136   Ready    <none>   152m   10.100.0.136   <none>        CentOS Linux 7 (Core)   3.10.0-1160.25.1.el7.x86_64   docker://18.9.0
    -10.100.0.5     Ready    <none>   152m   10.100.0.5     <none>        CentOS Linux 7 (Core)   3.10.0-1160.25.1.el7.x86_64   docker://18.9.0
    -# kubectl run -i --tty --image nginx:alpine test --rm /bin/sh
    -If you don't see a command prompt, try pressing enter.
    -/ # curl 10.100.0.136:30000
    -<!DOCTYPE html>
    -<html>
    -<head>
    -<title>Welcome to nginx!</title>
    -<style>
    -    body {
    -        width: 35em;
    -        margin: 0 auto;
    -        font-family: Tahoma, Verdana, Arial, sans-serif;
    -    }
    -</style>
    -</head>
    -<body>
    -<h1>Welcome to nginx!</h1>
    -<p>If you see this page, the nginx web server is successfully installed and
    -working. Further configuration is required.</p>
    -
    -<p>For online documentation and support please refer to
    -<a href="http://nginx.org/">nginx.org</a>.<br/>
    -Commercial support is available at
    -<a href="http://nginx.com/">nginx.com</a>.</p>
    -
    -<p><em>Thank you for using nginx.</em></p>
    -</body>
    -</html>
    -/ # 
    -

-
-

externalTrafficPolicy (Service Affinity)

For a NodePort Service, requests are first sent to the node port, then the Service, and finally the pod backing the Service. The backing pod may not be located on the node that receives the requests. By default, the backend workload can be accessed from any node IP address and service port. If the pod is not on the node that receives the request, the request will be redirected to the node where the pod is located, which may cause performance loss.

-

externalTrafficPolicy is a configuration parameter of the Service.

-
apiVersion: v1
-kind: Service
-metadata:
-  name: nginx-nodeport
-spec:
-  externalTrafficPolicy: Local
-  ports:
-  - name: service
-    nodePort: 30000
-    port: 80
-    protocol: TCP
-    targetPort: 80
-  selector:
-    app: nginx
-  type: NodePort
-

If externalTrafficPolicy is set to Local, requests sent to Node IP address:Service port are forwarded only to the pods on the local node. If the node has no such pod, the requests fail.

-

The other value of externalTrafficPolicy is Cluster (the default value), which indicates that requests can be forwarded across nodes in the cluster.

-

You can set this parameter when creating a Service of the NodePort type on the CCE console.

-

-

The values of externalTrafficPolicy are as follows:

-
  • Cluster: The IP addresses and access ports of all nodes in a cluster can access the workload associated with the Service. Service access will cause performance loss due to route redirection, and the source IP address of the client cannot be obtained.
  • Local: Only the IP address and access port of the node where the workload is located can access the workload associated with the Service. Service access will not cause performance loss due to route redirection, and the source IP address of the client can be obtained.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0143.html b/docs/cce/umn/cce_01_0143.html deleted file mode 100644 index 2fc7af47..00000000 --- a/docs/cce/umn/cce_01_0143.html +++ /dev/null @@ -1,21 +0,0 @@ - - -

My Charts

-
-
- - - -
- diff --git a/docs/cce/umn/cce_01_0144.html b/docs/cce/umn/cce_01_0144.html deleted file mode 100644 index 509a9a85..00000000 --- a/docs/cce/umn/cce_01_0144.html +++ /dev/null @@ -1,81 +0,0 @@ - - -

Preparing a Chart

-

You can prepare a chart using one of the following methods:

- -

Customizing a Chart

  1. Customize the content of a chart as required.

    For details about how to create a chart, see https://helm.sh/docs/chart_template_guide/getting_started/.

    -

  2. Set the chart directory structure and name the chart based on the requirements defined in Chart Specifications.
-
-

Using a Kubernetes Official Chart

  1. Visit https://artifacthub.io/ to obtain the required chart.
  2. Log in to a Linux host.
  3. Upload the chart obtained in 1.
  4. Run the following command to compress the chart.

    • If the Helm client is not installed on the Linux host, run the following command:

      tar pzcf {name}-{version}.tgz {name}/

      -

      In the preceding command,

      -

      {name} indicates the actual chart name.

      -

      {version} indicates the actual chart version.

      -

      The values of {name} and {version} must be the same as the values of name and version in the Chart.yaml file in the chart.

      -
      -
    • If the Helm client is installed on the Linux host, run the following command:

      helm package {name}/

      -

      In the preceding command, replace {name} with the actual chart name.

      -
    -

  5. Set the chart directory structure and name the chart based on the requirements defined in Chart Specifications.
-
-

Chart Specifications

This section uses the redis chart as an example to illustrate the chart specifications.

-
  • Naming Requirement

    A chart package is named in the format of {name}-{version}.tgz, where {version} indicates the version number in the format of Major version number.Minor version number.Revision number, for example, redis-0.4.2.tgz.

    -

    The chart name {name} can contain a maximum of 64 characters.

    -

    The version number must comply with the semantic versioning rules.

    -
    • The major and minor version numbers are mandatory, and the revision number is optional.
    • The major and minor version numbers and revision number must be integers, greater than or equal to 0, and less than or equal to 99.
    -
    -
  • Directory Structure

    The directory structure of a chart is as follows:

    -
    redis/
    -  templates/
    -  values.yaml
    -  README.md
    -  Chart.yaml
    -  .helmignore
    -
    As listed in Table 1, the parameters marked with * are mandatory. -
    - - - - - - - - - - - - - - - - - - - -
    Table 1 Parameters in the directory structure of a chart

    Parameter

    -

    Description

    -

    * templates

    -

    Stores all templates.

    -

    * values.yaml

    -

    Describes configuration parameters required by templates.

    -
    NOTICE:

    Make sure that the image address set in the values.yaml file is the same as the image address in the container image repository. Otherwise, an exception occurs when you create a workload, and the system displays a message indicating that the image fails to be pulled.

    -

    To obtain the image address, perform the following operations: Log in to the CCE console. In the navigation pane, choose Image Repository to access the SWR console. Choose My Images > Private Images and click the name of the uploaded image. On the Image Tags tab page, obtain the image address from the pull command. You can click to copy the command in the Image Pull Command column.

    -
    -

    README.md

    -

    A markdown file, including:

    -
    • The workload or services provided by the chart.
    • Prerequisites for running the chart.
    • Configurations in the values.yaml file.
    • Information about chart installation and configuration.
    -

    * Chart.yaml

    -

    Basic information about the chart.

    -

    .helmignore

    -

    Files or data that do not need to be read as templates during workload installation.

    -
    -
    -
    -
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0145.html b/docs/cce/umn/cce_01_0145.html deleted file mode 100644 index 942cc1d3..00000000 --- a/docs/cce/umn/cce_01_0145.html +++ /dev/null @@ -1,49 +0,0 @@ - - -

Uploading a Chart

-

Scenario

Upload a chart to Charts > Uploaded Charts for subsequent workload creation.

-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Charts > Uploaded Charts and click Upload Chart.
  2. Click Select File, select the chart to be uploaded, and click Upload.

    When you upload a chart, the naming rule of the OBS bucket is changed from cce-charts-{region}-{domain_name} to cce-charts-{region}-{domain_id}. In the old naming rule, the system converts the domain_name value into a Base64 string and uses the first 63 characters. If you cannot find the chart in the OBS bucket with the new name, search for the bucket with the old name.

    -
    -

-
-

Related Operations

After a chart is created, you can perform operations listed in Table 1 on the Uploaded Charts page.

- -
- - - - - - - - - - - - - - - - -
Table 1 Related operations

Operation

-

Description

-

Installing a chart

-

Click Install Chart to install the chart for creating workloads. For details, see Creating a Workload from a Chart.

-

Updating a chart

-

The chart content will be updated while the chart version remains unchanged. The procedure is similar to that of uploading a chart.

-

Downloading a chart

-

Click More > Download to download the chart to the local host.

-

Deleting a chart

-

Click More > Delete to delete the installed chart.

-

Deleted charts cannot be restored. Exercise caution when performing this operation.

-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0146.html b/docs/cce/umn/cce_01_0146.html deleted file mode 100644 index b8577983..00000000 --- a/docs/cce/umn/cce_01_0146.html +++ /dev/null @@ -1,58 +0,0 @@ - - -

Creating a Workload from a Chart

-

Creating a Chart-based Workload

  1. Log in to the CCE console. In the navigation pane, choose Charts > Uploaded Charts.
  2. In the list of uploaded charts, click Install.
  3. Set the installation parameters listed in Table 1. The parameters marked with an asterisk (*) are mandatory.

    -

    - - - - - - - - - - - - - - - - - - - -
    Table 1 Installation parameters

    Parameter

    -

    Description

    -

    * Release Name

    -

    Unique name of the chart release.

    -

    * Chart Version

    -

    Version of the chart to be installed.

    -

    * Cluster

    -

    Cluster where the workload will be deployed.

    -

    * Namespace

    -

    Namespace to which the workload will be deployed.

    -

    Advanced Settings

    -

    You can import and replace the values.yaml file or directly edit the chart parameters online.

    -
    NOTE:

    An imported values.yaml file must comply with YAML specifications, that is, KEY:VALUE format. The fields in the file are not restricted.

    -

    The key value of the imported values.yaml must be the same as that of the selected chart package. Otherwise, the values.yaml does not take effect. That is, the key cannot be changed.

    -
    -
    1. Click Import Configuration File.
    2. Select the corresponding values.yaml file and click Open.
    -
    -
    -

  4. After the configuration is complete, click Next.
  5. Confirm the configuration and click Submit.
  6. Click Back to Release List to view the running status of the chart-based workload (also called release), or click View Release Details to view details about the release.
-
-

Upgrading a Chart-based Workload

  1. Log in to the CCE console. In the navigation pane, choose Charts > Uploaded Charts. Click the Template Instances tab.
  2. Click Upgrade in the row where the desired workload resides and set the parameters for the workload.
  3. Select a chart version for Chart Version.
  4. Follow the prompts to modify the chart parameters. Click Upgrade, and then click Submit.
  5. Click Back to Release List. If the chart status changes to Upgrade successful, the workload is successfully upgraded.
-
-

Rolling Back a Chart-based Workload

  1. Log in to the CCE console. In the navigation pane, choose Charts > Uploaded Charts. Click the Template Instances tab.
  2. Click More > Roll Back for the workload to be rolled back, select the workload version, and click Roll back to this version.

    In the workload list, if the status is Rollback successful, the workload is rolled back successfully.

    -

-
-

Uninstalling a Chart-based Workload

  1. Log in to the CCE console. In the navigation pane, choose Charts > Uploaded Charts. Click the Template Instances tab.
  2. Click More > Uninstall next to the release to be uninstalled, and click Yes. Exercise caution when performing this operation because releases cannot be restored after being uninstalled.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0149.html b/docs/cce/umn/cce_01_0149.html deleted file mode 100644 index 99035f48..00000000 --- a/docs/cce/umn/cce_01_0149.html +++ /dev/null @@ -1,15 +0,0 @@ - - -

Affinity and Anti-Affinity Scheduling

-
-
- -
- diff --git a/docs/cce/umn/cce_01_0150.html b/docs/cce/umn/cce_01_0150.html deleted file mode 100644 index 8de85033..00000000 --- a/docs/cce/umn/cce_01_0150.html +++ /dev/null @@ -1,236 +0,0 @@ - - -

Creating a Job

-

Scenario

Jobs are short-lived and run to completion. They can be executed immediately after being deployed. A job is complete when it exits normally (exit 0).

-

A job is a resource object that is used to control batch tasks. It is different from a long-running workload (such as a Deployment or StatefulSet).

-

A job starts and terminates at specific times, while a long-running workload runs continuously unless it is terminated. The pods managed by a job automatically exit after successfully completing the job based on user configurations. The success flag varies according to the spec.completions policy.

-
  • One-off jobs: A single pod runs once until successful termination.
  • Jobs with a fixed success count: N pods run until successful termination.
  • Queue jobs: A job is considered complete when the application confirms the overall success of the queue processing.
-
-

Prerequisites

Resources have been created. For details, see Creating a Node. If clusters and nodes are available, you need not create them again.

- -
-

Procedure

  1. (Optional) If you use a private container image to create your job, upload the container image to the image repository.

    -

  2. Log in to the CCE console. In the navigation pane, choose Workloads > Jobs. Click Create Job.
  3. Configure the basic job information listed in Table 1. The parameters marked with an asterisk (*) are mandatory.

    -

    - - - - - - - - - - - - - - - - - - - -
    Table 1 Basic job information

    Parameter

    -

    Description

    -

    * Job Name

    -

    Name of a new job. The name must be unique.

    -

    Enter 4 to 63 characters starting with a lowercase letter and ending with a letter or digit. Only lowercase letters, digits, and hyphens (-) are allowed.

    -

    * Cluster

    -

    Cluster to which a new job belongs.

    -

    * Namespace

    -

    Namespace to which the new job belongs. By default, this parameter is set to default.

    -

    *Instances

    -

    Number of pods in this job. A job can have one or more pods. You can specify the number of pods. The default value is 1.

    -

    Each job pod consists of the same containers. Configuring multiple job pods can ensure high availability. The job can continue to run even if one of the pods is faulty.

    -

    Description

    -

    Description of a job.

    -
    -
    -

  4. Click Next: Add Container to add a container and an image.

    1. Click Select Container Image to select the image to be deployed.
      • My Images: displays all image repositories you created.
      • Third-Party Images: Create a job using an image from any third-party image repository. When you create a job using a third-party image, ensure that the node where the job is running can access public networks. For details about how to use a third-party image, see Using a Third-Party Image.
        • If your image repository does not require authentication, set Secret Authentication to No, enter an image address in Image Address, and then click OK.
        • If your image repository must be authenticated (account and password), you need to create a secret and then use a third-party image. For details, see Using a Third-Party Image.
        -
      • Shared Images: The images shared by other tenants using the SWR service are displayed here. You can create workloads based on the shared images.
      -
    2. Set image parameters. -
      - - - - - - - - - - - - - - - - -
      Table 2 Image parameters

      Parameter

      -

      Description

      -

      Image

      -

      Name of the image. You can click Change Image to update it.

      -

      *Image Version

      -

      Select the image tag to be deployed.

      -

      *Container Name

      -

      Name of the container. You can modify it.

      -

      Container Resources

      -

      CPU

      -
      • Request: minimum number of CPU cores required by a container. The default value is 0.25 cores.
      • Limit: maximum number of CPU cores available for a container. Do not leave Limit unspecified. Otherwise, intensive use of container resources will occur and your workload may exhibit unexpected behavior.
      -

      Memory

      -
      • Request: minimum amount of memory required by a container. The default value is 0.5 GiB.
      • Limit: maximum amount of memory available for a container. When memory usage exceeds the specified memory limit, the container will be terminated.
      -

      For more information about Request and Limit, see Setting Container Specifications.

      -

      GPU: configurable only when the cluster contains GPU nodes.

      -

      It indicates the percentage of GPU resources reserved for a container. Select Use and set the percentage. For example, if this parameter is set to 10%, the container is allowed to use 10% of GPU resources. If you do not select Use or set this parameter to 0, no GPU resources can be used.

      -

      GPU/Graphics Card: The workload's pods will be scheduled to the node with the specified GPU.

      -

      If Any GPU type is selected, the container uses a random GPU in the node. If you select a specific GPU, the container uses that GPU accordingly.

      -
      -
      -
    3. (Optional) Configure advanced settings. -
      - - - - - - - - - - - - - - - - -
      Table 3 Advanced settings

      Parameter

      -

      Description

      -

      Lifecycle

      -
      Lifecycle scripts define the actions taken for container-related jobs when a lifecycle event occurs. -
      -

      Environment Variables

      -
      Environment variables can be added to a container. In general, environment variables are used to set parameters. On the Environment Variables tab page, click Add Environment Variable. Currently, environment variables can be added using any of the following methods:
      • Added manually: Set Variable Name and Variable Value/Reference.
      • Added from Secret: Set Variable Name and select the desired secret name and data. A secret must be created in advance. For details, see Creating a Secret.
      • Added from ConfigMap: Set Variable Name and select the desired ConfigMap name and data. A ConfigMap must be created in advance. For details, see Creating a ConfigMap.
      -
      -

      Data Storage

      -

      The local disk or cloud storage can be mounted to a container to implement persistent data file storage.

      -

      For details, see Storage (CSI).

      -

      Log Policies

      -

      Set a log policy and log path for collecting workload logs and preventing logs from being over-sized. For details, see Container Logs.

      -
      -
      -
    4. (Optional) One job pod contains one or more related containers. If your job contains multiple containers, click Add Container to add containers.
    -

  5. Click Create.

    If the status of the job is Executing, the job has been created successfully.

    -

-
-

Using kubectl

A job has the following configuration parameters:

-
  • spec.template: has the same schema as a pod.
  • RestartPolicy: can only be set to Never or OnFailure.
  • For a single-pod job, the job ends after the pod runs successfully by default.
  • .spec.completions: indicates the number of pods that need to run successfully to end a job. The default value is 1.
  • .spec.parallelism: indicates the number of pods that run concurrently. The default value is 1.
  • spec.backoffLimit: indicates the maximum number of retries performed if a pod fails. When the limit is reached, the pod will not try again.
  • .spec.activeDeadlineSeconds: indicates the running time of pods. Once the time is reached, all pods of the job are terminated. The priority of .spec.activeDeadlineSeconds is higher than that of .spec.backoffLimit. That is, if a job reaches the .spec.activeDeadlineSeconds, the spec.backoffLimit is ignored.
-

Based on the .spec.completions and .spec.parallelism settings, jobs are classified into the following types.

- -
- - - - - - - - - - - - - - - - - - - - - -
Table 4 Job types

Job Type

-

Description

-

Example

-

One-off jobs

-

A single pod runs once until successful termination.

-

Database migration

-

Jobs with a fixed completion count

-

One pod runs until reaching the specified completions count.

-

Work queue processing pod

-

Parallel jobs with a fixed completion count

-

Multiple pods run until reaching the specified completions count.

-

Multiple pods for processing work queues concurrently

-

Parallel jobs

-

One or more pods run until successful termination.

-

Multiple pods for processing work queues concurrently

-
-
-

The following is an example job, which computes π to 2,000 digits and prints the output.

-
apiVersion: batch/v1
-kind: Job
-metadata:
-  name: myjob
-spec:
-  completions: 50        # 50 pods need to run successfully to finish the job. In this example, π is printed 50 times.
-  parallelism: 5        # 5 pods are run in parallel.
-  backoffLimit: 5        # The maximum number of retries is 5.
-  template:
-    spec:
-      containers:
-      - name: pi
-        image: perl
-        command: ["perl",  "-Mbignum=bpi", "-wle", "print bpi(2000)"]
-      restartPolicy: Never
-

Description

-
  • apiVersion: batch/v1 indicates the version of the current job.
  • kind: Job indicates that the current resource is a job.
  • restartPolicy: Never indicates the current restart policy. For jobs, this parameter can only be set to Never or OnFailure. For other controllers (for example, Deployments), you can set this parameter to Always.
-

Run the job.

-
  1. Start the job.

    [root@k8s-master k8s]# kubectl apply -f myjob.yaml
    -job.batch/myjob created
    -

  2. View the job details.

    kubectl get job

    -
    [root@k8s-master k8s]# kubectl get job
    -NAME    COMPLETIONS   DURATION   AGE
    -myjob   50/50         23s        3m45s
    -

    If the value of COMPLETIONS is 50/50, the job is successfully executed.

    -

  3. Query the pod status.

    kubectl get pod

    -
    [root@k8s-master k8s]# kubectl get pod
    -NAME          READY   STATUS      RESTARTS   AGE
    -myjob-29qlw   0/1     Completed   0          4m5s
    -...
    -

    If the status is Completed, the job is complete.

    -

  4. View the pod logs.

    kubectl logs

    -
    # kubectl logs myjob-29qlw
    -3.1415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679821480865132823066470938446095505822317253594081284811174502841027019385211055596446229489549303819644288109756659334461284756482337867831652712019091456485669234603486104543266482133936072602491412737245870066063155881748815209209628292540917153643678925903600113305305488204665213841469519415116094330572703657595919530921861173819326117931051185480744623799627495673518857527248912279381830119491298336733624406566430860213949463952247371907021798609437027705392171762931767523846748184676694051320005681271452635608277857713427577896091736371787214684409012249534301465495853710507922796892589235420199561121290219608640344181598136297747713099605187072113499999983729780499510597317328160963185950244594553469083026425223082533446850352619311881710100031378387528865875332083814206171776691473035982534904287554687311595628638823537875937519577818577805321712268066130019278766111959092164201989380952572010654858632788659361533818279682303019520353018529689957736225994138912497217752834791315155748572424541506959508295331168617278558890750983817546374649393192550604009277016711390098488240128583616035637076601047101819429555961989467678374494482553797747268471040475346462080466842590694912933136770289891521047521620569660240580381501935112533824300355876402474964732639141992726042699227967823547816360093417216412199245863150302861829745557067498385054945885869269956909272107975093029553211653449872027559602364806654991198818347977535663698074265425278625518184175746728909777727938000816470600161452491921732172147723501414419735685481613611573525521334757418494684385233239073941433345477624168625189835694855620992192221842725502542568876717904946016534668049886272327917860857843838279679766814541009538837863609506800642251252051173929848960841284886269456042419652850222106611863067442786220391949450471237137869609563643719172874677646575739624138908658326459958133904780275901
    -

-
-

Related Operations

After a one-off job is created, you can perform operations listed in Table 5.

- -
- - - - - - - - - - -
Table 5 Other operations

Operation

-

Description

-

Viewing a YAML

-

Click View YAML next to the job name to view the YAML file corresponding to the current job.

-

Deleting a one-off job

-
  1. Select the job to be deleted and click Delete in the Operation column.
  2. Click OK.

    Deleted jobs cannot be restored. Exercise caution when deleting a job.

    -
-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0151.html b/docs/cce/umn/cce_01_0151.html deleted file mode 100644 index 27a7ac29..00000000 --- a/docs/cce/umn/cce_01_0151.html +++ /dev/null @@ -1,215 +0,0 @@ - - -

Creating a Cron Job

-

Scenario

A cron job runs on a repeating schedule. You can perform time synchronization for all active nodes at a fixed time point.

-
A cron job runs periodically at the specified time. It is similar to Linux crontab. A cron job can:
  • Run only once at a specified time.
  • Run periodically at specified times.
-
-

The typical usage of a cron job is as follows:

-
  • Schedules jobs at the specified time.
  • Creates jobs to run periodically, for example, database backup and email sending.
-
-

Prerequisites

Resources have been created. For details, see Creating a Node. If clusters and nodes are available, you need not create them again.

-
-

Procedure

  1. (Optional) If you use a private container image to create your cron job, upload the container image to the image repository.

    -

  2. Log in to the CCE console. In the navigation pane, choose Workloads > Cron Jobs. Then, click Create Cron Job.
  3. Configure the basic cron job information listed in Table 1. The parameters marked with an asterisk (*) are mandatory.

    -

    - - - - - - - - - - - - - - - - -
    Table 1 Basic cron job information

    Parameter

    -

    Description

    -

    * Job Name

    -

    Name of a new cron job. The name must be unique.

    -

    Enter 4 to 52 characters starting with a lowercase letter and ending with a letter or digit. Only lowercase letters, digits, and hyphens (-) are allowed.

    -

    * Cluster

    -

    Cluster to which a new cron job belongs.

    -

    * Namespace

    -

    Namespace to which a cron job belongs. If you do not specify this parameter, the value default is used by default.

    -

    Description

    -

    Description of a cron job.

    -
    -
    -

  4. Click Next: Configure Timing Rule.
  5. Set the timing rule.

    -

    - - - - - - - - - - - - - -
    Table 2 Timing rule parameters

    Parameter

    -

    Description

    -

    * Concurrency Policy

    -

    The following policies are supported:

    -
    • Forbid: A new job cannot be created before the previous job is complete.
    • Allow: The cron job allows concurrently running jobs, which preempt cluster resources.
    • Replace: A new job replaces the previous job when it is time to create the job but the previous job is not complete.
    -

    * Schedule

    -

    Time when a new cron job is executed.

    -

    Job Records

    -

    You can set the number of jobs that are successfully executed or fail to be executed. Setting a limit to 0 corresponds to keeping none of the jobs after they finish.

    -
    -
    -

  6. Click Next: Add Container to add a container.

    1. Click Select Container Image to select the image to be deployed.
      • My Images: displays all image repositories you created.
      • Third-Party Images: Create a job using an image from any third-party image repository. When you create a job using a third-party image, ensure that the node where the job is running can access public networks. For details about how to use a third-party image, see Using a Third-Party Image.
        • If your image repository does not require authentication, set Secret Authentication to No, enter an image address in Image Address, and then click OK.
        • If your image repository must be authenticated (account and password), you need to create a secret and then use a third-party image. For details, see Using a Third-Party Image.
        -
      • Shared Images: The images shared by other tenants using the SWR service are displayed here. You can create workloads based on the shared images.
      -
    2. Set image parameters. -
      - - - - - - - - - - - - - - - - -
      Table 3 Image parameters

      Parameter

      -

      Description

      -

      Image

      -

      Name of the image. You can click Change Image to update it.

      -

      *Image Version

      -

      Select the image tag to be deployed.

      -

      *Container Name

      -

      Name of the container. You can modify it.

      -

      Container Resources

      -

      CPU

      -
      • Request: minimum number of CPU cores required by a container. The default value is 0.25 cores.
      • Limit: maximum number of CPU cores available for a container. Do not leave Limit unspecified. Otherwise, intensive use of container resources will occur and your workload may exhibit unexpected behavior.
      -

      Memory

      -
      • Request: minimum amount of memory required by a container. The default value is 0.5 GiB.
      • Limit: maximum amount of memory available for a container. When memory usage exceeds the specified memory limit, the container will be terminated.
      -

      For more information about Request and Limit, see Setting Container Specifications.

      -

      GPU: configurable only when the cluster contains GPU nodes.

      -

      It indicates the percentage of GPU resources reserved for a container. Select Use and set the percentage. For example, if this parameter is set to 10%, the container is allowed to use 10% of GPU resources. If you do not select Use or set this parameter to 0, no GPU resources can be used.

      -

      GPU/Graphics Card: The workload's pods will be scheduled to the node with the specified GPU.

      -

      If Any GPU type is selected, the container uses a random GPU in the node. If you select a specific GPU, the container uses that GPU accordingly.

      -
      -
      -
    3. (Optional) Configure advanced settings. -
      - - - - - - - - - - -
      Table 4 Advanced settings

      Parameter

      -

      Description

      -

      Lifecycle

      -
      Lifecycle scripts define the actions taken for container tasks when a lifecycle event occurs. -
      -

      Environment Variables

      -
      Environment variables can be added to a container. In general, environment variables are used to set parameters. On the Environment Variables tab page, click Add Environment Variable. Currently, environment variables can be added using any of the following methods:
      • Added manually: Set Variable Name and Variable Value/Reference.
      • Added from Secret: Set Variable Name and select the desired secret name and data. A secret must be created in advance. For details, see Creating a Secret.
      • Added from ConfigMap: Set Variable Name and select the desired ConfigMap name and data. A ConfigMap must be created in advance. For details, see Creating a ConfigMap.
      -
      -
      -
      -
    4. (Optional) One job pod contains one or more related containers. If your cron job contains multiple containers, click Add Container to add containers.
    -

  7. Click Create.

    If the status is Started, the cron job has been created successfully.

    -

-
-

Using kubectl

A cron job has the following configuration parameters:

-
  • .spec.schedule: takes a Cron format string, for example, 0 * * * * or @hourly, as schedule time of jobs to be created and executed.
  • .spec.jobTemplate: specifies jobs to be run, and has the same schema as when you are Creating a Job Using kubectl.
  • .spec.startingDeadlineSeconds: specifies the deadline for starting a job.
  • .spec.concurrencyPolicy: specifies how to treat concurrent executions of a job created by the Cron job. The following options are supported:
    • Allow (default value): allows concurrently running jobs.
    • Forbid: forbids concurrent runs, skipping next run if previous has not finished yet.
    • Replace: cancels the currently running job and replaces it with a new one.
    -
-

The following is an example cron job, which is saved in the cronjob.yaml file.

-
apiVersion: batch/v1beta1
-kind: CronJob
-metadata:
-  name: hello
-spec:
-  schedule: "*/1 * * * *"
-  jobTemplate:
-    spec:
-      template:
-        spec:
-          containers:
-          - name: hello
-            image: busybox
-            args:
-            - /bin/sh
-            - -c
-            - date; echo Hello from the Kubernetes cluster
-          restartPolicy: OnFailure
-

Run the job.

-
  1. Create a cron job.

    kubectl create -f cronjob.yaml

    -

    Information similar to the following is displayed:

    -
    cronjob.batch/hello created
    -

  2. Query the running status of the cron job:

    kubectl get cronjob

    -
    NAME      SCHEDULE      SUSPEND   ACTIVE    LAST SCHEDULE   AGE
    -hello     */1 * * * *   False     0         <none>          9s
    -

    kubectl get jobs

    -
    NAME               COMPLETIONS   DURATION   AGE
    -hello-1597387980   1/1           27s        45s
    -

    kubectl get pod

    -
    NAME                           READY     STATUS      RESTARTS   AGE
    -hello-1597387980-tjv8f         0/1       Completed   0          114s
    -hello-1597388040-lckg9         0/1       Completed   0          39s
    -

    kubectl logs hello-1597387980-tjv8f

    -
    Fri Aug 14 06:56:31 UTC 2020
    -Hello from the Kubernetes cluster
    -

    kubectl delete cronjob hello

    -
    cronjob.batch "hello" deleted
    -

    When a cron job is deleted, the related jobs and pods are deleted too.

    -
    -

-
-

Related Operations

After a cron job is created, you can perform operations listed in Table 5.

- -
- - - - - - - - - - - - - -
Table 5 Other operations

Operation

-

Description

-

Editing a YAML file

-

Click More > View YAML next to the cron job name to view the YAML file of the current job.

-

Stopping a cron job

-
  1. Select the job to be stopped and click Stop in the Operation column.
  2. Click OK.
-

Deleting a cron job

-
  1. Select the cron job to be deleted and click More > Delete in the Operation column.
  2. Click OK.

    Deleted jobs cannot be restored. Therefore, exercise caution when deleting a job.

    -
-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0152.html b/docs/cce/umn/cce_01_0152.html deleted file mode 100644 index 16644290..00000000 --- a/docs/cce/umn/cce_01_0152.html +++ /dev/null @@ -1,123 +0,0 @@ - - -

Creating a ConfigMap

-

Scenario

A ConfigMap is a type of resource that stores configuration information required by a workload. Its content is user-defined. After creating ConfigMaps, you can use them as files or environment variables in a containerized workload.

-

ConfigMaps allow you to decouple configuration files from container images to enhance the portability of containerized workloads.

-

Benefits of ConfigMaps:

-
  • Manage configurations of different environments and services.
  • Deploy workloads in different environments. Multiple versions are supported for configuration files so that you can update and roll back workloads easily.
  • Quickly import configurations in the form of files to containers.
-
-

Prerequisites

Cluster and node resources have been created. For more information, see Creating a CCE Cluster. If you have available clusters and node resources, skip this operation.

-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Configuration Center > ConfigMaps. Click Create ConfigMap.
  2. You can create a ConfigMap directly or based on YAML. If you create a ConfigMap based on YAML, go to 4.
  3. Method 1: Create a ConfigMap directly.

    Set the parameters by referring to Table 1. -
    - - - - - - - - - - - - - - - - - - - - - - -
    Table 1 Parameters for creating a ConfigMap

    Parameter

    -

    Description

    -

    Name

    -

    Name of a ConfigMap, which must be unique in a namespace.

    -

    Cluster

    -

    Cluster that will use the ConfigMap you create.

    -

    Namespace

    -

    Namespace to which the ConfigMap belongs. If you do not specify this parameter, the value default is used by default.

    -

    Description

    -

    Description of the ConfigMap.

    -

    Data

    -

    Configuration data used by the workload. The data can be consumed in a container as files or environment variables. Key indicates a file name, and Value indicates the content of that file.

    -
    1. Click Add Data.
    2. Set Key and Value.
    -

    Labels

    -

    Labels are attached to objects such as workloads, nodes, and Services in key-value pairs.

    -

    Labels define the identifiable attributes of these objects and are used to manage and select the objects.

    -
    1. Click Add Label.
    2. Set Key and Value.
    -
    -
    -
    -

  4. Method 2: Create a ConfigMap based on YAML.

    To create ConfigMaps by uploading a file, ensure that the resource description file has been created. CCE supports files in YAML format. For more information, see ConfigMap Requirements.

    -
    -
    Click Create YAML on the right of the page.
    • Method 1: Import the orchestration file.

      Click Add File to import the file in YAML format. The orchestration content can be directly displayed.

      -
    • Method 2: Directly orchestrate the content.

      In the orchestration content area, enter the content of the YAML file.

      -
    -
    -

  5. After the configuration is complete, click Create.

    The new ConfigMap is displayed in the ConfigMap list.

    -

-
-

ConfigMap Requirements

A ConfigMap resource file must be in YAML format, and the file size cannot exceed 2 MB.

-
The file name is configmap.yaml and the following shows a configuration example.
apiVersion: v1
kind: ConfigMap
metadata:
  name: test-configmap
data:
  data-1: value-1
  data-2: value-2
-
-

Creating a ConfigMap Using kubectl

  1. Configure the kubectl command to connect an ECS to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Create and edit the cce-configmap.yaml file.

    vi cce-configmap.yaml

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: cce-configmap
    data:
      SPECIAL_LEVEL: Hello
      SPECIAL_TYPE: CCE

  3. Run the following commands to create a ConfigMap.

    kubectl create -f cce-configmap.yaml

    -

    kubectl get cm

    NAME               DATA            AGE
    cce-configmap      2               3h
    cce-configmap1     3               7m

-
-
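The created ConfigMap can then be referenced by a workload. The following is a minimal sketch (not part of the procedure above) that injects all key-value pairs of cce-configmap into a container as environment variables; the pod name and image are illustrative assumptions only.

apiVersion: v1
kind: Pod
metadata:
  name: configmap-demo-pod        # Hypothetical pod name used for illustration
spec:
  containers:
  - name: demo
    image: nginx:latest           # Any image accessible to the cluster
    envFrom:
    - configMapRef:
        name: cce-configmap       # The ConfigMap created above

After the pod starts, the container can read SPECIAL_LEVEL and SPECIAL_TYPE as environment variables.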

Related Operations

After creating a configuration item, you can update or delete it as described in Table 2. -
Table 2 Related operations

Operation

-

Description

-

Viewing a YAML file

-

Click View YAML next to the ConfigMap name to view the YAML file corresponding to the current ConfigMap.

-

Updating a ConfigMap

-
  1. Select the name of the ConfigMap to be updated and click Update.
  2. Modify the ConfigMap data. For more information, see Table 1.
  3. Click Update.
-

Deleting a ConfigMap

-

Select the configuration you want to delete and click Delete.

-

Follow the prompts to delete the ConfigMap.

-
-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0153.html b/docs/cce/umn/cce_01_0153.html deleted file mode 100644 index c9e1b4e0..00000000 --- a/docs/cce/umn/cce_01_0153.html +++ /dev/null @@ -1,145 +0,0 @@ - - -

Creating a Secret

-

Scenario

A secret is a type of resource that holds sensitive data, such as authentication and key information. Its content is user-defined. After creating secrets, you can use them as files or environment variables in a containerized workload.

-
-

Prerequisites

Cluster and node resources have been created. For more information, see Creating a CCE Cluster. If you have available clusters and node resources, skip this operation.

-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Configuration Center > Secrets. Click Create Secret.
  2. You can create a secret directly or based on YAML. If you want to create a secret based on YAML, go to 4.
  3. Method 1: Create a secret directly.

    Set the basic information by referring to Table 1. -
    Table 1 Parameters for creating a secret

    Parameter

    -

    Description

    -

    Name

    -

    Name of the secret you create, which must be unique.

    -

    Cluster

    -

    Cluster that will use the secret you create.

    -

    Namespace

    -

    Namespace to which the secret belongs. If you do not specify this parameter, the default namespace is used.

    -

    Description

    -

    Description of a secret.

    -

    Type

    -

    Type of the secret you create.

    -
    • Opaque: common secret.
    • kubernetes.io/dockerconfigjson: a secret that stores the authentication information required for pulling images from a private repository.
    • IngressTLS: a secret that stores the certificate required by ingresses (layer-7 load balancing Services).
    • Other: another type of secret, which is specified manually.
    -

    Secret Data

    -

    Workload secret data can be used in containers.

    -
    • If the secret is of the Opaque type:
      1. Click Add Data.
      2. Enter the key and value. The value must be Base64-encoded. For details, see Base64 Encoding.
      -
    • If the secret type is kubernetes.io/dockerconfigjson, enter the account and password of the private image repository.
    • If the secret type is IngressTLS, upload the certificate file and private key file.
      NOTE:
      • A certificate is a self-signed or CA-signed credential used for identity authentication.
      • A certificate request is a request for a signature with a private key.
      -
      -
    -

    Secret Label

    -

    Labels are attached to objects such as workloads, nodes, and Services in key-value pairs.

    -

    Labels define the identifiable attributes of these objects and are used to manage and select the objects.

    -
    1. Click Add Label.
    2. Enter the key and value.
    -
    -
    -
    -

  4. Method 2: Create a secret based on the YAML file.

    To create a resource by uploading a file, ensure that the resource description file has been created. CCE supports files in JSON or YAML format. For more information, see Secret Resource File Configuration.

    -
    -
    You can import or directly write the file content in YAML or JSON format.
    • Method 1: Import the orchestration file.

      Click Add File to import the file in YAML or JSON format. The orchestration content can be directly displayed.

      -
    • Method 2: Directly orchestrate the content.

      In the orchestration content area, enter the content of the YAML or JSON file.

      -
    -
    -

  5. After the configuration is complete, click Create.

    The new secret is displayed in the key list.

    -

-
-

Secret Resource File Configuration

This section describes configuration examples of secret resource description files.

-

For example, you can retrieve the username and password for a workload through a secret.

-
  • YAML format

    The secret.yaml file is defined as shown below. The values must be Base64-encoded. For details, see Base64 Encoding.

    -
    apiVersion: v1
    kind: Secret
    metadata:
      name: mysecret            # Secret name
      namespace: default        # Namespace. The default value is default.
    data:
      username: ******          # The value must be Base64-encoded.
      password: ******          # The value must be Base64-encoded.
    type: Opaque                # You are advised not to change this value.
-
-

Creating a Secret Using kubectl

  1. Configure the kubectl command to connect an ECS to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Create and edit the Base64-encoded cce-secret.yaml file.

    # echo -n "content to be encoded" | base64
    ******

    vi cce-secret.yaml

    -
    apiVersion: v1
    kind: Secret
    metadata:
      name: mysecret
    type: Opaque
    data:
      username: ******
      password: ******

  3. Create a secret.

    kubectl create -f cce-secret.yaml

    -

    You can query the secret after creation.

    -

    kubectl get secret

    -

-
-
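The created secret can then be consumed by a workload. The following is a minimal sketch that exposes one key of mysecret as an environment variable; the pod name and image are illustrative assumptions only.

apiVersion: v1
kind: Pod
metadata:
  name: secret-demo-pod           # Hypothetical pod name used for illustration
spec:
  containers:
  - name: demo
    image: nginx:latest           # Any image accessible to the cluster
    env:
    - name: USERNAME
      valueFrom:
        secretKeyRef:
          name: mysecret          # The secret created above
          key: username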

Related Operations

After creating a secret, you can update or delete it as described in Table 2.

The secret list contains system secret resources, which can only be viewed. System secrets cannot be updated or deleted.

-
- -
Table 2 Related Operations

Operation

-

Description

-

Viewing a YAML file

-

Click View YAML next to the secret name to view the YAML file corresponding to the current secret.

-

Updating a secret

-
  1. Select the name of the secret to be updated and click Update.
  2. Modify the secret data. For more information, see Table 1.
  3. Click Update.
-

Deleting a secret

-

Select the secret you want to delete and click Delete.

-

Follow the prompts to delete the secret.

-

Deleting secrets in batches

-
  1. Select the secrets to be deleted.
  2. Click Delete above the secret list.
  3. Follow the prompts to delete the secrets.
-
-
-
-
-

Base64 Encoding

To encode a character string using Base64, run the echo -n content-to-be-encoded | base64 command. The following is an example.

-
root@ubuntu:~# echo -n "content to be encoded" | base64
******
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0154.html b/docs/cce/umn/cce_01_0154.html deleted file mode 100644 index 26b3e3f6..00000000 --- a/docs/cce/umn/cce_01_0154.html +++ /dev/null @@ -1,146 +0,0 @@ - - -

autoscaler

-

Introduction

Autoscaler is an important Kubernetes controller. It supports microservice scaling and is key to serverless design.

-

When the CPU or memory usage of a microservice is too high, horizontal pod autoscaling is triggered to add pods and reduce the load. These pods are automatically removed when the load decreases, allowing the microservice to run as efficiently as possible.

-

CCE simplifies the creation, upgrade, and manual scaling of Kubernetes clusters, in which traffic loads change over time. To balance resource usage and workload performance of nodes, Kubernetes introduces the autoscaler add-on to automatically resize a cluster based on the resource usage required for workloads deployed in the cluster. For details, see Creating a Node Scaling Policy.

-

Open source community: https://github.com/kubernetes/autoscaler

-
-

How the Add-on Works

autoscaler controls auto scale-out and scale-in.

-
  • Auto scale-out

    If pods in a cluster cannot be scheduled due to insufficient worker nodes, cluster scaling is triggered to add nodes. The nodes to be added have the same specification as configured for the node pool to which the nodes belong. For details, see Creating a Node Scaling Policy.

    -
    The add-on follows the "No Less, No More" policy. For example, if three cores are required for creating a pod and the system supports four-core and eight-core nodes, autoscaler will preferentially create a four-core node.

    Auto scale-out will be performed when:

    -
    • Node resources are insufficient.
    • No node affinity policy is set in the pod scheduling configuration. That is, if a node has been configured as an affinity node for pods, no node will be automatically added when those pods cannot be scheduled. For details about how to configure the node affinity policy, see Node Affinity.
    -
    -
    -
  • Auto scale-in

    When a cluster node is idle for a period of time (10 minutes by default), cluster scale-in is triggered, and the node is automatically deleted. However, a node cannot be deleted from a cluster if the following pods exist:

    -
    • Pods that do not meet specific requirements set in PodDisruptionBudget
    • Pods that cannot be scheduled to other nodes due to constraints such as affinity and anti-affinity policies
    • Pods that have the cluster-autoscaler.kubernetes.io/safe-to-evict: 'false' annotation (see the example after this list)
    • Pods (except those created by kube-system DaemonSet) that exist in the kube-system namespace on the node
    • Pods that are not created by the controller (Deployment/ReplicaSet/job/StatefulSet)
    -
-
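For reference, the following sketch shows how the scale-in protection annotation mentioned above can be added to a pod; the pod name and image are illustrative assumptions only.

apiVersion: v1
kind: Pod
metadata:
  name: protected-pod                                        # Hypothetical pod name
  annotations:
    cluster-autoscaler.kubernetes.io/safe-to-evict: "false"  # Prevents autoscaler from evicting this pod during scale-in
spec:
  containers:
  - name: app
    image: nginx:latest                                      # Any image accessible to the cluster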
-

Notes and Constraints

  • Only clusters of v1.9.10-r2 and later support autoscaler.
  • Ensure that there are sufficient resources for installing the add-on.
-
-

Installing the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Marketplace tab page, click Install Add-on under autoscaler.
  2. On the Install Add-on page, select the cluster and the add-on version, and click Next: Configuration.
  3. Configure add-on installation parameters listed in Table 1.

    -

    Table 1 Basic settings

    Parameter

    -

    Add-on Version

    -

    Description

    -

    Add-on Specifications

    -

    Available in all versions

    -

    The add-on can be deployed in the following specifications:

    -
    • Single: The add-on is deployed with only one pod.
    • HA50: The add-on is deployed with two pods, serving a cluster with 50 nodes and ensuring high availability.
    • HA200: The add-on is deployed with two pods, serving a cluster with 200 nodes and ensuring high availability. Each pod uses more resources than those of the HA50 specification.
    • Custom: You can customize the number of pods and specifications as required.
    -

    Instances

    -

    Available in all versions

    -

    Number of pods that will be created to match the selected add-on specifications. The number cannot be modified.

    -

    Container

    -

    Available in all versions

    -

    CPU and memory quotas of the container allowed for the selected add-on specifications. The quotas cannot be modified.

    -

    Login Mode

    -

    Available only in certain versions

    -

    Select a login mode for the worker nodes to be added during auto scale-up.

    -

    If you select Key pair:

    -

    Key pair: Select an existing key pair or create a new one for identity authentication during remote login to the added nodes.

    -

    Auto Scale-In

    -

    Available in all versions

    -

    Off: Auto scale-in is disabled. Only auto scale-out is allowed.

    -

    On: Enable auto scale-in. The scale-in policy is valid for node pools in the cluster with auto scaling enabled.

    -
    • Idle Time (min): Time for which a node should be unneeded before it is eligible for scale-down. Default value: 10 minutes.
    • Resource Usage: If the percentage of both CPU and memory usage on a node is below this threshold, auto scale-down will be triggered to delete the node from the cluster. The default value is 0.5, which means 50%.
    • Scale-in Cooldown After Scale-out: The time after scale-up that the scale-down evaluation will resume. Default value: 10 minutes.
      NOTE:

      If both auto scale-out and scale-in exist in a cluster, you are advised to set Scale-in Cooldown After Scale-out to 0 minutes. This can prevent the node scale-in from being blocked due to continuous scale-out of some node pools or retries upon a scale-out failure, resulting in unexpected waste of node resources.

      -
      -
    • Scale-in Cooldown After Node Deletion: The time after node deletion that the scale-down evaluation will resume. Default value: 10 minutes.
    • Scale-in Cooldown After Failure: The time after a scale-down failure that the scale-down evaluation will resume. Default value: 3 minutes. For details about the impact and relationship between the scale-in cooling intervals configured in the node pool and autoscaler, see Scale-in Cooling Interval.
    • Max empty bulk delete: The maximum number of empty nodes that can be deleted at the same time. Default value: 10.
    • Node Recheck Timeout: The timeout before autoscaler checks again the node that could not be previously removed. Default value: 5 minutes.
    -

    Node Pool Configuration

    -

    Available only in certain versions

    -

    Configuration of the default node pool. A node pool is a group of compute nodes with the same node type (VM or BMS), specifications, and labels. When a cluster needs to be scaled up, autoscaler will automatically add nodes from node pools to the cluster. If no custom node pool is available, autoscaler will use the default node pool.

    -

    Click Add Node Pool Configuration and set the following parameters:

    -
    • AZ: A physical region where resources use independent power supplies and networks. AZs are physically isolated but interconnected through the internal network.
    • OS: OS of the nodes to be created.
    • Taints: No taints are added by default.
      Taints allow nodes to repel a set of pods. You can add a maximum of 10 taints for each node pool. Each taint contains the following parameters:
      • Key: A key must contain 1 to 63 characters starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
      • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
      • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
      -
      NOTICE:
      • If taints are used, you must configure tolerations in the YAML files of pods. Otherwise, scale-up may fail or pods cannot be scheduled onto the added nodes.
      • Taints cannot be modified after configuration. Incorrect taints may cause a scale-up failure or prevent pods from being scheduled onto the added nodes.
      -
      -
      -
    • Resource Tags: Resource tags can be added to classify resources.
      NOTE:

      You can create predefined tags in Tag Management Service (TMS). Predefined tags are visible to all service resources that support the tagging function. You can use these tags to improve tagging and resource migration efficiency.

      -
      -
    • Specifications: CPU and memory of the added nodes.
    -
    -
    -

    To configure more add-on parameters, click Advanced Settings at the bottom of this page.

    - -
    Table 2 Advanced settings

    Parameter

    -

    Add-on Version

    -

    Description

    -

    Total Nodes

    -

    Available in all versions

    -

    Maximum number of nodes that can be managed by the cluster, within which cluster scale-out is performed.

    -

    Total Cores

    -

    Available in all versions

    -

    Maximum sum of CPU cores of all nodes in a cluster, within which cluster scale-out is performed.

    -

    Total Memory (GB)

    -

    Available in all versions

    -

    Maximum sum of memory of all nodes in a cluster, within which cluster scale-out is performed.

    -

    Auto Scale-Out

    -

    Available only in certain versions

    -

    Triggered when there are pods unscheduled: Selected by default.

    -
    -
    -

  4. When the configuration is complete, click Install.

    After the add-on is installed, click Go Back to Previous Page. On the Add-on Instance tab page, select the corresponding cluster to view the running instance. This indicates that the add-on has been installed on each node in the cluster.

    -

-
-

Upgrading the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Instance tab page, click Upgrade under autoscaler.

    • If the Upgrade button is unavailable, the current add-on is already up-to-date and no upgrade is required.
    • If the Upgrade button is available, click Upgrade to upgrade the add-on.
    • During the upgrade, the autoscaler add-on of the original version on cluster nodes will be discarded, and the add-on of the target version will be installed.
    -
    -

  2. In the dialog box displayed, set parameters and upgrade the add-on. For details about the parameters, see the parameter description in Installing the Add-on.
-
-

Uninstalling the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Instance tab page, select the target cluster and click Uninstall under autoscaler.
  2. In the dialog box displayed, click Yes to uninstall the add-on.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0157.html b/docs/cce/umn/cce_01_0157.html deleted file mode 100644 index d246e191..00000000 --- a/docs/cce/umn/cce_01_0157.html +++ /dev/null @@ -1,135 +0,0 @@ - - -

Setting Cluster Auto Scaling

-

Scenario

The Cluster Auto Scaling feature allows CCE to automatically scale out a cluster (adding worker nodes to a cluster) according to custom policies when workloads cannot be scheduled into the cluster due to insufficient cluster resources.

-
-

Notes and Constraints

  • Currently, master nodes cannot be automatically added to or removed from clusters.
  • If both auto scale-in and auto scale-out are required, use the autoscaler add-on. For details, see autoscaler.
  • Clusters of v1.17 do not support auto scaling using AOM. You can use node pools for auto scaling. For details, see Node Pool Overview.
-
-

Automatic Cluster Scale-out

  1. Log in to the CCE console. Choose Resource Management > Clusters in the navigation pane. In the card view of the cluster to be scaled, choose More > Auto Scaling.
  2. Click the Scale-out Settings tab and then Edit. Set the maximum number of nodes, minimum number of nodes, cooldown period, and node configuration.

    -

    Table 1 Scale-out settings

    Parameter

    -

    Description

    -

    Cooldown Period

    -

    Interval between consecutive scale-out operations, in the unit of second. The cooldown period ensures that a scale-out operation is initiated only when previous scaling operation is finished and the system is running stably.

    -

    The value ranges from 60 to 3600, in seconds. The default value is 900. If the cooldown period is shorter than 900 seconds (15 minutes), auto scaling may not work well because creating a node can take 2 to 10 minutes.

    -

    Maximum Nodes

    -

    Maximum number of nodes to which the cluster can scale out.

    -

    1 ≤ Maximum Nodes < cluster node quota

    -
    NOTE:

    The cluster node quota depends on the cluster size (maximum number of nodes that can be managed by a cluster) and the node quota of the account. The cluster node quota used here is the smaller of the two.

    -
    -

    Node Configuration

    -

    If scale-out is required after the scale-out policy is executed, the system creates a node.

    -
    1. Click Set and set the node parameters. For details about how to set the node parameters, see Creating a Node.
    2. After the parameters are configured, click Submit.
    -
    -
    -

  3. After confirming the scale-out configuration and node parameters, click OK.
  4. Set the scale-out policy for the cluster. Click the Scale-out Policies tab and click Add Policy.

    • Policy Name: Enter a policy name, for example, policy01.
    • Policy Type: Currently, the following types of auto scale-out policies are supported:
      • Metric-based policy: Scale-out is performed based on the CPU or memory settings. -
        Table 2 Parameters for adding a metric-based policy

        Parameter

        -

        Description

        -

        *Metric

        -

        Select Allocated CPU or Allocated Memory.

        -

        *Trigger Condition

        -

        Set a condition for triggering a scale-out policy, that is, when the average CPU or memory allocation value is greater than or less than a specified percentage.

        -

        *Monitoring Window

        -

        Size of the data aggregation window. Select a value from the drop-down list.

        -

        If you select 15min, the selected metric is measured every 15 minutes.

        -

        *Threshold Crossings

        -

        Number of consecutive times that the threshold is reached within the monitoring window. The calculation cycle is fixed at one minute. If you set this parameter to 3, the configured action will be triggered when the metrics meet the specified threshold for three consecutive times.

        -

        *Action

        -

        Action executed after a policy is triggered.

        -
        -
        -
      • Scheduled policy: Scale-out is performed at a specified time. -
        Table 3 Parameters for adding a scheduled policy

        Parameter

        -

        Description

        -

        *Policy Type

        -

        Set this parameter to Scheduled policy.

        -

        *Trigger Time

        -

        Time at which the policy is triggered.

        -

        *Action

        -

        Action executed after a policy is triggered.

        -
        -
        -
      • Periodic policy: Scale-out can be performed by day, week, or month. -
        Table 4 Parameters for adding a periodic policy

        Parameter

        -

        Description

        -

        *Policy Type

        -

        Set the parameter to Periodic policy.

        -

        *Time Range

        -

        Specify the time for triggering the policy.

        -

        *Action

        -

        Action executed after a policy is triggered.

        -
        -
        -
      -
    -

  5. Click OK.

    After the auto scale-out is completed, choose Resource Management > Nodes in the navigation pane. On the node list, you can view the worker nodes added during cluster auto scaling.

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0160.html b/docs/cce/umn/cce_01_0160.html deleted file mode 100644 index 9909a5f2..00000000 --- a/docs/cce/umn/cce_01_0160.html +++ /dev/null @@ -1,23 +0,0 @@ - - - -

OBS Volumes

- -

-
- -
- - - -
- diff --git a/docs/cce/umn/cce_01_0163.html b/docs/cce/umn/cce_01_0163.html deleted file mode 100644 index 3d7aa871..00000000 --- a/docs/cce/umn/cce_01_0163.html +++ /dev/null @@ -1,74 +0,0 @@ - - -

Setting Container Specifications

-

Scenario

CCE allows you to set resource limits for added containers during workload creation. You can request and limit the CPU and memory quotas used by each pod in the workload.

-
-

Meanings

For CPU and Memory, the meanings of Request and Limit are as follows:
  • If Request is selected, the system schedules the pod to the node that meets the requirements for workload deployment based on the request value.
  • If Request is deselected, the system schedules the pod to a random node for workload deployment.
  • If Limit is selected, the system limits the resources used by the workload based on the preset value.
  • If Limit is deselected, the system does not limit the resources used by the pod. If the memory resources used by the pod exceed the memory allocated to the node, the workload or node may be unavailable.
-
-

When creating a workload, you are advised to set the upper and lower limits of CPU and memory resources. If the upper and lower resource limits are not set for a workload, a resource leak of this workload will make resources unavailable for other workloads deployed on the same node. In addition, workloads that do not have upper and lower resource limits cannot be accurately monitored.

-
-
-

For GPU quotas, the meanings of Use and Any GPU type are as follows:

-
  • If Use is selected, the system schedules the pod to a node that meets the requirements for workload deployment based on the configured value.
  • Any GPU type is selected by default and cannot be deselected. This option indicates that the resources used by pods are not limited.
-

Configuration Description

  • CPU quotas: -
    Table 1 Description of CPU quotas

    Parameter

    -

    Description

    -

    CPU request

    -

    Minimum number of CPU cores required by a container. Resources are scheduled for the container based on this value. The container can be scheduled to a node only when the node's allocatable CPU is greater than or equal to the container's CPU request.

    -

    CPU limit

    -

    Maximum number of CPU cores available for a container.

    -
    -
    -

    Recommended configuration

    -

    Actual available CPU of a node ≥ Sum of CPU limits of all containers on the current node ≥ Sum of CPU requests of all containers on the current node. You can view the actual available CPUs of a node on the CCE console (Resource Management > Nodes > Allocatable).

    -
-
  • Memory quotas: -
    Table 2 Description of memory quotas

    Parameter

    -

    Description

    -

    Memory request

    -

    Minimum amount of memory required by a container. Resources are scheduled for the container based on this value. The container can be scheduled to a node only when the node's allocatable memory is greater than or equal to the container's memory request.

    -

    Memory Limit

    -

    Maximum amount of memory available for a container. When the memory usage exceeds the configured memory limit, the container may be restarted, which affects the normal use of the workload.

    -
    -
    -

    Recommended configuration

    -

    Actual available memory of a node ≥ Sum of memory limits of all containers on the current node ≥ Sum of memory requests of all containers on the current node. You can view the actual available memory of a node on the CCE console (Resource Management > Nodes > Allocatable).

    -
-

The allocatable resources are calculated based on the resource request value (Request), which indicates the upper limit of resources that can be requested by pods on this node, but does not indicate the actual available resources of the node. The calculation formula is as follows:

-
  • Allocatable CPU = Total CPU – Requested CPU of all pods – Reserved CPU for other resources
  • Allocatable memory = Total memory – Requested memory of all pods – Reserved memory for other resources
-
-
-

Example

Assume that a cluster contains a node with 4 cores and 8 GB. A workload containing two pods has been deployed on the cluster. The resources of the two pods (pods 1 and 2) are as follows: {CPU request, CPU limit, memory request, memory limit} = {1 core, 2 cores, 2 GB, 2 GB}.

-

The CPU and memory usage of the node is as follows:

-
  • Allocatable CPU = 4 cores - (1 core requested by pod 1 + 1 core requested by pod 2) = 2 cores
  • Allocatable memory = 8 GB - (2 GB requested by pod 1 + 2 GB requested by pod 2) = 4 GB
-

Therefore, the remaining 2 cores and 4 GB can be used by the next new pod.
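For reference, the request and limit values used for pod 1 in this example could be declared in the container specification of a workload as follows. This is a minimal sketch using standard Kubernetes fields; the container name and image are illustrative assumptions only.

containers:
- name: container-1              # Hypothetical container name
  image: nginx:latest            # Illustrative image only
  resources:
    requests:
      cpu: "1"                   # 1 core requested
      memory: 2Gi                # 2 GiB requested
    limits:
      cpu: "2"                   # Limited to 2 cores
      memory: 2Gi                # Limited to 2 GiB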

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0164.html b/docs/cce/umn/cce_01_0164.html deleted file mode 100644 index b427f78f..00000000 --- a/docs/cce/umn/cce_01_0164.html +++ /dev/null @@ -1,17 +0,0 @@ - - -

Permissions Management

-
-
- -
- diff --git a/docs/cce/umn/cce_01_0175.html b/docs/cce/umn/cce_01_0175.html deleted file mode 100644 index f9fc9165..00000000 --- a/docs/cce/umn/cce_01_0175.html +++ /dev/null @@ -1,17 +0,0 @@ - - -

Obtaining a Cluster Certificate

-

Scenario

Before accessing cluster resources through open-source Kubernetes APIs, obtain the cluster's certificate.

-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Clusters.
  2. In the card view of the target cluster, choose More > Download X.509 Certificate.
  3. In the Download X.509 Certificate dialog box displayed, select the certificate expiration time and download the X.509 certificate of the cluster as prompted.

    Figure 1 Downloading a certificate
    -
    • The downloaded certificate contains three files: client.key, client.crt, and ca.crt. Keep these files secure.
    • Certificates are not required for mutual access between containers in a cluster.
    -
    -
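    For reference, the downloaded files can be used to call the Kubernetes API directly. The following is a minimal sketch that lists namespaces with curl; the API server address and port are placeholders that you obtain from the cluster details page.

    curl --cacert ./ca.crt --cert ./client.crt --key ./client.key \
      https://<api-server-address>:<port>/api/v1/namespaces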

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0178.html b/docs/cce/umn/cce_01_0178.html deleted file mode 100644 index 0438c9ba..00000000 --- a/docs/cce/umn/cce_01_0178.html +++ /dev/null @@ -1,129 +0,0 @@ - - -

Formula for Calculating the Reserved Resources of a Node

-

Some node resources are used to run the Kubernetes system components that make the node part of your cluster. Therefore, the total amount of node resources and the amount of allocatable node resources in Kubernetes are different. The larger the node specifications, the more containers are deployed on the node, and the more resources Kubernetes needs to reserve.

-

To ensure node stability, a certain amount of CCE node resources will be reserved for Kubernetes components (such as kubelet, kube-proxy, and docker) based on the node specifications.

-

CCE calculates the resources that can be allocated to user nodes as follows:

-

Allocatable resources = Total amount - Reserved amount - Eviction threshold

-

-

Rules for Reserving Node Memory

You can use the following formula to calculate how much memory you should reserve for running containers on a node:

-

Total reserved amount = Reserved memory for system components + Reserved memory for kubelet to manage pods

- -
Table 1 Reservation rules for system components

Total Memory (TM)

-

Reserved Memory for System Components

-

TM ≤ 8 GB

-

0 MB

-

8 GB < TM ≤ 16 GB

-

[(TM – 8 GB) x 1024 x 10%] MB

-

16 GB < TM ≤ 128 GB

-

[8 GB x 1024 x 10% + (TM – 16 GB) x 1024 x 6%] MB

-

TM > 128 GB

-

(8 GB x 1024 x 10% + 112 GB x 1024 x 6% + (TM – 128 GB) x 1024 x 2%) MB

-
-
- -
Table 2 Reservation rules for kubelet

Total Memory (TM)

-

Number of Pods

-

Reserved Memory for kubelet

-

TM ≤ 2 GB

-

-

-

TM x 25%

-

TM > 2 GB

-

0 < Max. pods on a node ≤ 16

-

700 MB

-

16 < Max. pods on a node ≤ 32

-

[700 + (Max. pods on a node – 16) x 18.75] MB

-

32 < Max. pods on a node ≤ 64

-

[1024 + (Max. pods on a node – 32) x 6.25] MB

-

64 < Max. pods on a node ≤ 128

-

[1230 + (Max. pods on a node – 64) x 7.80] MB

-

Max. pods on a node > 128

-

[1740 + (Max. pods on a node – 128) x 11.20] MB

-
-
-

For a node with small capacity, adjust the maximum number of pods based on site requirements. Alternatively, when creating a node on the CCE console, you can adjust the maximum number of pods for the node based on the node specifications.

-
-
-

Rules for Reserving Node CPU

-
Table 3 Node CPU reservation rules

Total CPU Cores (Total)

-

Reserved CPU Cores

-

Total ≤ 1 core

-

Total x 6%

-

1 core < Total ≤ 2 cores

-

1 core x 6% + (Total – 1 core) x 1%

-

2 cores < Total ≤ 4 cores

-

1 core x 6% + 1 core x 1% + (Total – 2 cores) x 0.5%

-

Total > 4 cores

-

1 core x 6% + 1 core x 1% + 2 cores x 0.5% + (Total – 4 cores) x 0.25%

-
-
-

CCE reserves an extra 100 MiB for kubelet eviction.
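For example, for a hypothetical node with 8 vCPUs, 16 GB of memory, and a maximum of 32 pods, the formulas above give the following (eviction threshold not included):

  • Reserved memory for system components = (16 GB – 8 GB) x 1024 x 10% = 819.2 MB
  • Reserved memory for kubelet = [700 + (32 – 16) x 18.75] MB = 1000 MB
  • Total reserved memory ≈ 1819.2 MB
  • Reserved CPU = 1 core x 6% + 1 core x 1% + 2 cores x 0.5% + (8 – 4) cores x 0.25% = 0.09 cores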

-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0180.html b/docs/cce/umn/cce_01_0180.html deleted file mode 100644 index 72bc1ec6..00000000 --- a/docs/cce/umn/cce_01_0180.html +++ /dev/null @@ -1,284 +0,0 @@ - - -

Overview

-

Introduction

A container cluster consists of a set of worker machines, called nodes, that run containerized applications. A node can be a virtual machine (VM) or a physical machine (PM), depending on your service requirements. The components on a node include kubelet, container runtime, and kube-proxy.

-

A Kubernetes cluster consists of master nodes and worker nodes. The nodes described in this section refer to worker nodes, the computing nodes of a cluster that run containerized applications.

-
-

CCE uses high-performance Elastic Cloud Servers (ECSs) as nodes to build highly available Kubernetes clusters.

-
-

Notes

  • To ensure node stability, a certain amount of CCE node resources is reserved for Kubernetes components (such as kubelet, kube-proxy, and docker) based on the node specifications. Therefore, the total amount of node resources and the amount of allocatable node resources for your cluster differ. The larger the node specifications, the more containers can be deployed on the node, so more node resources need to be reserved to run Kubernetes components.
  • The node networking (such as the VM networking and container networking) is taken over by CCE. You are not allowed to add NICs or change routes. If you modify the networking configuration, the availability of CCE may be affected.
-
-

Node Lifecycle

A lifecycle indicates the node statuses recorded from the time when the node is created through the time when the node is deleted or released.

- -
Table 1 Node statuses

Status

-

Status Attribute

-

Description

-

Available

-

Stable state

-

The node is running properly and is connected to the cluster.

-

Nodes in this state can provide services.

-

Unavailable

-

Stable state

-

The node is not running properly.

-

Nodes in this state no longer provide services. In this case, perform the operations in Resetting a Node.

-

Creating

-

Intermediate state

-

The node has been created but is not running.

-

Installing

-

Intermediate state

-

The Kubernetes software is being installed on the node.

-

Deleting

-

Intermediate state

-

The node is being deleted.

-

If this state stays for a long time, an exception occurs.

-

Stopped

-

Stable state

-

The node is stopped properly.

-

A node in this state cannot provide services. You can start the node on the ECS console.

-

Error

-

Stable state

-

The node is abnormal.

-

Nodes in this state no longer provide services. In this case, perform the operations in Resetting a Node.

-
-
-
-

Mapping between Node OSs and Container Engines

-
Table 2 Node OSs and container engines in CCE clusters

OS

-

Kernel Version

-

Container Engine

-

Container Storage Rootfs

-

Container Runtime

-

CentOS 7.x

-

3.x

-

Docker

-

Clusters of v1.19 and earlier use Device Mapper.

-

Clusters of v1.21 and later use OverlayFS.

-

runC

-

EulerOS 2.5

-

Device Mapper

-
-
- -
Table 3 Node OSs and container engines in CCE Turbo clusters

Node Type

-

OS

-

Kernel Version

-

Container Engine

-

Container Storage Rootfs

-

Container Runtime

-

VM

-

CentOS 7.x

-

3.x

-

Docker

-

OverlayFS

-

runC

-

BMS in the shared resource pool

-

EulerOS 2.9

-

4.x

-

containerd

-

Device Mapper

-

Kata

-
-
-
-

Secure Containers and Common Containers

Secure (Kata) containers are distinguished from common containers in a few aspects.

-

The most significant difference is that each secure container (pod) runs on an independent micro-VM, has an independent OS kernel, and is securely isolated at the virtualization layer. CCE provides container isolation that is more secure than independent private Kubernetes clusters. With isolated OS kernels, computing resources, and networks, pod resources and data will not be preempted and stolen by other pods.

-

You can run common or secure containers on a single node in a CCE Turbo cluster. The differences between them are as follows:

- -

Category

-

Secure Container (Kata)

-

Common Container (Docker)

-

Common Container (containerd)

-

Node type used to run containers

-

Bare-metal server (BMS)

-

VM

-

VM

-

Container engine

-

containerd

-

Docker

-

Default value for common containers created on the console.

-

containerd

-

Container runtime

-

Kata

-

runC

-

runC

-

Container kernel

-

Exclusive kernel

-

Sharing the kernel with the host

-

Sharing the kernel with the host

-

Container isolation

-

Lightweight VMs

-

cgroups and namespaces

-

cgroups and namespaces

-

Container engine storage driver

-

Device Mapper

-

OverlayFS (overlay2)

-

OverlayFS

-

Pod overhead

-

Memory: 50 MiB

-

CPU: 0.1 cores

-

Pod overhead is a feature for accounting for the resources consumed by the pod infrastructure on top of the container requests and limits. For example, if limits.cpu is set to 0.5 cores and limits.memory to 256 MiB for a pod, the pod will request 0.6-core CPUs and 306 MiB of memory.

-

None

-

None

-

Minimal specifications

-

Memory: 256 MiB

-

CPU: 0.25 cores

-

None

-

None

-

Container engine CLI

-

crictl

-

docker

-

crictl

-

Pod computing resources

-

The request and limit values must be the same for both CPU and memory.

-

The request and limit values can be different for both CPU and memory.

-

The request and limit values can be different for both CPU and memory.

-

Host network

-

Not supported

-

Supported

-

Supported

-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0182.html b/docs/cce/umn/cce_01_0182.html deleted file mode 100644 index 9950a338..00000000 --- a/docs/cce/umn/cce_01_0182.html +++ /dev/null @@ -1,100 +0,0 @@ - - -

Monitoring Overview

-

CCE works with AOM to comprehensively monitor clusters. When a node is created, the ICAgent (the DaemonSet named icagent in the kube-system namespace of the cluster) of AOM is installed by default. The ICAgent collects monitoring data of underlying resources and workloads running on the cluster. It also collects monitoring data of custom metrics of the workload.

-
  • Resource metrics

    Basic resource monitoring includes CPU, memory, and disk monitoring. For details, see Resource Metrics. You can view these metrics of clusters, nodes, and workloads on the CCE or AOM console.

    -
-

AOM is available only in certain regions.

-
-

Resource Metrics

-
Table 1 Resource metrics

Metric

-

Description

-

CPU Allocation Rate

-

Indicates the percentage of CPUs allocated to workloads.

-

Memory Allocation Rate

-

Indicates the percentage of memory allocated to workloads.

-

CPU Usage

-

Indicates the CPU usage.

-

Memory Usage

-

Indicates the memory usage.

-

Disk Usage

-

Indicates the disk usage.

-

Down

-

Indicates the speed at which data is downloaded to a node. The unit is KB/s.

-

Up

-

Indicates the speed at which data is uploaded from a node. The unit is KB/s.

-

Disk Read Rate

-

Indicates the data volume read from a disk per second. The unit is KB/s.

-

Disk Write Rate

-

Indicates the data volume written to a disk per second. The unit is KB/s.

-
-
-
-

Viewing Cluster Monitoring Data

In the navigation pane of the CCE console, choose Resource Management > Clusters. Click on the cluster card to access the cluster monitoring page.

-

-

The cluster monitoring page displays the monitoring status of cluster resources, CPU, memory, and disk usage of all nodes in a cluster, and CPU and memory allocation rates.

-

Explanation of monitoring metrics:

-
  • CPU allocation rate = Sum of CPU quotas requested by pods in the cluster/Sum of CPU quotas that can be allocated of all nodes (excluding master nodes) in the cluster
  • Memory allocation rate = Sum of memory quotas requested by pods in the cluster/Sum of memory quotas that can be allocated of all nodes (excluding master nodes) in the cluster
  • CPU usage: Average CPU usage of all nodes (excluding master nodes) in a cluster
  • Memory usage: Average memory usage of all nodes (excluding master nodes) in a cluster
-
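For example, if pods in a cluster request a total of 8 CPU cores and the allocatable CPU of all worker nodes adds up to 16 cores, the CPU allocation rate is 8/16 = 50%.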

Allocatable node resources (CPU or memory) = Total amount – Reserved amount – Eviction thresholds. For details, see Formula for Calculating the Reserved Resources of a Node.

-
-

On the cluster monitoring page, you can also view monitoring data of nodes, workloads, and pods. You can click to view the detailed data.

-

-
-

Viewing Monitoring Data of Master Nodes

CCE allows you to view monitoring data of master nodes. You can view the monitoring data of a master node in the upper right corner of the cluster details page. Clicking More will direct you to the AOM console.

-

-
-

Viewing Monitoring Data of Worker Nodes

In addition to the cluster monitoring page, you can also view node monitoring data on the node console by clicking Monitoring in the row where the node resides.

-

-

The node list page also displays the data about the allocable resources of the node. Allocatable resources indicate the upper limit of resources that can be requested by pods on a node, and are calculated based on the requests. Allocatable resources do not indicate the actual available resources of the node.

-

The calculation formulas are as follows:

-
  • Allocatable CPU = Total CPU – Requested CPU of all pods – Reserved CPU for other resources
  • Allocatable memory = Total memory – Requested memory of all pods – Reserved memory for other resources
-

-
-

Viewing Workload Monitoring Data

You can view monitoring data of a workload on the Monitoring tab page of the workload details page.

-

-

You can also click AOM to go to the AOM console to view monitoring data of the workload.

-

-

-
-

Viewing Pod Monitoring Data

You can view monitoring data of a pod on the Pods tab page of the workload details page.

-

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0183.html b/docs/cce/umn/cce_01_0183.html deleted file mode 100644 index 281f03a3..00000000 --- a/docs/cce/umn/cce_01_0183.html +++ /dev/null @@ -1,41 +0,0 @@ - - -

Nodes

-
-
- -
- diff --git a/docs/cce/umn/cce_01_0184.html b/docs/cce/umn/cce_01_0184.html deleted file mode 100644 index 882614b4..00000000 --- a/docs/cce/umn/cce_01_0184.html +++ /dev/null @@ -1,19 +0,0 @@ - - -

Synchronizing Node Data

-

Scenario

Each node in a cluster is a cloud server or physical machine. After a cluster node is created, you can change the cloud server name or specifications as required.

-

Some information about CCE nodes is maintained independently from the ECS console. After you change the name, EIP, billing mode, or specifications of an ECS on the ECS console, you need to synchronize the ECS information to the corresponding node on the CCE console. After the synchronization, information on both consoles is consistent.

-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Nodes.
  2. In the same row as the node whose data will be synchronized, choose More > Sync Node data.

    Alternatively, click the node name, and click Sync Node Data in the upper right corner of the node details page.

    -
    -
    Figure 1 Synchronizing node data
    -

    After the synchronization is complete, the "Sync success" message is displayed in the upper right corner.

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0185.html b/docs/cce/umn/cce_01_0185.html deleted file mode 100644 index af83d767..00000000 --- a/docs/cce/umn/cce_01_0185.html +++ /dev/null @@ -1,52 +0,0 @@ - - -

Logging In to a Node

-

Notes and Constraints

  • If you use SSH to log in to a node (an ECS), ensure that the ECS already has an EIP (a public IP address).
  • Only login to a running ECS is allowed.
  • Only the user linux can log in to a Linux server.
-
-

Login Modes

You can log in to an ECS in either of the following modes:

-
  • Management console (VNC)

    If an ECS has no EIP, log in to the ECS console and click Remote Login in the same row as the ECS.

    -
  • SSH

    This mode applies only to ECSs running Linux. Usually, you can use a remote login tool, such as PuTTY, Xshell, and SecureCRT, to log in to your ECS. If none of the remote login tools can be used, log in to the ECS console and click Remote Login in the same row as the ECS to view the connection status and running status of the ECS.

    -
    • When you use the Windows OS to log in to a Linux node, set Auto-login username to linux.
    • The CCE console does not support node OS upgrade. Do not upgrade the node OS using the yum update command. Otherwise, the container networking components will be unavailable.
    -
    -
-
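For example, a minimal sketch of an SSH login from a Linux machine, assuming the node has an EIP and was created with a key pair (the key file name is a placeholder):

ssh -i ./my-key.pem linux@<node-EIP>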
- -
Table 1 Linux ECS login modes

EIP Binding

-

On-Premises OS

-

Connection Method

-

Yes

-

Windows

-

Use a remote login tool, such as PuTTY or Xshell.

-

Yes

-

Linux

-

Run commands.

-

Yes/No

-

Windows/Linux

-

Use the remote login function available on the console.

-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0186.html b/docs/cce/umn/cce_01_0186.html deleted file mode 100644 index 455684ab..00000000 --- a/docs/cce/umn/cce_01_0186.html +++ /dev/null @@ -1,20 +0,0 @@ - - -

Deleting a Node

-

Scenario

When a node in a CCE cluster is deleted, services running on the node will also be deleted. Exercise caution when performing this operation.

-
-

Notes and Constraints

  • After a CCE cluster is deleted, the ECS nodes in the cluster are also deleted.
-
-

Notes

  • Deleting a node will lead to pod migration, which may affect services. Perform this operation during off-peak hours.
  • Unexpected risks may occur during the operation. Back up related data in advance.
  • During the operation, the backend will set the node to the unschedulable state.
  • Only worker nodes can be deleted.
-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Nodes. In the same row as the node you will delete, choose More > Delete.
  2. In the Delete Node dialog box, enter DELETE and click Yes.

    • After the node is deleted, pods on it are automatically migrated to other available nodes.
    • If the disks and EIPs bound to the node are important resources, unbind them first. Otherwise, they will be deleted with the node.
    -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0187.html b/docs/cce/umn/cce_01_0187.html deleted file mode 100644 index b8d36291..00000000 --- a/docs/cce/umn/cce_01_0187.html +++ /dev/null @@ -1,71 +0,0 @@ - - -

Permissions Overview

-

CCE permissions management allows you to assign permissions to IAM users and user groups under your tenant accounts. CCE combines the advantages of Identity and Access Management (IAM) and Kubernetes Role-based Access Control (RBAC) authorization to provide a variety of authorization methods, including IAM fine-grained authorization, IAM token authorization, cluster-scoped authorization, and namespace-wide authorization.

-

If you need to perform refined permissions management on CCE clusters and related resources, for example, to control the access of employees in different departments to cloud resources, you can perform multi-dimensional permissions management on CCE.

-

This section describes the CCE permissions management mechanism and related concepts. If your account has met your service requirements, you can skip the configurations in this chapter.

-

CCE Permissions Management

CCE permissions are described as follows:
  • Cluster-level permissions: Cluster-level permissions management evolves out of the system policy authorization feature of IAM. IAM users in the same user group have the same permissions. On IAM, you can configure system policies to describe which IAM user groups can perform which operations on cluster resources. For example, you can grant user group A to create and delete cluster X, add a node, or install an add-on, while granting user group B to view information about cluster X.

    Cluster-level permissions involve CCE non-Kubernetes APIs and support fine-grained IAM policies and enterprise project management capabilities.

    -
  • Namespace-level permissions: You can regulate users' or user groups' access to Kubernetes resources in a single namespace based on their Kubernetes RBAC roles. CCE has also been enhanced based on open-source capabilities. It supports RBAC authorization based on IAM user or user group, and RBAC authentication on access to APIs using IAM tokens.

    Namespace-level permissions involve CCE Kubernetes APIs and are enhanced based on the Kubernetes RBAC capabilities. Namespace-level permissions can be granted to IAM users or user groups for authentication and authorization, but are independent of fine-grained IAM policies.

    -

    Starting from version 1.11.7-r2, CCE clusters allow you to configure namespace permissions. Clusters earlier than v1.11.7-r2 have all namespace permissions by default.

    -
-
-

In general, you configure CCE permissions in two scenarios. The first is creating and managing clusters and related resources, such as nodes. The second is creating and using Kubernetes resources in the cluster, such as workloads and Services.

-
Figure 1 Illustration on CCE permissions
-

These permissions allow you to manage resource users at a finer granularity.

-
-

Cluster Permissions (IAM-based) and Namespace Permissions (Kubernetes RBAC-based)

Users with different cluster permissions (assigned using IAM) have different namespace permissions (assigned using Kubernetes RBAC). Table 1 lists the namespace permissions of different users.

- -
Table 1 Differences in namespace permissions

User

-

Clusters Earlier Than v1.11.7-r2

-

Clusters of v1.11.7-r2 and Later

-

User with the Tenant Administrator permissions

-

All namespace permissions

-
  • Has all namespace permissions when using CCE on the console.
  • Requires Kubernetes RBAC authorization when using CCE via kubectl.
-
NOTE:

When such a user accesses the CCE console, an administrator group is added. Therefore, the user has all namespace permissions.

-
-

IAM user with the CCE Administrator role

-

All namespace permissions

-
  • Has all namespace permissions when using CCE on the console.
  • Requires Kubernetes RBAC authorization when using CCE via kubectl.
-
NOTE:

When such a user accesses the CCE console, an administrator group is added. Therefore, the user has all namespace permissions.

-
-

IAM user with the CCE Viewer role

-

All namespace permissions

-

Requires Kubernetes RBAC authorization.

-

IAM user with the Tenant Guest role

-

All namespace permissions

-

Requires Kubernetes RBAC authorization.

-
-
-
-

kubectl Permissions

You can use kubectl to access Kubernetes resources in a cluster.

-

When you access a cluster using kubectl, CCE uses the kubeconfig.json file generated on the cluster for authentication. This file contains user information, based on which CCE determines which Kubernetes resources can be accessed by kubectl. The permissions recorded in a kubeconfig.json file vary from user to user. The permissions that a user has are listed in Table 1.

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0188.html b/docs/cce/umn/cce_01_0188.html deleted file mode 100644 index 8441b6b1..00000000 --- a/docs/cce/umn/cce_01_0188.html +++ /dev/null @@ -1,92 +0,0 @@ - - -

Cluster Permissions (IAM-based)

-

CCE cluster permissions are assigned based on IAM system policies and custom policies. You can use user groups to assign permissions to IAM users.

-

Cluster permissions are configured only for cluster-related resources (such as clusters and nodes). You must also configure namespace permissions to operate Kubernetes resources (such as workloads and Services).

-
-

Prerequisites

  • A user with the Security Administrator role has all IAM permissions except role switching. Only these users can view user groups and their permissions on the Permissions Management page on the CCE console.
-
-

Configuration

On the CCE console, when you choose Permissions Management > Cluster-Level Permissions to create a user group, you will be directed to the IAM console to complete the process. After the user group is created and its permissions are configured, you can view the information on the Cluster-Level Permissions tab page. This section describes the operations in IAM.

-
-

Process Flow

Figure 1 Process of assigning CCE permissions
-

-
  1. Create a user group and assign permissions to it.

    Create a user group on the IAM console, and assign CCE permissions, for example, the CCE Viewer policy to the group.

    -

    CCE is deployed by region. On the IAM console, select Region-specific projects when assigning CCE permissions.

    -
    -
  2. Create a user and add it to a user group.

    Create a user on the IAM console and add the user to the group created in 1.

    -
  3. Log in and verify permissions.

    Log in to the management console as the user you created, and verify that the user has the assigned permissions.

    -
    • Log in to the management console and switch to the CCE console. Click Create Cluster in the upper right corner. If you fail to do so (assuming that only the CCE Viewer role is assigned), the permission control policy takes effect.
    • Switch to the console of any other service. If a message appears indicating that you do not have the required permissions to access the service, the CCE Viewer policy takes effect.
    -
-
-

Custom Policies

Custom policies can be created as a supplement to the system-defined policies of CCE.

-

You can create custom policies in either of the following ways:

-
  • Visual editor: Select cloud services, actions, resources, and request conditions. This does not require knowledge of policy syntax.
  • JSON: Edit JSON policies from scratch or based on an existing policy.
-

This section provides examples of common custom CCE policies.

-
-

Example Custom Policies

-
  • Example 1: Creating a cluster named test
    {
    -    "Version": "1.1",
    -    "Statement": [
    -        {
    -            "Effect": "Allow",
    -            "Action": [
    -                "cce:cluster:create"
    -            ]
    -        }
    -    ]
    -}
    -
  • Example 2: Denying node deletion

    A policy with only "Deny" permissions must be used in conjunction with other policies to take effect. If the permissions assigned to a user contain both "Allow" and "Deny", the "Deny" permissions take precedence over the "Allow" permissions.

    -

    The following method can be used if you need to assign permissions of the CCEFullAccess policy to a user but you want to prevent the user from deleting nodes (cce:node:delete). Create a custom policy for denying node deletion, and attach both policies to the group to which the user belongs. Then, the user can perform all operations on CCE except deleting nodes. The following is an example of a deny policy:

    -
    {
    -    "Version": "1.1",
    -    "Statement": [
    -        {
    -            "Effect": "Deny",
    -            "Action": [
    -                "cce:node:delete"
    -            ]
    -        }
    -    ]
    -}
    -
  • Example 3: Defining permissions for multiple services in a policy

    A custom policy can contain the actions of multiple services that are of the global or project-level type. The following is an example policy containing actions of multiple services:

    -
    {
    -    "Version": "1.1",
    -    "Statement": [
    -        {
    -            "Action": [
    -                "ecs:cloudServers:resize",
    -                "ecs:cloudServers:delete",
    -                "ims:images:list",
    -                "ims:serverImages:create"
    -            ],
    -            "Effect": "Allow"
    -        }
    -    ]
    -}
    -
-

CCE Cluster Permissions and Enterprise Projects

CCE supports resource management and permission allocation by cluster and enterprise project.

-

Note that:

-
  • IAM projects are based on physical isolation of resources, whereas enterprise projects provide global logical groups of resources, which better meet the actual requirements of enterprises. In addition, IAM policies can be managed based on enterprise projects. Therefore, you are advised to use enterprise projects for permissions management.
  • When there are both IAM projects and enterprise projects, IAM preferentially matches the IAM project policies.
  • When creating a cluster or node using purchased cloud resources, ensure that IAM users have been granted the required permissions in the enterprise project to use these resources. Otherwise, the cluster or node may fail to be created.
-
-

CCE Cluster Permissions and IAM RBAC

CCE is compatible with IAM system roles for permissions management. You are advised to use fine-grained policies provided by IAM to simplify permissions management.

-

CCE supports the following roles:

-
  • Basic IAM roles:
    • te_admin (Tenant Administrator): Users with this role can call all APIs of all services except IAM.
    • readonly (Tenant Guest): Users with this role can call APIs with the read-only permissions of all services except IAM.
    -
  • Custom CCE administrator role: CCE Administrator
-
  • Tenant Administrator and Tenant Guest are special IAM system roles. After any system or custom policy is configured, Tenant Administrator and Tenant Guest take effect as system policies to achieve compatibility with IAM RBAC and ABAC scenarios.
  • If a user has the Tenant Administrator or CCE Administrator system role, the user has the cluster-admin permissions in Kubernetes RBAC and the permissions cannot be removed after the cluster is created.
    If the user is the cluster creator, the cluster-admin permissions in Kubernetes RBAC are granted to the user by default. The permissions can be manually removed after the cluster is created.
    • Method 1: On the CCE console, choose Permissions Management > Namespace-Level Permissions and delete the permission record whose role is cluster-creator.
    • Method 2: Delete the cluster-creator ClusterRoleBinding through the API or kubectl.
    -
    -
-
-

When RBAC and IAM policies co-exist, the backend authentication logic for open APIs or console operations on CCE is as follows:

-

-

Certain CCE APIs involve namespace-level permissions or key operations and therefore require special permissions:

-

Using clusterCert to obtain the cluster kubeconfig: cceadm/teadmin

-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0189.html b/docs/cce/umn/cce_01_0189.html deleted file mode 100644 index 4577b49d..00000000 --- a/docs/cce/umn/cce_01_0189.html +++ /dev/null @@ -1,234 +0,0 @@ - - -

Namespace Permissions (Kubernetes RBAC-based)

-

Namespace Permissions (Kubernetes RBAC-based)

You can regulate users' or user groups' access to Kubernetes resources in a single namespace based on their Kubernetes RBAC roles. The RBAC API declares four kinds of Kubernetes objects: Role, ClusterRole, RoleBinding, and ClusterRoleBinding, which are described as follows:

-
  • Role: defines a set of rules for accessing Kubernetes resources in a namespace.
  • RoleBinding: defines the relationship between users and roles.
  • ClusterRole: defines a set of rules for accessing Kubernetes resources in a cluster (including all namespaces).
  • ClusterRoleBinding: defines the relationship between users and cluster roles.
-

Role and ClusterRole specify actions that can be performed on specific resources. RoleBinding and ClusterRoleBinding bind roles to specific users, user groups, or ServiceAccounts. Illustration:

-
Figure 1 Role binding
-
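For reference, a ClusterRole follows the same structure as a Role but is not namespaced. The following minimal sketch (the name clusterrole-node-reader is illustrative, not a CCE default) grants read-only access to nodes, which are cluster-scoped resources:

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: clusterrole-node-reader      # Illustrative name
rules:
- apiGroups: [""]
  resources: ["nodes"]               # Nodes are cluster-scoped resources
  verbs: ["get", "list", "watch"]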

On the CCE console, you can assign permissions to a user or user group to access resources in one or multiple namespaces. By default, the CCE console provides the following ClusterRoles:

-
  • view: read-only permission on most resources in all or selected namespaces.
  • edit: read and write permissions on most resources in all or selected namespaces. If this ClusterRole is configured for all namespaces, its capability is the same as the O&M permission.
  • admin: read and write permissions on most resources in all namespaces, and read-only permission on nodes, storage volumes, namespaces, and quota management.
  • cluster-admin: read and write permissions on all resources in all namespaces.
-
-

Cluster Permissions (IAM-based) and Namespace Permissions (Kubernetes RBAC-based)

Users with different cluster permissions (assigned using IAM) have different namespace permissions (assigned using Kubernetes RBAC). Table 1 lists the namespace permissions of different users.

- -
- - - - - - - - - - - - - - - - - - - - - -
Table 1 Differences in namespace permissions

User

-

Clusters Earlier Than v1.11.7-r2

-

Clusters of v1.11.7-r2 and Later

-

User with the Tenant Administrator permissions

-

All namespace permissions

-
  • Has all namespace permissions when using CCE on the console.
  • Requires Kubernetes RBAC authorization when using CCE via kubectl.
-
NOTE:

When such a user accesses the CCE console, the user is added to an administrator group and therefore has all namespace permissions.

-
-

IAM user with the CCE Administrator role

-

All namespace permissions

-
  • Has all namespace permissions when using CCE on the console.
  • Requires Kubernetes RBAC authorization when using CCE via kubectl.
-
NOTE:

When such a user accesses the CCE console, the user is added to an administrator group and therefore has all namespace permissions.

-
-

IAM user with the CCE Viewer role

-

All namespace permissions

-

Requires Kubernetes RBAC authorization.

-

IAM user with the Tenant Guest role

-

All namespace permissions

-

Requires Kubernetes RBAC authorization.

-
-
-
-

Prerequisites

  • Kubernetes RBAC authorization can be used for clusters of v1.11.7-r2 and later. Ensure that you have deployed a supported cluster version. For details about upgrading a cluster, see Performing Replace/Rolling Upgrade (v1.13 and Earlier).
  • After you create a cluster of v1.11.7-r2 or later, CCE automatically assigns the cluster-admin permission to you, which means you have full control on all resources in all namespaces in the cluster.
  • A user with the Security Administrator role has all IAM permissions except role switching. Only these users can assign permissions on the Permissions Management page on the CCE console.
-
-

Configuring Namespace Permissions (on the Console)

You can regulate users' or user groups' access to Kubernetes resources in a single namespace based on their Kubernetes RBAC roles.

-
  1. Log in to the CCE console. In the navigation pane, choose Permissions Management.
  2. On the displayed page, click the Namespace-Level Permissions tab. In the upper right corner of the namespace permissions list, select the cluster that contains the namespace whose access will be managed, and click Add Permissions.
  3. Confirm the cluster name, select the namespace to assign permissions for (for example, All namespaces), select the target user or user group, and then select the permissions.

    -

  4. Click Create.
-
-

Using kubectl to Configure Namespace Permissions

When you access a cluster using kubectl, CCE uses the kubeconfig.json file generated on the cluster for authentication. This file contains user information, based on which CCE determines which Kubernetes resources can be accessed by kubectl. The permissions recorded in a kubeconfig.json file vary from user to user. The permissions that a user has are listed in Cluster Permissions (IAM-based) and Namespace Permissions (Kubernetes RBAC-based).

-
-
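For orientation, a kubeconfig file follows the standard Kubernetes Config format shown in the sketch below. This is a generic skeleton only; the exact fields and credentials that CCE generates may differ, and the names cce-cluster and cce-user are illustrative.

apiVersion: v1
kind: Config
clusters:
- name: cce-cluster
  cluster:
    server: https://<API server address>          # Cluster endpoint
    certificate-authority-data: <Base64-encoded CA certificate>
contexts:
- name: cce-context
  context:
    cluster: cce-cluster
    user: cce-user
current-context: cce-context
users:
- name: cce-user                                  # Credentials that determine the user's RBAC permissions
  user:
    client-certificate-data: <Base64-encoded client certificate>
    client-key-data: <Base64-encoded client key>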

In addition to cluster-admin, admin, edit, and view, you can define Roles and RoleBindings to configure the permissions to add, delete, modify, and query resources, such as pods, Deployments, and Services, in the namespace.

-

Creating a Role is simple: specify a namespace and then define rules. The rules in the following example allow GET and LIST operations on pods in the default namespace.

-
kind: Role
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  namespace: default                          # Namespace
-  name: role-example
-rules:
-- apiGroups: [""]
-  resources: ["pods"]                         # The pod can be accessed.
-  verbs: ["get", "list"]                      # The GET and LIST operations can be performed.
-
  • apiGroups indicates the API group to which the resource belongs.
  • resources indicates the resources that can be operated. Pods, Deployments, ConfigMaps, and other Kubernetes resources are supported.
  • verbs indicates the operations that can be performed. get indicates querying a specific object, and list indicates listing all objects of a certain type. Other value options include create, update, and delete.
-
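Other resources and verbs follow the same pattern. For example, a Role that also allows creating and deleting Deployments in the default namespace could look like the following minimal sketch (role-deploy-example is an illustrative name):

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: default
  name: role-deploy-example                   # Illustrative name
rules:
- apiGroups: ["apps"]                         # Deployments belong to the apps API group
  resources: ["deployments"]
  verbs: ["get", "list", "create", "delete"]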

For details, see Using RBAC Authorization.

-

After creating a Role, bind it to a specific user by creating a RoleBinding. The following is an example.

-
kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: RoleBinding-example
-  namespace: default
-  annotations:
-    CCE.com/IAM: 'true'
-roleRef:
-  kind: Role
-  name: role-example
-  apiGroup: rbac.authorization.k8s.io
-subjects:
-- kind: User
-  name: 0c97ac3cb280f4d91fa7c0096739e1f8 # User ID of the user-example
-  apiGroup: rbac.authorization.k8s.io
-

The subjects section binds a Role with an IAM user so that the IAM user can obtain the permissions defined in the Role, as shown in the following figure.

-
Figure 2 A RoleBinding binds the Role to the user.
-

You can also specify a user group in the subjects section. In this case, all users in the user group obtain the permissions defined in the Role.

-
subjects:
-- kind: Group
-  name: 0c96fad22880f32a3f84c009862af6f7    # User group ID
-  apiGroup: rbac.authorization.k8s.io
-

Use the IAM user user-example to connect to the cluster and obtain the pod information. The following is an example of the returned pod information.

-
# kubectl get pod
-NAME                                   READY   STATUS    RESTARTS   AGE
-deployment-389584-2-6f6bd4c574-2n9rk   1/1     Running   0          4d7h
-deployment-389584-2-6f6bd4c574-7s5qw   1/1     Running   0          4d7h
-deployment-3895841-746b97b455-86g77    1/1     Running   0          4d7h
-deployment-3895841-746b97b455-twvpn    1/1     Running   0          4d7h
-nginx-658dff48ff-7rkph                 1/1     Running   0          4d9h
-nginx-658dff48ff-njdhj                 1/1     Running   0          4d9h
-# kubectl get pod nginx-658dff48ff-7rkph
-NAME                     READY   STATUS    RESTARTS   AGE
-nginx-658dff48ff-7rkph   1/1     Running   0          4d9h
-

Try querying Deployments and Services in the namespace. The output shows that user-example does not have the required permissions. Try querying the pods in the kube-system namespace. The output again shows that user-example does not have the required permissions. This indicates that the IAM user user-example has only the GET and LIST permissions on pods in the default namespace, as expected.

-
# kubectl get deploy
-Error from server (Forbidden): deployments.apps is forbidden: User "0c97ac3cb280f4d91fa7c0096739e1f8" cannot list resource "deployments" in API group "apps" in the namespace "default"
-# kubectl get svc
-Error from server (Forbidden): services is forbidden: User "0c97ac3cb280f4d91fa7c0096739e1f8" cannot list resource "services" in API group "" in the namespace "default"
-# kubectl get pod --namespace=kube-system
-Error from server (Forbidden): pods is forbidden: User "0c97ac3cb280f4d91fa7c0096739e1f8" cannot list resource "pods" in API group "" in the namespace "kube-system"
-
-

Example: Assigning All Cluster Permissions (cluster-admin)

You can use the cluster-admin role to assign all permissions on a cluster. This role contains the permissions for cluster resources (such as PVs and StorageClasses).

-

In the following example kubectl output, a ClusterRoleBinding has been created and binds the cluster-admin role to the user group cce-role-group.

-
# kubectl get clusterrolebinding
-NAME                                                              ROLE                           AGE
-clusterrole_cluster-admin_group0c96fad22880f32a3f84c009862af6f7   ClusterRole/cluster-admin      61s
-
-# kubectl get clusterrolebinding clusterrole_cluster-admin_group0c96fad22880f32a3f84c009862af6f7 -oyaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  annotations:
-    CCE.com/IAM: "true"
-  creationTimestamp: "2021-06-23T09:15:22Z"
-  name: clusterrole_cluster-admin_group0c96fad22880f32a3f84c009862af6f7
-  resourceVersion: "36659058"
-  selfLink: /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/clusterrole_cluster-admin_group0c96fad22880f32a3f84c009862af6f7
-  uid: d6cd43e9-b4ca-4b56-bc52-e36346fc1320
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: cluster-admin
-subjects:
-- apiGroup: rbac.authorization.k8s.io
-  kind: Group
-  name: 0c96fad22880f32a3f84c009862af6f7
-

Connect to the cluster as an authorized user. If the PVs and StorageClasses can be queried, the permission configuration takes effect.

-
# kubectl get pv
-No resources found
-# kubectl get sc
-NAME                PROVISIONER                     RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
-csi-disk            everest-csi-provisioner         Delete          Immediate              true                   75d
-csi-disk-topology   everest-csi-provisioner         Delete          WaitForFirstConsumer   true                   75d
-csi-nas             everest-csi-provisioner         Delete          Immediate              true                   75d
-csi-obs             everest-csi-provisioner         Delete          Immediate              false                  75d
-csi-sfsturbo        everest-csi-provisioner         Delete          Immediate              true                   75d
-
-

Example: Assigning All Namespace Permissions (admin)

The admin role contains all permissions on a namespace. You can assign permissions to users to access one or multiple namespaces.

-

In the following example kubectl output, a RoleBinding has been created, the admin role is bound to the user group cce-role-group, and the target namespace is the default namespace.

-
# kubectl get rolebinding
-NAME                                                      ROLE                AGE
-clusterrole_admin_group0c96fad22880f32a3f84c009862af6f7   ClusterRole/admin   18s
-# kubectl get rolebinding clusterrole_admin_group0c96fad22880f32a3f84c009862af6f7 -oyaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  annotations:
-    CCE.com/IAM: "true"
-  creationTimestamp: "2021-06-24T01:30:08Z"
-  name: clusterrole_admin_group0c96fad22880f32a3f84c009862af6f7
-  namespace: default
-  resourceVersion: "36963685"
-  selfLink: /apis/rbac.authorization.k8s.io/v1/namespaces/default/rolebindings/clusterrole_admin_group0c96fad22880f32a3f84c009862af6f7
-  uid: 6c6f46a6-8584-47da-83f5-9eef1f7b75d6
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: admin
-subjects:
-- apiGroup: rbac.authorization.k8s.io
-  kind: Group
-  name: 0c96fad22880f32a3f84c009862af6f7
-

Connect to a cluster as an authorized user. In this example, you can create and query resources in the default namespace, but cannot query resources in the kube-system namespace or cluster resources.

-
# kubectl get pod
-NAME                    READY   STATUS    RESTARTS   AGE
-test-568d96f4f8-brdrp   1/1     Running   0          33m
-test-568d96f4f8-cgjqp   1/1     Running   0          33m
-# kubectl get pod -nkube-system
-Error from server (Forbidden): pods is forbidden: User "0c97ac3cb280f4d91fa7c0096739e1f8" cannot list resource "pods" in API group "" in the namespace "kube-system"
-# kubectl get pv
-Error from server (Forbidden): persistentvolumes is forbidden: User "0c97ac3cb280f4d91fa7c0096739e1f8" cannot list resource "persistentvolumes" in API group "" at the cluster scope
-
-

Example: Assigning Read-Only Namespace Permissions (view)

The view role has the read-only permissions on a namespace. You can assign permissions to users to view one or multiple namespaces.

-

In the following example kubectl output, a RoleBinding has been created, the view role is bound to the user group cce-role-group, and the target namespace is the default namespace.

-
# kubectl get rolebinding
-NAME                                                     ROLE               AGE
-clusterrole_view_group0c96fad22880f32a3f84c009862af6f7   ClusterRole/view   7s
-
-# kubectl get rolebinding clusterrole_view_group0c96fad22880f32a3f84c009862af6f7 -oyaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  annotations:
-    CCE.com/IAM: "true"
-  creationTimestamp: "2021-06-24T01:36:53Z"
-  name: clusterrole_view_group0c96fad22880f32a3f84c009862af6f7
-  namespace: default
-  resourceVersion: "36965800"
-  selfLink: /apis/rbac.authorization.k8s.io/v1/namespaces/default/rolebindings/clusterrole_view_group0c96fad22880f32a3f84c009862af6f7
-  uid: b86e2507-e735-494c-be55-c41a0c4ef0dd
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: view
-subjects:
-- apiGroup: rbac.authorization.k8s.io
-  kind: Group
-  name: 0c96fad22880f32a3f84c009862af6f7
-

Connect to the cluster as an authorized user. In this example, you can query resources in the default namespace but cannot create resources.

-
# kubectl get pod
-NAME                    READY   STATUS    RESTARTS   AGE
-test-568d96f4f8-brdrp   1/1     Running   0          40m
-test-568d96f4f8-cgjqp   1/1     Running   0          40m
-# kubectl run -i --tty --image tutum/dnsutils dnsutils --restart=Never --rm /bin/sh
-Error from server (Forbidden): pods is forbidden: User "0c97ac3cb280f4d91fa7c0096739e1f8" cannot create resource "pods" in API group "" in the namespace "default"
-
-

Example: Assigning Permissions for a Specific Kubernetes Resource Object

You can assign permissions on a specific Kubernetes resource object, such as a pod, a Deployment, or a Service. For details, see Using kubectl to Configure Namespace Permissions.

-
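For example, a rule can be narrowed down to a single named object by using the standard resourceNames field. The following minimal sketch (illustrative names) allows reading only the Deployment named nginx in the default namespace:

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: default
  name: role-single-deployment                # Illustrative name
rules:
- apiGroups: ["apps"]
  resources: ["deployments"]
  resourceNames: ["nginx"]                    # Restricts the rule to one specific Deployment
  verbs: ["get"]                              # resourceNames does not apply to list or create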
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0191.html b/docs/cce/umn/cce_01_0191.html deleted file mode 100644 index f22ae1e0..00000000 --- a/docs/cce/umn/cce_01_0191.html +++ /dev/null @@ -1,17 +0,0 @@ - - -

Overview

-

CCE uses Helm, a Kubernetes package manager, to simplify deployment and management of packages (also called charts). A chart is a collection of files that describe a related set of Kubernetes resources. The use of charts handles all the complexity in Kubernetes resource installation and management, making it possible to achieve unified resource scheduling and management.

-

Helm is a tool for packaging Kubernetes applications. For more information, see Helm documentation.

-
-
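For orientation, a chart is typically a directory that contains a Chart.yaml file, a values.yaml file, and a templates/ directory holding the Kubernetes manifests. A minimal Helm v2-style Chart.yaml might look like the following sketch (the name and version are illustrative):

name: my-app                 # Illustrative chart name
version: 1.0.0               # Chart version used for release management
description: A sample chart that packages a Deployment and a Service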

Custom charts simplify workload deployment.

-

This section describes how to create a workload using a custom chart. You can use multiple methods to create an orchestration chart on the CCE console.

-

Notes and Constraints

  • The number of charts that can be uploaded by a single user is limited. The value displayed on the console of each region is the allowed quantity.
  • CCE uses Helm v2.12. If you use Helm v3 or later to manage CCE, compatibility problems may occur.
  • A chart with multiple versions consumes the same portion of the chart quota.
  • Users with chart operation permissions can perform multiple operations on clusters. Therefore, exercise caution when assigning users the chart lifecycle management permissions, including uploading charts and creating, deleting, and updating chart releases.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0197.html b/docs/cce/umn/cce_01_0197.html deleted file mode 100644 index b3ed445e..00000000 --- a/docs/cce/umn/cce_01_0197.html +++ /dev/null @@ -1,200 +0,0 @@ - - -

Overview

-

To enable interoperability from one Kubernetes installation to the next, you must upgrade your Kubernetes clusters before the maintenance period ends.

-

After the latest Kubernetes version is available in CCE, CCE will describe the changes in this version.

-

You can use the CCE console to upgrade the Kubernetes version of a cluster.

-

An upgrade flag will be displayed on the cluster card view if there is a new version for the cluster to upgrade.

-

How to check:

-

Choose Resource Management > Clusters and check whether there is an upgrade flag in the upper right corner of the cluster card view. If yes, the cluster can be upgraded.

-
Figure 1 Cluster with the upgrade flag
-

Cluster Upgrade

The following table describes the target version to which each cluster version can be upgraded, the supported upgrade modes, and upgrade impacts.

- -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Table 1 Cluster upgrade paths and impacts

Source Version

-

Target Version

-

Upgrade Modes

-

Impacts

-

v1.21

-

v1.23

-

In-place upgrade

-

You need to identify the differences between versions.

-

v1.19

-

v1.21

-

In-place upgrade

-

You need to identify the differences between versions.

-

v1.17

-

v1.15

-

v1.19

-

In-place upgrade

-

You need to identify the differences between versions.

-

v1.13

-

v1.15

-

Rolling upgrade

-

Replace upgrade

-
  • The proxy configuration item in the coredns add-on configuration is not supported and needs to be replaced with forward.
  • The storage add-on is changed from storage-driver to everest.
-

v1.11

-

v1.9

-

v1.15

-

Replace upgrade

-
  • The cluster signature certificate mechanism is changed. As a result, the original cluster certificate becomes invalid. You need to obtain the certificate or kubeconfig file again after the cluster is upgraded.
  • RBAC is enabled for clusters of Kubernetes v1.13 by default. Applications need to adapt to RBAC.
  • After the cluster is upgraded from v1.9 to v1.15, kube-dns in the cluster will be replaced with CoreDNS. Before the upgrade, you need to back up the kube-dns configuration. After the upgrade, you need to reconfigure kube-dns in the coredns add-on.
-

v1.9

-

v1.7

-

Latest version that can be created on the console

-

Migration

-

You need to identify the differences between versions.

-
-
-
-

Upgrade Modes

CCE provides the following upgrade modes based on the cluster version and deployment site. The upgrade processes are the same for master nodes. The differences between the upgrade modes of worker nodes are described as follows:

- -
- - - - - - - - - - - - - - - - - - - - - -
Table 2 Differences between upgrade modes and their advantages and disadvantages

Upgrade Mode

-

Method

-

Advantage

-

Disadvantage

-

In-place upgrade

-

Kubernetes components, network components, and CCE management components are upgraded on the node. During the upgrade, service pods and networks are not affected. The SchedulingDisabled label will be added to all existing nodes. After the upgrade is complete, you can properly use existing nodes.

-

You do not need to migrate services, ensuring service continuity.

-

In-place upgrade does not upgrade the OS of a node. If you want to upgrade the OS, clear the corresponding node after the node upgrade is complete and reset the node to upgrade the OS to a new version.

-

Rolling upgrade

-

Only the Kubernetes components and certain network components are upgraded on the node. The SchedulingDisabled label will be added to all existing nodes to ensure that the running applications are not affected. After the upgrade is complete, you need to manually create nodes and gradually release the old nodes, thereby migrating your applications to the new nodes. In this mode, you can control the upgrade process.

-

Services are not interrupted.

-

-

-

Replace upgrade

-

The latest worker node image is used to reset the node OS.

-

This is the fastest upgrade mode and requires few manual interventions.

-

Data or configurations on the node will be lost, and services will be interrupted for a period of time.

-
-
-
-

Cluster Upgrade Between Major Versions

-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Table 3 Changelog between minor versions

Source Version

-

Target Version

-

Description

-

v1.21

-

v1.23

-
-

v1.19

-

v1.21

-
-

v1.17

-

v1.19

-
-

v1.15

-

v1.17

-
-

v1.13

-

v1.15

-
-

v1.11

-

v1.9

-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0200.html b/docs/cce/umn/cce_01_0200.html deleted file mode 100644 index 8926fd6b..00000000 --- a/docs/cce/umn/cce_01_0200.html +++ /dev/null @@ -1,58 +0,0 @@ - - -

Creating a Linux LVM Disk Partition for Docker

-

Scenario

This section describes how to check whether there are available raw disks and Linux LVM disk partitions and how to create Linux LVM disk partitions.

-
-

Prerequisites

To improve the system stability, attach a data disk to Docker and use the direct-lvm mode.

-
-

Procedure

  1. Check whether available raw disks exist on the current node.

    1. Log in to the target node as the root user.
    2. Check the raw disk device.

      lsblk -l | grep disk

      -

      If the following information is displayed, the raw disks named xvda and xvdb exist on the node.

      -
      xvda  202:0    0   40G  0 disk
      -xvdb  202:16   0  100G  0 disk
      -
    3. Check whether the raw disk is in use.

      lsblk /dev/<devicename>

      -

      devicename indicates the raw disk name, for example, xvda and xvdb in the previous step.

      -

      Run the lsblk /dev/xvda and lsblk /dev/xvdb commands. If the following information is displayed, xvda has been partitioned and used while xvdb is available. If no raw disk is available, bind an EVS disk to the node. It is advised that the disk space be no less than 80 GB.

      -
      NAME    MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
      -xvda    202:0    0   40G  0 disk
      -├─xvda1 202:1    0  100M  0 part /boot
      -└─xvda2 202:2    0 39.9G  0 part /
      -
      NAME MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
      -xvdb 202:16   0  100G  0 disk
      -
    -

  2. Check whether there are partitions available. Currently, only Linux LVM partitions are supported.

    1. Log in to the target node as the root user.
    2. Check the partition whose system type is Linux LVM.

      sfdisk -l 2>>/dev/null| grep "Linux LVM"

      -

      If the following information is displayed, two Linux LVM partitions, /dev/nvme0n1p1 and /dev/nvme0n1p2, exist in the system.

      -
      /dev/nvme0n1p1          1  204800  204800  209715200   8e  Linux LVM
      -/dev/nvme0n1p2     204801  409600  204800  209715200   8e  Linux LVM
      -
    3. Check whether the partition is in use.

      lsblk <partdevice>

      -

      <partdevice> is the Linux LVM partition found in the previous step.

      -

      In this example, run the lsblk /dev/nvme0n1p1 and lsblk /dev/nvme0n1p2 commands. If the following information is displayed, partition nvme0n1p1 is in use while nvme0n1p2 is available.

      -
      NAME                       MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
      -nvme0n1p1                   259:3    0  200G  0 part
      -└─vgpaas-thinpool_tdata   251:8    0  360G  0 lvm
      -  └─vgpaas-thinpool       251:10   0  360G  0 lvm
      -
      NAME      MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
      -nvme0n1p2 259:1    0  100G  0 part
      -

      If no Linux LVM partition is available, perform 3 to create a partition for Docker.

      -
    -

  3. Create a Linux LVM disk partition for Docker.

    1. Run the following command to create a disk partition. devicename indicates the available raw disk name, for example, xvdb in 1.

      fdisk /dev/devicename

      -
    2. Enter n to create a new partition, enter p to select the primary partition type, and enter 4 as the partition number (the fourth primary partition).
      Figure 1 Creating a partition
      -
    3. Configure the start and last sectors as follows for example:
      Start sector (1048578048-4294967295, 1048578048 by default):
      -1048578048
      -Last sector, +sector or size {K, M, or G} (1048578048-4294967294, 4294967294 by default): +100G
      -

      This configuration indicates that partition 4 has been set to the Linux type and the size is 100 GiB.

      -
    4. Enter t to change the partition system type. Enter the hex code 8e when prompted to change the system type to Linux LVM.
      Command (enter m to obtain help): t
      -Partition ID (ranging from 1 to 4, 4 by default): 4
      -Hex code (enter L to list all codes): 8e
      -This configuration changes the partition type from Linux to Linux LVM.
      -
    5. Enter w to save the modification.
      Command (enter m to obtain help): w
      -The partition table has been altered!
      -
    6. Run the partprobe command to refresh the disk partition.
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0205.html b/docs/cce/umn/cce_01_0205.html deleted file mode 100644 index 7db26d83..00000000 --- a/docs/cce/umn/cce_01_0205.html +++ /dev/null @@ -1,25 +0,0 @@ - - -

metrics-server

-

From version 1.8 onwards, Kubernetes provides resource usage metrics, such as the container CPU and memory usage, through the Metrics API. These metrics can be directly accessed by users (for example, by using the kubectl top command) or used by controllers (for example, Horizontal Pod Autoscaler) in a cluster for decision-making. The specific component is metrics-server, which replaces heapster to provide similar functions. heapster has been gradually deprecated since v1.11.

-

metrics-server is an aggregator for monitoring data of core cluster resources. You can quickly install this add-on on the CCE console.

-

After metrics-server is installed, you can create an HPA policy on the Workload Scaling tab page of the Auto Scaling page. For details, see Creating an HPA Policy for Workload Auto Scaling.

-

The official community project and documentation are available at https://github.com/kubernetes-sigs/metrics-server.

-

Notes and Constraints

This add-on can be installed only in CCE clusters of v1.13 or later.

-
-

Installing the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Marketplace tab page, click Install Add-on under metrics-server.
  2. On the Install Add-on page, select the cluster and the add-on version, and click Next: Configuration.
  3. Select Single or HA for Add-on Specifications, and click Install.

    After the add-on is installed, click Go Back to Previous Page. On the Add-on Instance tab page, select the corresponding cluster to view the running instance. This indicates that the add-on has been installed on each node in the cluster.

    -

-
-

Upgrading the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Instance tab page, click Upgrade under metrics-server.

    • If the Upgrade button is not available, the current add-on is already up-to-date and no upgrade is required.
    • During the upgrade, the metrics-server add-on of the original version on cluster nodes will be discarded, and the add-on of the target version will be installed.
    -
    -

  2. On the Basic Information page, select the add-on version and click Next.
  3. Set the parameters by referring to the parameter description in Installing the Add-on and click Upgrade.
-
-

Uninstalling the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Instance tab page, click Uninstall under metrics-server.
  2. In the dialog box displayed, click Yes to uninstall the add-on.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0207.html b/docs/cce/umn/cce_01_0207.html deleted file mode 100644 index a66fda91..00000000 --- a/docs/cce/umn/cce_01_0207.html +++ /dev/null @@ -1,15 +0,0 @@ - - -

Auto Scaling

-
-
- -
- diff --git a/docs/cce/umn/cce_01_0208.html b/docs/cce/umn/cce_01_0208.html deleted file mode 100644 index fc05f43b..00000000 --- a/docs/cce/umn/cce_01_0208.html +++ /dev/null @@ -1,78 +0,0 @@ - - -

Creating an HPA Policy for Workload Auto Scaling

-

Horizontal Pod Autoscaling (HPA) in Kubernetes implements horizontal scaling of pods. In a CCE HPA policy, you can configure different cooldown time windows and scaling thresholds for different applications based on the Kubernetes HPA.

-
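Because a CCE HPA policy is based on the Kubernetes HPA, a rule such as keeping average CPU usage around 50% corresponds roughly to a plain HorizontalPodAutoscaler object like the sketch below. The console creates and manages the object for you; the names hpa-example and nginx are illustrative, and the autoscaling/v2 API requires a recent cluster version (older clusters use autoscaling/v2beta2).

apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: hpa-example            # Illustrative name
  namespace: default
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: nginx                # Workload to scale
  minReplicas: 2               # Pod range: minimum
  maxReplicas: 10              # Pod range: maximum
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 50 # Expected value: average CPU usage of 50%

With such a target, the desired count follows the formula Number of pods required (rounded up) = Current metric value/Expected value x Number of current pods. For example, at 100% average CPU usage with 2 running pods, the workload is scaled to 4 pods.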

Prerequisites

The metrics-server add-on has been installed. This add-on collects resource metrics from the kubelets in a Kubernetes cluster, including CPU usage and memory usage.

-
-

Notes and Constraints

  • HPA policies can be created only for clusters of v1.13 or later.
  • Only one policy can be created for each workload. That is, if you have created an HPA policy, you cannot create other HPA policies for the workload. You can delete the created HPA policy and create a new one.
  • For clusters earlier than v1.19.10, if an HPA policy is used to scale out a workload with EVS volumes mounted, the existing pods cannot be read or written when a new pod is scheduled to another node.

    For clusters of v1.19.10 and later, if an HPA policy is used to scale out a workload with EVS volume mounted, a new pod cannot be started because EVS disks cannot be attached.

    -
-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Workload Scaling tab page, click Create HPA Policy.
  2. In the Check Add-ons step:

    • If an icon indicating that the add-on is not installed is displayed next to the add-on name, click Install, set add-on parameters as required, and click Install to install the add-on.
    • If an icon indicating that the add-on has been installed is displayed next to the add-on name, the add-on has been installed.
    -

  3. After the required add-ons have been installed, click Next: Policy configuration.

    If the add-ons have been installed, after you click Create HPA Policy, you will directly land on the second step to configure the policy. The first step (checking the add-ons) has been completed almost instantly.

    -
    -

  4. Set policy parameters by referring to Table 1.

    -

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Table 1 HPA policy parameters

    Parameter

    -

    Description

    -

    Policy Name

    -

    Name of the policy to be created. Set this parameter as required.

    -

    Cluster Name

    -

    Cluster to which the workload belongs.

    -

    Namespace

    -

    Namespace to which the workload belongs.

    -

    Associated Workload

    -

    Workload with which the HPA policy is associated.

    -

    Pod Range

    -

    Minimum and maximum numbers of pods.

    -

    When a policy is triggered, the workload pods are scaled within this range.

    -

    Cooldown Period

    -

    Interval between a scale-in and a scale-out. The unit is minute. The interval cannot be shorter than 1 minute.

    -

    This parameter is available only for clusters of v1.15 and later. It is not supported in clusters of v1.13 or earlier.

    -

    This parameter indicates the interval between consecutive scaling operations. The cooldown period ensures that a scaling operation is initiated only when the previous one is completed and the system is running stably.

    -

    Rules

    -

    Policy rules can be based on system metrics.

    -

    System metrics

    -
    • Metric: You can select CPU usage or Memory usage.
      NOTE:

      Usage = CPUs or memory used by pods/Requested CPUs or memory.

      -
      -
    • Expected Value: Enter the expected average resource usage.

      This parameter indicates the expected value of the selected metric. Number of pods required (rounded up) = Current metric value/Expected value x Number of current pods

      -
    • Threshold: Enter the scaling thresholds.

      If the metric value is greater than the scale-in threshold and less than the scale-out threshold, no scaling is triggered. This parameter is supported only in clusters of v1.15 or later.

      -
    -

    You can click Add Rule again to add more scaling policies.

    -
    NOTE:

    When calculating the number of pods to be added or reduced, the HPA policy uses the maximum metrics values in the last 5 minutes.

    -
    -
    -
    -

  5. After the configuration is complete, click Create. If the system displays a message indicating that the request to create workload policy *** is successfully submitted, click Back to Workload Scaling.
  6. On the Workload Scaling tab page, you can view the newly created HPA policy.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0209.html b/docs/cce/umn/cce_01_0209.html deleted file mode 100644 index d7c4ce4e..00000000 --- a/docs/cce/umn/cce_01_0209.html +++ /dev/null @@ -1,207 +0,0 @@ - - -

Creating a Node Scaling Policy

-

CCE provides auto scaling through the autoscaler add-on. Nodes with different specifications can be automatically added across AZs on demand.

-

If a node scaling policy and the configuration in the autoscaler add-on take effect at the same time, for example, there are pods that cannot be scheduled and the value of a metric reaches the threshold at the same time, scale-out is performed first for the unschedulable pods.

-
  • If the scale-out succeeds for the unschedulable pods, the system skips the metric-based rule logic and enters the next loop.
  • If the scale-out fails for the unschedulable pods, the metric-based rule is executed.
-

Prerequisites

Before using the node scaling function, you must install the autoscaler add-on of v1.13.8 or later.

-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Node Scaling tab page, click Create Node Scaling Policy.
  2. In the Check Add-ons step:

    • If an icon indicating that the add-on is not installed is displayed next to the add-on name, click Install, set add-on parameters as required, and click Install to install the add-on.
    • If an icon indicating that the add-on has been installed is displayed next to the add-on name, the add-on has been installed.
    -

  3. After the required add-ons have been installed, click Next: Policy configuration.

    If the add-ons have been installed, after you click Create Node Scaling Policy, you will directly land on the second step to configure the policy. The first step (checking the add-ons) has been completed almost instantly.

    -
    -

  4. On the Create Node Scaling Policy page, set the following policy parameters.

    • Policy Name: name of the policy to be created, which can be customized.
    • Associated Node Pool: Click Add Node Pool and select the node pool to be associated. You can associate multiple node pools to use the same scaling policy.

      Priority is now supported for node pools. CCE will select a node pool for auto scaling based on the following policies:

      -
      1. CCE uses algorithms to determine whether a node pool meets the conditions to allow scheduling of a pod in pending state, including whether the node resources are greater than those requested by the pod, and whether the nodeSelector, nodeAffinity, and taints meet the conditions. In addition, the node pools that failed to scale (due to insufficient resources or other reasons) and are still in the 15-minute cool-down interval are filtered out.
      2. If multiple node pools meet the scaling requirements, the system checks the priority of each node pool and selects the node pool with the highest priority for scaling. The value ranges from 0 to 100 and the default priority is 0. The value 100 indicates the highest priority, and the value 0 indicates the lowest priority.
      3. If multiple node pools have the same priority or no priority is configured for them, the system selects the node pool that will consume the least resources based on the configured VM specification.
      4. If the VM specifications of multiple node pools are the same but the node pools are deployed in different AZs, the system randomly selects a node pool to trigger scaling.
      5. If the resources of the preferred node pool are insufficient, the system automatically selects the next node pool based on the priority.
      -

      For details about the node pool priority, see Autoscaler.

      -
      -
    • Execution Rules: Click Add Rule. In the dialog box displayed, set the following parameters:

      Name: Enter a rule name.

      -

      Type: You can select Metric-based or Periodic. The differences between the two types are as follows:

      -
      • Metric-based:
        • Condition: Select CPU allocation or Memory allocation and enter a value. The value must be greater than the scale-in percentage configured in the autoscaler add-on.
          • Resource allocation (%) = Resources requested by pods in the node pool/Resources allocatable to pods in the node pool
          • If multiple rules meet the conditions, the rules are executed in either of the following modes:

            If rules based on the CPU allocation rate and memory allocation rate are configured and two or more rules meet the scale-out conditions, the rule that will add the most nodes will be executed.

            -

            If a rule based on the CPU allocation rate and a periodic rule are configured and they both meet the scale-out conditions, one of them will be executed randomly. The rule executed first (rule A) changes the node pool to the scaling state. As a result, the other rule (rule B) cannot be executed. After rule A is executed and the node pool status becomes normal, rule B will not be executed.

            -
          • If rules based on the CPU allocation rate and memory allocation rate are configured, the policy detection period varies with the processing logic of each loop of the autoscaler add-on. Scale-out is triggered once the conditions are met, but it is constrained by other factors such as the cool-down interval and node pool status.
          -
          -
        • Action: Set an action to be performed when the trigger condition is met.
        -
      • Periodic:
        • Triggered At: You can select a specific time point every day, every week, every month, or every year.
        • Action: Set an action to be performed when the Triggered At value is reached.
        -
      -

      You can click Add Rule again to add more node scaling policies. You can add a maximum of one CPU usage-based rule and one memory usage-based rule. The total number of rules cannot exceed 10.

      -
    -

  5. After the configuration is complete, click Create. If the system displays a message indicating that the request to create a node scaling policy is submitted successfully, click Back to Node Scaling Policy List.
  6. On the Node Scaling tab page, you can view the created node scaling policy.
-
-

Constraints on Scale-in

CCE cannot trigger scale-in by using node scaling policies. You can set a scale-in policy when installing the autoscaler add-on.

-

Node scale-in can be triggered only by the resource allocation rate. When CPU and memory allocation rates in a cluster are lower than the specified thresholds (set when the autoscaler add-on is installed or modified), scale-in is triggered for nodes in the node pool (this function can be disabled).

-
-

Example YAML File

The following is a YAML example of a node scaling policy:

-
apiVersion: autoscaling.cce.io/v1alpha1
-kind: HorizontalNodeAutoscaler
-metadata:
-  creationTimestamp: "2020-02-13T12:47:49Z"
-  generation: 1
-  name: xxxx
-  namespace: kube-system
-  resourceVersion: "11433270"
-  selfLink: /apis/autoscaling.cce.io/v1alpha1/namespaces/kube-system/horizontalnodeautoscalers/xxxx
-  uid: c2bd1e1d-60aa-47b5-938c-6bf3fadbe91f
-spec:
-  disable: false
-  rules:
-  - action:
-      type: ScaleUp
-      unit: Node
-      value: 1
-    cronTrigger:
-      schedule: 47 20 * * *
-    disable: false
-    ruleName: cronrule
-    type: Cron
-  - action:
-      type: ScaleUp
-      unit: Node
-      value: 2
-    disable: false
-    metricTrigger:
-      metricName: Cpu
-      metricOperation: '>'
-      metricValue: "40"
-      unit: Percent
-    ruleName: metricrule
-    type: Metric
-  targetNodepoolIds:
-  - 7d48eca7-3419-11ea-bc29-0255ac1001a8
-
- -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Table 1 Key parameters

Parameter

-

Type

-

Description

-

spec.disable

-

Bool

-

Whether to enable the scaling policy. This parameter takes effect for all rules in the policy.

-

spec.rules

-

Array

-

All rules in a scaling policy.

-

spec.rules[x].ruleName

-

String

-

Rule name.

-

spec.rules[x].type

-

String

-

Rule type. Currently, Cron and Metric are supported.

-

spec.rules[x].disable

-

Bool

-

Rule switch. Currently, only false is supported.

-

spec.rules[x].action.type

-

String

-

Rule action type. Currently, only ScaleUp is supported.

-

spec.rules[x].action.unit

-

String

-

Rule action unit. Currently, only Node is supported.

-

spec.rules[x].action.value

-

Integer

-

Rule action value.

-

spec.rules[x].cronTrigger

-

/

-

Optional. This parameter is valid only in periodic rules.

-

spec.rules[x].cronTrigger.schedule

-

String

-

Cron expression of a periodic rule.

-

spec.rules[x].metricTrigger

-

/

-

Optional. This parameter is valid only in metric-based rules.

-

spec.rules[x].metricTrigger.metricName

-

String

-

Metric of a metric-based rule. Currently, Cpu and Memory are supported.

-

spec.rules[x].metricTrigger.metricOperation

-

String

-

Comparison operator of a metric-based rule. Currently, only > is supported.

-

spec.rules[x].metricTrigger.metricValue

-

String

-

Metric threshold of a metric-based rule. The value can be any integer from 1 to 100 and must be a character string.

-

spec.rules[x].metricTrigger.unit

-

String

-

Unit of the metric-based rule threshold. Currently, only % is supported.

-

spec.targetNodepoolIds

-

Array

-

All node pools associated with the scaling policy.

-

spec.targetNodepoolIds[x]

-

String

-

ID of the node pool associated with the scaling policy.

-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0210.html b/docs/cce/umn/cce_01_0210.html deleted file mode 100644 index b5cfa4ec..00000000 --- a/docs/cce/umn/cce_01_0210.html +++ /dev/null @@ -1,60 +0,0 @@ - - -

Migrating Services Across Clusters of Different Versions

-

Application Scenarios

This section describes how to migrate services from a cluster of an earlier version to a cluster of a later version in CCE.

-

This operation is applicable when a cross-version cluster upgrade is required (for example, an upgrade from v1.7.* or v1.9.* to v1.17.*) and new clusters can be created for service migration.

-
-

Prerequisites

-
- - - - - - - - - - - - - - - - - - - -
Table 1 Checklist before migration

Category

-

Description

-

Cluster

-

NodeIP-related: Check whether node IP addresses (including EIPs) of the cluster before the migration have been used in other configurations or whitelists.

-

Workloads

-

Record the number of workloads for post-migration check.

-

Storage

-
  1. Check whether the storage resources in use are provisioned by the cloud or by your organization.
  2. Change the automatically created storage to the existing storage in the new cluster.
-

Network

-
  1. Pay special attention to the ELB and ingress.
  2. Clusters of an earlier version support only the classic load balancer. To migrate services to a new cluster, you need to change the load balancer type to shared load balancer. Then, the corresponding ELB service will be re-established.
-

O&M

-

Private configuration: Check whether kernel parameters or system data have been configured on nodes in the cluster.

-
-
-
-

Procedure

  1. Create a CCE cluster.

    Create a cluster with the same specifications and configurations as the cluster of the earlier version. For details, see Creating a CCE Cluster.

    -

  2. Add a node.

    Add nodes with the same specifications and manual configuration items. For details, see Creating a Node.

    -

  3. Create a storage volume in the new cluster.

    Use an existing storage volume to create a PVC in the new cluster. The PVC name remains unchanged. For details, see PersistentVolumeClaims (PVCs).

    -

    Storage switching supports only OBS buckets, SFS file systems, and shared EVS disks. If a non-shared EVS disk is used, you need to suspend the workloads in the old cluster to switch the storage resources. As a result, services will be interrupted.

    -
    -

  4. Create a workload in the new cluster.

    The workload name and specifications remain unchanged. For details about how to create a workload, see Creating a Deployment or Creating a StatefulSet. For details about how to mount a storage volume to the workload, see Creating a Pod Mounted with an EVS Volume.

    -

  5. Create a Service in the new cluster.

    The Service name and specifications remain unchanged. For details about how to create a Service, see Services.

    -

  6. Commission services.

    After all resources are created, commission the containerized services. If the commissioning is successful, migrate the services to the new cluster.

    -

  7. Delete the old cluster.

    When all functions of the new cluster are stable, delete the old cluster. For details about how to delete a cluster, see Deleting a Cluster.

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0211.html b/docs/cce/umn/cce_01_0211.html deleted file mode 100644 index ea44b5ae..00000000 --- a/docs/cce/umn/cce_01_0211.html +++ /dev/null @@ -1,67 +0,0 @@ - - -

Snapshots and Backups

-

CCE works with EVS to support snapshots. A snapshot is a complete copy or image of EVS disk data at a certain point of time, which can be used for data DR.

-

You can create snapshots to rapidly save the disk data at specified time points. In addition, you can use snapshots to create new disks so that the created disks contain the snapshot data from the start.

-

Precautions

  • The snapshot function is available only for clusters of v1.15 or later and requires the CSI-based everest add-on.
  • The subtype (common I/O, high I/O, or ultra-high I/O), disk mode (SCSI or VBD), data encryption, sharing status, and capacity of an EVS disk created from a snapshot must be the same as those of the disk associated with the snapshot. These attributes cannot be modified after being queried or set.
  • Snapshots can be created only for available or in-use CSI disks. During the free trial, you can create up to 7 snapshots per disk.
  • Snapshot data of encrypted disks is stored encrypted, and that of non-encrypted disks is stored non-encrypted.
-
-

Application Scenario

The snapshot feature helps address your following needs:

-
  • Routine data backup

    You can create snapshots for EVS disks regularly and use them to restore your data in case data loss or data inconsistency occurs due to misoperations, viruses, or attacks.

    -
  • Rapid data restoration

    You can create a snapshot or multiple snapshots before an OS change, application software upgrade, or a service data migration. If an exception occurs during the upgrade or migration, service data can be rapidly restored to the time point when the snapshot was created.

    -
    For example, a fault occurred on system disk A of ECS A, and therefore ECS A cannot be started. Because system disk A is already faulty, the data on system disk A cannot be restored by rolling back snapshots. In this case, you can use an existing snapshot of system disk A to create EVS disk B and attach it to ECS B that is running properly. Then, ECS B can read data from system disk A using EVS disk B.

    The snapshot capability provided by CCE is the same as the CSI snapshot function provided by the Kubernetes community. EVS disks can be created only based on snapshots, and snapshots cannot be rolled back to source EVS disks.

    -
    -
    -
  • Rapid deployment of multiple services

    You can use a snapshot to create multiple EVS disks containing the same initial data, and these disks can be used as data resources for various services, for example, data mining, report query, and development and testing. This method protects the initial data and creates disks rapidly, meeting the diversified service data requirements.

    -
-
-

Creating a Snapshot

Using the CCE Console

-
  1. Log in to the CCE console.
  2. Go to the cluster details page, choose Storage from the navigation pane, and click the Snapshots and Backups tab.
  3. Click Create Snapshot in the upper right corner. In the dialog box displayed, set related parameters.

    • Snapshot Name: Enter a snapshot name.
    • Storage: Select the PVC for which you want to create a snapshot.
    -

  4. Click Create.
-

Using YAML

-
kind: VolumeSnapshot
-apiVersion: snapshot.storage.k8s.io/v1beta1
-metadata:
-  finalizers:
-    - snapshot.storage.kubernetes.io/volumesnapshot-as-source-protection
-    - snapshot.storage.kubernetes.io/volumesnapshot-bound-protection
-  name: cce-disksnap-test
-  namespace: default
-spec:
-  source:
-    persistentVolumeClaimName: pvc-evs-test     # PVC name. Only an EVS PVC can be created.
-  volumeSnapshotClassName: csi-disk-snapclass
-
-

Using a Snapshot to Create a PVC

The disk type, encryption setting, and disk mode of the created EVS PVC are consistent with those of the snapshot's source EVS disk.

-

Using the CCE Console

-
  1. Log in to the CCE console.
  2. Go to the cluster details page, choose Storage from the navigation pane, and click the PersistentVolumeClaims (PVCs) tab.
  3. Click Create PVC in the upper right corner. In the dialog box displayed, set the PVC parameters.

    • Creation Mode: Select Snapshot.
    • PVC Name: name of a PVC.
    • Snapshot: Select the snapshot to be used.
    -

  4. Click Create.
-

Using YAML

-
apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: pvc-test
-  namespace: default
-  annotations:
-    everest.io/disk-volume-type: SSD     # EVS disk type, which must be the same as that of the source EVS disk of the snapshot.
-  labels:
-    failure-domain.beta.kubernetes.io/region: eu-de
-    failure-domain.beta.kubernetes.io/zone: eu-de-01
-spec:
-  accessModes:
-  - ReadWriteOnce
-  resources:
-    requests:
-      storage: 10Gi     # Requested capacity; it cannot be smaller than the source EVS disk of the snapshot.
-  storageClassName: csi-disk
-  dataSource:
-    name: cce-disksnap-test             # Snapshot name
-    kind: VolumeSnapshot
-    apiGroup: snapshot.storage.k8s.io
-
-
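The restored PVC is mounted like any other EVS PVC. The following sketch is for illustration only; the workload name and mount path are assumptions, and the PVC name matches the example above.

apiVersion: v1
kind: Pod
metadata:
  name: restore-demo                  # illustrative name
  namespace: default
spec:
  containers:
  - name: app
    image: nginx
    volumeMounts:
    - name: restored-data
      mountPath: /data                # illustrative mount path
  imagePullSecrets:
  - name: default-secret
  volumes:
  - name: restored-data
    persistentVolumeClaim:
      claimName: pvc-test             # PVC created from the snapshot in the example above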
-
- -
- diff --git a/docs/cce/umn/cce_01_0212.html b/docs/cce/umn/cce_01_0212.html deleted file mode 100644 index 19db47f2..00000000 --- a/docs/cce/umn/cce_01_0212.html +++ /dev/null @@ -1,20 +0,0 @@ - - -

Deleting a Cluster

-

Scenario

This section describes how to delete a cluster.

-
-

Precautions

  • Deleting a cluster will delete the nodes in the cluster (excluding accepted nodes), data disks attached to the nodes, workloads, and Services. These resources cannot be restored. Before performing this operation, ensure that data has been backed up or migrated.
    Resources that are not created in CCE will not be deleted:
    • Accepted nodes (only the nodes created in CCE are deleted)
    • ELB load balancers associated with Services and ingresses (only the automatically created load balancers are deleted)
    • Manually created cloud storage resources associated with PVs or imported cloud storage resources (only the cloud storage resources automatically created by PVCs are deleted)
    -
    -
  • A hibernated cluster cannot be deleted. Wake up the cluster and try again.
  • If a cluster whose status is Unavailable is deleted, some storage resources of the cluster may need to be manually deleted.
-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Clusters.
  2. Choose More > Delete.
  3. Delete the cluster.

    Figure 1 Deleting a cluster
    -

  4. Click Yes to start deleting the cluster.

    The delete operation takes 1 to 3 minutes to complete.

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0213.html b/docs/cce/umn/cce_01_0213.html deleted file mode 100644 index 90563979..00000000 --- a/docs/cce/umn/cce_01_0213.html +++ /dev/null @@ -1,196 +0,0 @@ - - -

Configuring Kubernetes Parameters

-

Scenario

CCE clusters allow you to manage Kubernetes parameters so that core cluster components work exactly as you require.

-
-

Notes and Constraints

This function is supported only in clusters of v1.15 and later. It is not displayed for versions earlier than v1.15.

-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Clusters.
  2. Choose More > Configuration.
  3. On the Configuration page on the right, change the values of the following Kubernetes parameters:

    -

    Table 1 Kubernetes parameters

    Component

    -

    Parameter

    -

    Description

    -

    Value

    -

    kube-apiserver

    -

    default-not-ready-toleration-seconds

    -

    Tolerance time, in seconds, for the notReady:NoExecute taint. A toleration with this duration is added by default to every pod that does not already have such a toleration.

    -

    Default: 300

    -

    default-unreachable-toleration-seconds

    -

    Tolerance time, in seconds, for the unreachable:NoExecute taint. A toleration with this duration is added by default to every pod that does not already have such a toleration.

    -

    Default: 300

    -

    max-mutating-requests-inflight

    -

    Maximum number of concurrent mutating requests. When the value of this parameter is exceeded, the server rejects requests.

    -

    The value 0 indicates no limitation.

    -

    Manual configuration is no longer supported since cluster version 1.21. The value is automatically specified based on the cluster scale.

    -
    • 200 for clusters with 50 or 200 nodes
    • 500 for clusters with 1000 nodes
    • 1000 for clusters with 2000 nodes
    -

    Default: 1000

    -

    max-requests-inflight

    -

    Maximum number of concurrent non-mutating requests. When the value of this parameter is exceeded, the server rejects requests.

    -

    The value 0 indicates no limitation.

    -

    Manual configuration is no longer supported since cluster version 1.21. The value is automatically specified based on the cluster scale.

    -
    • 400 for clusters with 50 or 200 nodes
    • 1000 for clusters with 1000 nodes
    • 2000 for clusters with 2000 nodes
    -

    Default: 2000

    -

    service-node-port-range

    -

    Range of node port numbers.

    -

    Default:

    -

    30000-32767

    -

    Options:

    -

    min>20105

    -

    max<32768

    -

    kube-controller-manager

    -

    -

    concurrent-deployment-syncs

    -

    Number of Deployments that are allowed to synchronize concurrently.

    -

    Default: 5

    -

    concurrent-endpoint-syncs

    -

    Number of endpoints that are allowed to synchronize concurrently.

    -

    Default: 5

    -

    concurrent-gc-syncs

    -

    Number of garbage collector workers that are allowed to synchronize concurrently.

    -

    Default: 20

    -

    concurrent-job-syncs

    -

    Number of jobs that can be synchronized at the same time.

    -

    Default: 5

    -

    concurrent-namespace-syncs

    -

    Number of namespaces that are allowed to synchronize concurrently.

    -

    Default: 10

    -

    concurrent-replicaset-syncs

    -

    Number of ReplicaSets that are allowed to synchronize concurrently.

    -

    Default: 5

    -

    concurrent-resource-quota-syncs

    -

    Number of resource quotas that are allowed to synchronize concurrently.

    -

    Default: 5

    -

    concurrent-service-syncs

    -

    Number of Services that are allowed to synchronize concurrently.

    -

    Default: 10

    -

    concurrent-serviceaccount-token-syncs

    -

    Number of service account tokens that are allowed to synchronize concurrently.

    -

    Default: 5

    -

    concurrent-ttl-after-finished-syncs

    -

    Number of TTL-after-finished controller workers that are allowed to synchronize concurrently.

    -

    Default: 5

    -

    concurrent_rc_syncs

    -

    Number of replication controllers that are allowed to synchronize concurrently.

    -

    Default: 5

    -

    horizontal-pod-autoscaler-sync-period

    -

    How often HPA audits metrics in a cluster.

    -

    Default: 15 seconds

    -

    kube-api-qps

    -

    Queries per second (QPS) to use while talking with kube-apiserver.

    -

    Default: 100

    -

    kube-api-burst

    -

    Burst to use while talking with kube-apiserver.

    -

    Default: 100

    -

    kube-scheduler

    -

    kube-api-qps

    -

    Queries per second (QPS) to use while talking with kube-apiserver.

    -

    Default: 100

    -

    kube-api-burst

    -

    Burst to use while talking with kube-apiserver.

    -

    Default: 100

    -
    -
    -

  4. Click OK.
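For reference, the default-not-ready-toleration-seconds and default-unreachable-toleration-seconds settings correspond to the tolerations that Kubernetes injects into every pod that does not define them itself. With the default value of 300, the injected tolerations are equivalent to the following sketch (shown for illustration only; you do not need to add them manually):

tolerations:
- key: node.kubernetes.io/not-ready
  operator: Exists
  effect: NoExecute
  tolerationSeconds: 300              # default-not-ready-toleration-seconds
- key: node.kubernetes.io/unreachable
  operator: Exists
  effect: NoExecute
  tolerationSeconds: 300              # default-unreachable-toleration-seconds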
-
- -
-
- -
- diff --git a/docs/cce/umn/cce_01_0214.html b/docs/cce/umn/cce_01_0214.html deleted file mode 100644 index fa37a1e8..00000000 --- a/docs/cce/umn/cce_01_0214.html +++ /dev/null @@ -1,22 +0,0 @@ - - -

Hibernating and Waking Up a Cluster

-

Scenario

If you do not need to use a cluster temporarily, you are advised to hibernate the cluster to save cluster management costs.

-

After a cluster is hibernated, resources such as workloads cannot be created or managed in the cluster.

-

A hibernated cluster can be quickly woken up and used normally.

-
-

Hibernating a Cluster

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Clusters.
  2. Choose More > Hibernate for the target cluster.
  3. In the dialog box displayed, check the precautions and click Yes. Wait until the cluster is hibernated.

    • After a cluster is hibernated, resources, such as worker nodes (ECSs), bound EIPs, and bandwidth, are still billed based on their own billing modes. To shut down nodes, select Stop all nodes in the cluster in the dialog box or see Stopping a Node.
    -
    -

  4. When the cluster status changes from Hibernating to Hibernation, the cluster is hibernated.
-
-

Waking Up a Cluster

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Clusters.
  2. Choose More > Wake.
  3. In the dialog box displayed, click Yes and wait until the cluster is woken up.
  4. When the cluster status changes from Waking to Available, the cluster is woken up.

    After the cluster is woken up, billing will be resumed for the resources on the master node.

    -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0215.html b/docs/cce/umn/cce_01_0215.html deleted file mode 100644 index b8a84ec8..00000000 --- a/docs/cce/umn/cce_01_0215.html +++ /dev/null @@ -1,25 +0,0 @@ - - -

Upgrading a Cluster

-
-
- - - -
- diff --git a/docs/cce/umn/cce_01_0216.html b/docs/cce/umn/cce_01_0216.html deleted file mode 100644 index 4c551437..00000000 --- a/docs/cce/umn/cce_01_0216.html +++ /dev/null @@ -1,127 +0,0 @@ - - -

Creating a DaemonSet

-

Scenario

CCE provides deployment and management capabilities for multiple types of containers and supports features of container workloads, including creation, configuration, monitoring, scaling, upgrade, uninstallation, service discovery, and load balancing.

-

A DaemonSet ensures that all (or some) nodes run one copy of a pod. When a node is added to the cluster, a pod is added to it accordingly. When a node is removed from the cluster, the pod on it is reclaimed. If a DaemonSet is deleted, all pods created by it will be deleted.

-

The typical application scenarios of a DaemonSet are as follows:

-
  • Run the cluster storage daemon, such as glusterd or Ceph, on each node.
  • Run the log collection daemon, such as Fluentd or Logstash, on each node.
  • Run the monitoring daemon, such as Prometheus Node Exporter, collectd, Datadog agent, New Relic agent, or Ganglia (gmond), on each node.
-

You can deploy one DaemonSet for each type of daemon on all nodes, or deploy multiple DaemonSets for the same type of daemon. In the latter case, each DaemonSet uses different flags and different CPU and memory requests for different hardware types.

-
-
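For reference, a minimal DaemonSet manifest for a per-node log collection daemon is sketched below. The image, namespace, and resource values are illustrative and should be adapted to your environment; the console procedure below creates a comparable object.

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: log-agent                     # illustrative name
  namespace: default
spec:
  selector:
    matchLabels:
      app: log-agent
  template:
    metadata:
      labels:
        app: log-agent
    spec:
      containers:
      - name: log-agent
        image: fluentd:latest         # illustrative log collection image
        resources:
          requests:
            cpu: 250m                 # matches the console default request
            memory: 512Mi
          limits:
            cpu: 500m
            memory: 1Gi
      imagePullSecrets:
      - name: default-secret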

Prerequisites

You must have one cluster available before creating a DaemonSet. For details on how to create a cluster, see Creating a CCE Cluster.

- -
-

Procedure

  1. Log in to the CCE console.
  2. In the navigation pane on the left, choose Workloads > DaemonSets. Click Create DaemonSet in the upper right corner of the page. Set basic workload parameters as described in Table 1. The parameters marked with an asterisk (*) are mandatory.

    -

    Table 1 Basic workload parameters

    Parameter

    -

    Description

    -

    * Workload Name

    -

    Name of the containerized workload to be created. The name must be unique.

    -

    Enter 4 to 63 characters starting with a letter and ending with a letter or digit. Only lowercase letters, digits, and hyphens (-) are allowed.

    -

    * Cluster Name

    -

    Cluster to which the workload belongs.

    -

    * Namespace

    -

    In a single cluster, data in different namespaces is isolated from each other. This enables applications to share the services of the same cluster without interfering with each other. If no namespace is set, the default namespace is used.

    -

    Time Zone Synchronization

    -

    If this parameter is enabled, the container and the node use the same time zone.

    -
    NOTICE:

    After time zone synchronization is enabled, disks of the hostPath type will be automatically added and listed in the Data Storage > Local Volume area. Do not modify or delete the disks.

    -
    -

    Description

    -

    Description of the workload.

    -
    -
    -

  3. Click Next: Add Container.

    1. Click Add Container and select the image to be deployed.
      • My Images: Create a workload using an image in the image repository you created.
      • Third-Party Images: Create a workload using an image from any third-party image repository. When you create a workload using a third-party image, ensure that the node where the workload is running can access public networks. For details on how to create a workload using a third-party image, see Using a Third-Party Image.
        • If your image repository does not require authentication, set Secret Authentication to No, enter an image pull address, and then click OK.
        • If your image repository must be authenticated (account and password), you need to create a secret and then use a third-party image. For details, see Using a Third-Party Image.
        -
      • Shared Images: Create a workload using an image shared by another tenant through the SWR service.
      -
    2. Configure basic image information.

      A workload is an abstract model of a group of pods. One pod can encapsulate one or more containers. You can click Add Container in the upper right corner to add multiple container images and set them separately.

      Table 2 Image parameters

      Parameter

      -

      Description

      -

      Image Name

      -

      Name of the image. You can click Change Image to update it.

      -

      *Image Version

      -

      Select the image tag to be deployed.

      -

      *Container Name

      -

      Name of the container. You can modify it.

      -

      Privileged Container

      -

      Programs in a privileged container have certain privileges.

      -

      If Privileged Container is On, the container is granted superuser permissions. For example, privileged containers can manipulate network devices on the host machine and modify kernel parameters.

      -

      Container Resources

      -

      CPU

      -
      • Request: minimum number of CPU cores required by a container. The default value is 0.25 cores.
      • Limit: maximum number of CPU cores available for a container. Do not leave Limit unspecified. Otherwise, intensive use of container resources will occur and your workload may exhibit unexpected behavior.
      -

      Memory

      -
      • Request: minimum amount of memory required by a container. The default value is 512 MiB.
      • Limit: maximum amount of memory available for a container. When memory usage exceeds the specified memory limit, the container will be terminated.
      -

      For more information about Request and Limit, see Setting Container Specifications.

      -

      GPU: configurable only when the cluster contains GPU nodes.

      -

      It indicates the percentage of GPU resources reserved for a container. Select Use and set the percentage. For example, if this parameter is set to 10%, the container is allowed to use 10% of GPU resources. If you do not select Use or set this parameter to 0, no GPU resources can be used.

      -

      GPU/Graphics Card: The workload's pods will be scheduled to the node with the specified GPU.

      -

      If Any GPU type is selected, the container uses a random GPU in the node. If you select a specific GPU, the container uses this GPU accordingly.

      -
      -
      -
    3. Lifecycle: Commands for starting and running containers can be set.
    4. Health Check: CCE provides two types of probes: liveness probe and readiness probe. They are used to determine whether containers and user services are running properly. For more information, see Setting Health Check for a Container.
      • Liveness Probe: used to restart the unhealthy container.
      • Readiness Probe: used to change the container to the unready state when detecting that the container is unhealthy. In this way, service traffic will not be directed to the container.
      -
    5. Environment Variables: Environment variables can be added to a container. In general, environment variables are used to set parameters.
      On the Environment Variables tab page, click Add Environment Variable. Currently, three types of environment variables are supported:
      • Added manually: Set Variable Name and Variable Value/Reference.
      • Added from Secret: Set Variable Name and select the desired secret name and data. A secret must be created in advance. For details, see Creating a Secret.
      • Added from ConfigMap: Set Variable Name and select the desired ConfigMap name and data. A ConfigMap must be created in advance. For details, see Creating a ConfigMap.

        To edit an environment variable that has been set, click Edit. To delete an environment variable that has been set, click Delete.

        -
        -
      -
      -
    6. Data Storage: Data storage can be mounted to containers for persistent storage and high disk I/O. Local volume and cloud storage are supported. For details, see Storage (CSI).

      Currently, cloud storage cannot be mounted to secure (Kata) containers in a CCE Turbo cluster.

      -
      -
    7. Security Context: Container permissions can be configured to protect CCE and other containers from being affected.

      Enter the user ID to set container permissions and prevent systems and other containers from being affected.

      -
    8. Log Policies: Log collection policies and log directory can be configured to collect container logs for unified management and analysis. For details, see Container Logs.
    -

  4. Click Next: Set Application Access. Then, click Add Service and set the workload access type.

    If your workload will be reachable to other workloads or public networks, add a Service to define the workload access type.

    -

    The workload access type determines the network attributes of the workload. Workloads with different access types can provide different network capabilities. For details, see Overview.

    -

  5. Click Next: Configure Advanced Settings to configure advanced policies.

    • Upgrade Policy:
      • Upgrade Mode: Only Rolling upgrade is supported. During a rolling upgrade, old pods are gradually replaced with new ones. During the upgrade, service traffic is evenly distributed to both the old and new pods to ensure service continuity.
      • Maximum Number of Unavailable Pods: Maximum number of unavailable pods allowed in a rolling upgrade. If the number is equal to the total number of pods, services may be interrupted. Minimum number of alive pods = Total pods – Maximum number of unavailable pods
      -
    • Graceful Deletion:

      Graceful Time Window: Enter the time. The graceful scale-in policy provides a time window for workload deletion and is reserved for executing commands in the PreStop phase in the lifecycle. If workload processes are not terminated after the time window elapses, the workload will be forcibly deleted.

      -
    • Scheduling Policies: You can combine static global scheduling policies or dynamic runtime scheduling policies as required. For details, see Scheduling Policy Overview.
    • Advanced Pod Settings
      • Pod Label: The built-in app label is specified when the workload is created. It is used to set affinity and anti-affinity scheduling and cannot be modified. You can click Add Label to add labels.
      -
      Figure 1 Advanced pod settings
      -
    • Client DNS Configuration: A CCE cluster has a built-in DNS add-on (CoreDNS) to provide domain name resolution for workloads in the cluster.
      • DNS Policy
        • ClusterFirst: The default DNS configuration overrides the Nameserver and DNS Search Domain configurations of the client.
        • None: Only the Nameserver and DNS Search Domain configurations are used for domain name resolution.
        • Default: The pod inherits the DNS configuration from the node on which the pod runs.
        -
      • Nameserver: You can configure a domain name server for a user-defined domain name. The value is one or a group of DNS IP addresses, for example, 1.2.3.4.
      • DNS Search Domain: a search list for host-name lookup. When a domain name cannot be resolved, DNS queries will be attempted combining the domain name with each domain in the search list in turn until a match is found or all domains in the search list are tried.
      • Timeout (s): amount of time the resolver will wait for a response from a remote name server before retrying the query on a different name server. Set it based on the site requirements.
      • ndots: threshold for the number of dots that must appear in a domain name before an initial absolute query will be made. If a domain name has ndots or more than ndots dots, the name is a fully qualified domain name (FQDN) and will be tried first as an absolute name. If a domain name has fewer than ndots dots, the operating system will look up the name in a list of search domain names.
      -
    -

  6. After the preceding configurations are complete, click Create. On the page displayed, click Return to Workload List to view the workload status.

    If the workload is in the Running state, it has been successfully created.

    -

    Workload status is not updated in real time. Click the refresh button in the upper right corner or press F5 to refresh the page.

    -
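The advanced settings in step 5 map to standard fields in the DaemonSet manifest. The following sketch shows the corresponding fields with illustrative values only; the field values generated by the console depend on what you configure.

spec:
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1                     # Maximum Number of Unavailable Pods
  template:
    spec:
      terminationGracePeriodSeconds: 30     # Graceful Time Window
      dnsPolicy: "None"                     # DNS Policy set to None
      dnsConfig:
        nameservers:
        - 1.2.3.4                           # Nameserver
        searches:
        - my-ns.svc.cluster.local           # DNS Search Domain (illustrative)
        options:
        - name: ndots
          value: "2"                        # ndots
        - name: timeout
          value: "3"                        # Timeout (s)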

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0220.html b/docs/cce/umn/cce_01_0220.html deleted file mode 100644 index 7c37b420..00000000 --- a/docs/cce/umn/cce_01_0220.html +++ /dev/null @@ -1,54 +0,0 @@ - - -

Workload-Workload Affinity

-

Using the CCE Console

  1. When Creating a Deployment or Creating a StatefulSet, in the Scheduling Policies area on the Configure Advanced Settings page, choose Inter-Pod Affinity and Anti-affinity > Affinity with Pods > Add.
  2. Select the workloads that will be co-located with the current workload on the same node, and click OK.

    The workload to be created will be deployed on the same node as the selected affinity workloads.

    -

-
-

Using kubectl

This section uses Nginx as an example to describe how to create a workload using kubectl.

-

Prerequisites

-

The ECS where the kubectl client runs has been connected to your cluster. For details, see Connecting to a Cluster Using kubectl.

-

Procedure

-

When using kubectl to create a Deployment or using kubectl to create a StatefulSet, configure workload-workload affinity. The following is an example YAML file for workload-workload affinity.

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: nginx
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: nginx
-  strategy:
-    type: RollingUpdate
-  template:
-    metadata:
-      labels:
-        app: nginx
-    spec:
-      containers:
-      - image: nginx 
-        imagePullPolicy: Always
-        name: nginx
-      imagePullSecrets:
-      - name: default-secret
-      affinity:
-        podAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-          - labelSelector:
-              matchExpressions:
-              - key: app          #workload's label key
-                operator: In
-                values:
-                - test            #workload's label value
-            topologyKey: kubernetes.io/hostname     #schedule onto the same node as the matched pods
-
-

Setting the Object Type After Creating a Workload

  1. Log in to the CCE console and choose Workloads > Deployments or Workloads > StatefulSets in the navigation pane.
  2. Click the name of the workload for which you will add a scheduling policy. On the workload details page, choose Scheduling Policies > Add Simple Scheduling Policy > Add Affinity Object.
  3. Set Object Type to Workload and select the workloads to be deployed on the same node as the created workload. The created workload and the selected workloads will be deployed on the same node.

    This method can be used to add, edit, or delete scheduling policies.

    -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0222.html b/docs/cce/umn/cce_01_0222.html deleted file mode 100644 index 37001a83..00000000 --- a/docs/cce/umn/cce_01_0222.html +++ /dev/null @@ -1,214 +0,0 @@ - - -

Managing a Node Pool

-

Notes and Constraints

The default node pool DefaultPool does not support the following management operations.

-
-

Configuring Kubernetes Parameters

CCE allows you to highly customize Kubernetes parameter settings on core components in a cluster. For more information, see kubelet.

-

This function is supported only in clusters of v1.15 and later. It is not displayed for clusters earlier than v1.15.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Node Pools.
  2. In the upper right corner of the displayed page, select a cluster to filter node pools by cluster.
  3. Click Configuration next to the node pool name.
  4. On the Configuration page on the right, change the values of the following Kubernetes parameters:

    -

    Table 1 Kubernetes parameters

    Component

    -

    Parameter

    -

    Description

    -

    Default Value

    -

    Remarks

    -

    docker

    -

    -

    native-umask

    -

    --exec-opt native.umask

    -

    normal

    -

    Cannot be changed.

    -

    docker-base-size

    -

    --storage-opts dm.basesize

    -

    10G

    -

    Cannot be changed.

    -

    insecure-registry

    -

    Address of an insecure image registry

    -

    false

    -

    Cannot be changed.

    -

    limitcore

    -

    Limit on the core dump file size in a container, in bytes

    -

    5368709120

    -

    -

    -

    default-ulimit-nofile

    -

    Limit on the number of handles in a container

    -

    {soft}:{hard}

    -

    -

    -

    kube-proxy

    -

    conntrack-min

    -

    sysctl -w net.nf_conntrack_max

    -

    131072

    -

    The values can be modified during the node pool lifecycle.

    -

    conntrack-tcp-timeout-close-wait

    -

    sysctl -w net.netfilter.nf_conntrack_tcp_timeout_close_wait

    -

    1h0m0s

    -

    kubelet

    -

    cpu-manager-policy

    -

    --cpu-manager-policy

    -

    none

    -

    The values can be modified during the node pool lifecycle.

    -

    kube-api-qps

    -

    Queries per second (QPS) to use while talking with kube-apiserver.

    -

    100

    -

    kube-api-burst

    -

    Burst to use while talking with kube-apiserver.

    -

    100

    -

    max-pods

    -

    Maximum number of pods managed by kubelet.

    -

    110

    -

    pod-pids-limit

    -

    PID limit in Kubernetes

    -

    -1

    -

    with-local-dns

    -

    Whether to use the local IP address as the ClusterDNS of the node.

    -

    false

    -

    allowed-unsafe-sysctls

    -

    Insecure system configuration allowed.

    -

    Starting from v1.17.17, CCE enables pod security policies for kube-apiserver. You need to add corresponding configurations to allowedUnsafeSysctls of a pod security policy to make the policy take effect. (This configuration is not required for clusters earlier than v1.17.17.) For details, see Example of Enabling Unsafe Sysctls in Pod Security Policy.

    -

    []

    -
    -
    -

  5. Click OK.
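For example, a sysctl added to allowed-unsafe-sysctls in the table above still has to be requested explicitly in the pod spec before it takes effect. A minimal sketch, in which the sysctl name and value are illustrative:

apiVersion: v1
kind: Pod
metadata:
  name: sysctl-demo                   # illustrative name
spec:
  securityContext:
    sysctls:
    - name: net.core.somaxconn        # must also be listed in allowed-unsafe-sysctls
      value: "1024"
  containers:
  - name: app
    image: nginx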
-
-

Editing a Node Pool

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Node Pools.
  2. In the upper right corner of the displayed page, select a cluster to filter node pools by cluster.
  3. Click Edit next to the name of the node pool you will edit. In the Edit Node Pool dialog box, edit the following parameters:

    -

    Table 2 Node pool parameters

    Parameter

    -

    Description

    -

    Name

    -

    Name of the node pool.

    -

    Nodes

    -

    Modify the number of nodes based on service requirements.

    -

    Autoscaler

    -

    By default, autoscaler is disabled.

    -

    After you enable autoscaler by clicking the toggle, nodes in the node pool are automatically created or deleted based on service requirements.

    -
    • Maximum Nodes and Minimum Nodes: You can set the maximum and minimum number of nodes to ensure that the number of nodes to be scaled is within a proper range.
    • Priority: A larger value indicates a higher priority. For example, if this parameter is set to 1 and 4 respectively for node pools A and B, B has a higher priority than A, and auto scaling is first triggered for B. If the priorities of multiple node pools are set to the same value, for example, 2, the node pools are not prioritized and the system performs scaling based on the minimum resource waste principle.
    -

    If the Autoscaler field is set to on, install the autoscaler add-on to use the autoscaler feature.

    -

    Taints

    -
    • This field is left blank by default. Taints allow nodes to repel a set of pods. You can add a maximum of 10 taints for each node pool. Each taint contains the following parameters:
      • Key: A key must contain 1 to 63 characters starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
      • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
      • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
      -
      NOTICE:

      If taints are used, you must configure tolerations in the YAML files of pods (see the toleration sketch after this procedure). Otherwise, scale-up may fail or pods cannot be scheduled onto the added nodes.

      -
      -
    -

    K8S Labels

    -

    K8S labels are key/value pairs that are attached to objects, such as pods. Labels are used to specify identifying attributes of objects that are meaningful and relevant to users, but do not directly imply semantics to the core system. For more information, see Labels and Selectors.

    -

    Resource Tags

    -

    It is recommended that you use TMS's predefined tag function to add the same tag to different cloud resources.

    -

    Predefined tags are visible to all service resources that support the tagging function. You can use predefined tags to improve tag creation and migration efficiency.

    -

    Tag changes do not affect the node.

    -
    -
    -

  4. After the configuration is complete, click Save.

    In the node pool list, the node pool status becomes Scaling. After the status changes to Completed, the node pool parameters are modified successfully. The modified configuration will be synchronized to all nodes in the node pool.

    -
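As noted for Taints in Table 2, pods must tolerate a node pool's taints before they can be scheduled onto its nodes. A toleration sketch for a taint with key key1, value value1, and effect NoSchedule (the key and value are illustrative) is added to the pod template as follows:

spec:
  tolerations:
  - key: key1                         # taint key configured for the node pool
    operator: Equal
    value: value1                     # taint value
    effect: NoSchedule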

-
-

Deleting a Node Pool

Deleting a node pool will delete nodes in the pool. Pods on these nodes will be automatically migrated to available nodes in other node pools. If pods in the node pool have a specific node selector and none of the other nodes in the cluster satisfies the node selector, the pods will become unschedulable.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Node Pools.
  2. In the upper right corner of the displayed page, select a cluster to filter node pools by cluster.
  3. Choose More > Delete next to a node pool name to delete the node pool.
  4. Read the precautions in the Delete Node Pool dialog box.
  5. Enter DELETE in the text box and click Yes to confirm that you want to continue the deletion.
-
-

Copying a Node Pool

You can copy the configuration of an existing node pool to create a new node pool on the CCE console.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Node Pools.
  2. In the upper right corner of the displayed page, select a cluster to filter node pools by cluster.
  3. Choose More > Copy next to a node pool name to copy the node pool.
  4. The configuration of the selected node pool is replicated to the Create Node Pool page. You can edit the configuration as required and click Next: Confirm.
  5. On the Confirm page, confirm the node pool configuration and click Create Now. Then, a new node pool is created based on the edited configuration.
-
-

Migrating a Node

Nodes in a node pool can be migrated. Currently, nodes in a node pool can be migrated only to the default node pool (defaultpool) in the same cluster.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Node Pools.
  2. In the upper right corner of the displayed page, select a cluster to filter node pools by cluster.
  3. Click More > Migrate next to the name of the node pool.
  4. In the dialog box displayed, select the destination node pool and the node to be migrated.

    After node migration, original resource tags, Kubernetes labels, and taints will be retained, and new Kubernetes labels and taints from the destination node pool will be added.

    -
    -

  5. Click OK.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0225.html b/docs/cce/umn/cce_01_0225.html deleted file mode 100644 index b1ba45e5..00000000 --- a/docs/cce/umn/cce_01_0225.html +++ /dev/null @@ -1,54 +0,0 @@ - - -

Workload-Node Affinity

-

Using the CCE Console

  1. When Creating a Deployment or Creating a StatefulSet, in the Scheduling Policies area on the Configure Advanced Settings page, choose Workload-Node Affinity and Anti-affinity > Affinity with Nodes > Add.
  2. Select the node on which you want to deploy the workload, and click OK.

    If you select multiple nodes, the system automatically chooses one of them during workload deployment.

    -

-
-

Using kubectl

This section uses an Nginx workload as an example to describe how to create a workload using kubectl.

-

Prerequisites

-

The ECS where the kubectl client runs has been connected to your cluster. For details, see Connecting to a Cluster Using kubectl.

-

Procedure

-

When using kubectl to create a Deployment or using kubectl to create a StatefulSet, configure workload-node affinity. The following is an example YAML file for workload-node affinity.

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: nginx
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: nginx
-  strategy:
-    type: RollingUpdate
-  template:
-    metadata:
-      labels:
-        app: nginx
-    spec:
-      containers:
-      - image: nginx 
-        imagePullPolicy: Always
-        name: nginx
-      imagePullSecrets:
-      - name: default-secret
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-            - matchExpressions:
-              - key: nodeName          #node's label key
-                operator: In
-                values:
-                - test-node-1          #node's label value
-
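When a single exact label match is all that is needed, the shorter nodeSelector field achieves the same scheduling result as the required node affinity rule above (a sketch using the same illustrative label):

    spec:
      nodeSelector:
        nodeName: test-node-1         # node's label key and value, as in the example above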
-

Setting the Object Type After Creating a Workload

  1. Log in to the CCE console and choose Workloads > Deployments or Workloads > StatefulSets in the navigation pane.
  2. Click the name of the workload for which you will add a scheduling policy. On the workload details page, choose Scheduling Policies > Add Simple Scheduling Policy > Add Affinity Object.
  3. Set Object Type to Node and select the node where the workload is to be deployed. The workload will be deployed on the selected node.

    This method can be used to add, edit, or delete scheduling policies.

    -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0226.html b/docs/cce/umn/cce_01_0226.html deleted file mode 100644 index ec597e04..00000000 --- a/docs/cce/umn/cce_01_0226.html +++ /dev/null @@ -1,54 +0,0 @@ - - -

Workload-Node Anti-Affinity

-

Using the CCE Console

  1. When Creating a Deployment or Creating a StatefulSet, in the Scheduling Policies area on the Configure Advanced Settings page, choose Workload-Node Affinity and Anti-affinity > Anti-affinity with Nodes > Add.
  2. Select the node on which the workload is ineligible to be deployed, and click OK.

    If you select multiple nodes, the workload will not be deployed on these nodes.

    -

-
-

Using kubectl

This section uses Nginx as an example to describe how to create a workload using kubectl.

-

Prerequisites

-

The ECS where the kubectl client runs has been connected to your cluster. For details, see Connecting to a Cluster Using kubectl.

-

Procedure

-

When using kubectl to create a Deployment or using kubectl to create a StatefulSet, configure workload-node anti-affinity. The following is an example YAML file for workload-node anti-affinity.

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: nginx
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: nginx
-  strategy:
-    type: RollingUpdate
-  template:
-    metadata:
-      labels:
-        app: nginx
-    spec:
-      containers:
-      - image: nginx 
-        imagePullPolicy: Always
-        name: nginx
-      imagePullSecrets:
-      - name: default-secret
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-            - matchExpressions:
-              - key: nodeName          #node's label key
-                operator: NotIn        #Indicates that the workload will not be deployed on the node.
-                values:
-                - test-node-1          #node's label value
-
-

Setting the Object Type After Creating a Workload

  1. Log in to the CCE console and choose Workloads > Deployments or Workloads > StatefulSets in the navigation pane.
  2. Click the name of the workload for which you will add a scheduling policy. On the workload details page, choose Scheduling Policies > Add Simple Scheduling Policy > Add Anti-affinity Object.
  3. Set Object Type to Node and select the node on which the workload is ineligible to be deployed. The workload will be constrained from being deployed on the selected node.

    This method can be used to add, edit, or delete scheduling policies.

    -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0227.html b/docs/cce/umn/cce_01_0227.html deleted file mode 100644 index 47e3e3ef..00000000 --- a/docs/cce/umn/cce_01_0227.html +++ /dev/null @@ -1,54 +0,0 @@ - - -

Workload-Workload Anti-Affinity

-

Using the CCE Console

  1. When Creating a Deployment or Creating a StatefulSet, in the Scheduling Policies area on the Configure Advanced Settings page, choose Inter-Pod Affinity and Anti-affinity > Anti-affinity with Pods > Add.
  2. Select the workloads that must not be co-located with the target workload on the same node, and click OK.

    The workload to be created and the selected workloads will be deployed on different nodes.

    -

-
-

Using kubectl

This section uses Nginx as an example to describe how to create a workload using kubectl.

-

Prerequisites

-

The ECS where the kubectl client runs has been connected to your cluster. For details, see Connecting to a Cluster Using kubectl.

-

Procedure

-

When using kubectl to create a Deployment or using kubectl to create a StatefulSet, configure workload-workload anti-affinity. The following is an example YAML file for workload-workload anti-affinity.

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: nginx
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: nginx
-  strategy:
-    type: RollingUpdate
-  template:
-    metadata:
-      labels:
-        app: nginx
-    spec:
-      containers:
-      - image: nginx 
-        imagePullPolicy: Always
-        name: nginx
-      imagePullSecrets:
-      - name: default-secret
-      affinity:
-        podAntiAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-          - labelSelector:
-              matchExpressions:
-              - key: app          #workload's label key
-                operator: In
-                values:
-                - test            #workload's label value
-            topologyKey: kubernetes.io/hostname     #do not schedule onto the same node as the matched pods
-
-

Setting the Object Type After Creating a Workload

  1. Log in to the CCE console and choose Workloads > Deployments or Workloads > StatefulSets in the navigation pane.
  2. Click the name of the workload for which you will add a scheduling policy. On the workload details page, choose Scheduling Policies > Add Simple Scheduling Policy > Add Anti-affinity Object.
  3. Set Object Type to Workload and select the workloads to be deployed on a different node from the created workload. The created workload and the selected workloads will be deployed on different nodes.

    This method can be used to add, edit, or delete scheduling policies.

    -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0228.html b/docs/cce/umn/cce_01_0228.html deleted file mode 100644 index 4a855538..00000000 --- a/docs/cce/umn/cce_01_0228.html +++ /dev/null @@ -1,54 +0,0 @@ - - -

Workload-AZ Affinity

-

Using the CCE Console

  1. When Creating a Deployment or Creating a StatefulSet, in the Scheduling Policies area on the Configure Advanced Settings page, click the add icon next to Workload-AZ Affinity and Anti-affinity > Affinity with AZs.
  2. Select the AZ in which you want to deploy the workload.

    The created workload will be deployed in the selected AZ.

    -

-
-

Using kubectl

This section uses an Nginx workload as an example to describe how to create a workload using kubectl.

-

Prerequisites

-

The ECS where the kubectl client runs has been connected to your cluster. For details, see Connecting to a Cluster Using kubectl.

-

Procedure

-

When using kubectl to create a Deployment or using kubectl to create a StatefulSet, configure workload-AZ affinity. The following is an example YAML file for workload-AZ affinity.

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: az-in-deployment
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: az-in-deployment
-  strategy:
-    type: RollingUpdate
-  template:
-    metadata:
-      labels:
-        app: az-in-deployment
-    spec:
-      containers:
-      - image: nginx 
-        imagePullPolicy: Always
-        name: nginx
-      imagePullSecrets:
-      - name: default-secret
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-            - matchExpressions:
-              - key: failure-domain.beta.kubernetes.io/zone #node's label key
-                operator: In        
-                values:
-                - az1                              #node's label value
-
-

Setting the Object Type After Creating a Workload

  1. Log in to the CCE console and choose Workloads > Deployments or Workloads > StatefulSets in the navigation pane.
  2. Click the name of the workload for which you will add a scheduling policy. On the workload details page, choose Scheduling Policies > Add Simple Scheduling Policy > Add Affinity Object.
  3. Set Object Type to Availability Zone, and select the AZ in which the workload is eligible to be deployed. The workload will be deployed in the selected AZ.

    This method can be used to add, edit, or delete scheduling policies.

    -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0229.html b/docs/cce/umn/cce_01_0229.html deleted file mode 100644 index 139c19d9..00000000 --- a/docs/cce/umn/cce_01_0229.html +++ /dev/null @@ -1,54 +0,0 @@ - - -

Workload-AZ Anti-Affinity

-

Using the CCE Console

  1. When Creating a Deployment or Creating a StatefulSet, in the Scheduling Policies area on the Configure Advanced Settings page, click the add icon next to Workload-AZ Affinity and Anti-affinity > Anti-affinity with AZs.
  2. Select an AZ in which the workload is ineligible to be deployed.

    The created workload will not be deployed in the selected AZ.

    -

-
-

Using kubectl

This section uses Nginx as an example to describe how to create a workload using kubectl.

-

Prerequisites

-

The ECS where the kubectl client runs has been connected to your cluster. For details, see Connecting to a Cluster Using kubectl.

-

Procedure

-

When using kubectl to create a Deployment or using kubectl to create a StatefulSet, configure workload-AZ anti-affinity. The following is an example YAML file for workload-AZ anti-affinity.

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: nginx
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: nginx
-  strategy:
-    type: RollingUpdate
-  template:
-    metadata:
-      labels:
-        app: nginx
-    spec:
-      containers:
-      - image: nginx 
-        imagePullPolicy: Always
-        name: nginx
-      imagePullSecrets:
-      - name: default-secret
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-            - matchExpressions:
-              - key: failure-domain.beta.kubernetes.io/zone       #node's label key   
-                operator: NotIn        
-                values:
-                - az1                                   #node's label value
-
-

Setting the Object Type After Creating a Workload

  1. Log in to the CCE console and choose Workloads > Deployments or Workloads > StatefulSets in the navigation pane.
  2. Click the name of the workload for which you will add a scheduling policy. On the workload details page, choose Scheduling Policies > Add Simple Scheduling Policy > Add Anti-affinity Object.
  3. Set Object Type to Availability Zone and select the AZ in which the workload is ineligible to be deployed. The workload will be constrained from being deployed in the selected AZ.

    This method can be used to add, edit, or delete scheduling policies.

    -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0230.html b/docs/cce/umn/cce_01_0230.html deleted file mode 100644 index 859c94f5..00000000 --- a/docs/cce/umn/cce_01_0230.html +++ /dev/null @@ -1,25 +0,0 @@ - - -

Simple Scheduling Policies

-
-
- - - -
- diff --git a/docs/cce/umn/cce_01_0231.html b/docs/cce/umn/cce_01_0231.html deleted file mode 100644 index 81fe1dbf..00000000 --- a/docs/cce/umn/cce_01_0231.html +++ /dev/null @@ -1,19 +0,0 @@ - - -

Custom Scheduling Policies

-
-
- - - -
- diff --git a/docs/cce/umn/cce_01_0232.html b/docs/cce/umn/cce_01_0232.html deleted file mode 100644 index d6ce9c4e..00000000 --- a/docs/cce/umn/cce_01_0232.html +++ /dev/null @@ -1,111 +0,0 @@ - - -

Node Affinity

-

Using the CCE Console

  1. Log in to the CCE console and choose Workloads > Deployments or Workloads > StatefulSets in the navigation pane.
  2. Click a workload name in the Deployment or StatefulSet list. On the displayed workload details page, click the Scheduling Policies tab and then click Add Custom Scheduling Policy.
  3. In the Node Affinity area, you can specify node labels to meet required or preferred rules in scheduling.

    -

    Table 1 Node affinity settings

    Parameter

    -

    Description

    -

    Required

    -

    It specifies a rule that must be met in scheduling. It corresponds to requiredDuringSchedulingIgnoredDuringExecution in Kubernetes. You can click Add Rule to add multiple required rules. A pod will be scheduled on a node that meets any of the rules configured.

    -

    Preferred

    -

    It specifies a preference in scheduling. It corresponds to preferredDuringSchedulingIgnoredDuringExecution in Kubernetes. You can click Add Rule to add multiple preferred rules. The scheduler will try to enforce the rules but does not guarantee that they are met. If the scheduler cannot satisfy any of the rules, the pod will still be scheduled.

    -
    -
    -

  4. Set a rule according to the following table. You can click Add Selector to configure multiple selectors for a rule.

    -

    Table 2 Selector settings

    Parameter

    -

    Description

    -

    Weight

    -
    • This parameter is unavailable for a required rule.
    • Set the weight of a preferred rule. A higher weight indicates a higher priority.
    -

    Label

    -

    Node label. You can use the default label or customize a label.

    -

    Operator

    -

    The following relations are supported: In, NotIn, Exists, DoesNotExist, Gt, and Lt

    -

    Value

    -

    Label value.

    -

    Operators In and NotIn allow one or more label values. Values are separated with semicolons (;). Operators Exists and DoesNotExist are used to determine whether a label exists, and do not require a label value. If you set the operator to Gt or Lt for a label, the label value must be greater than or less than a certain integer.

    -

    Operation

    -

    You can click Delete to delete a selector.

    -

    Add Selector

    -

    A selector corresponds to matchExpressions in Kubernetes. You can click Add Selector to add multiple selectors for a scheduling rule. The rule is applied in scheduling only when all its selectors are satisfied.

    -
    -
    -
    Figure 1 Node affinity scheduling policy
    -

-
-

Using kubectl

This section uses Nginx as an example to describe how to configure node affinity.

-

Prerequisites

-

A workload that uses the nginx container image has been deployed on a node.

-

Procedure

-

Set Label to kubernetes.io/hostname, add affinity nodes, and set the operator to In. Then, click OK.

-

YAML file of the workload with node affinity:

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: nginx
-  namespace: default
-spec:
-  replicas: 2
-  selector:
-    matchLabels:
-      app: nginx
-  template:
-    metadata:
-      labels:
-        app: nginx
-    spec:
-      containers:
-      - image: nginx 
-        imagePullPolicy: Always
-        name: nginx
-      imagePullSecrets:
-      - name: default-secret
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-              - matchExpressions:
-                  - key: kubernetes.io/hostname
-                    operator: In
-                    values:
-                     - 192.168.6.174
-
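The example above uses a required rule. A preferred rule, which uses the Weight parameter described in Table 2, would instead look like the following sketch (the weight and label value are illustrative):

      affinity:
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 80                        # higher weight, higher priority
            preference:
              matchExpressions:
              - key: kubernetes.io/hostname
                operator: In
                values:
                - 192.168.6.174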
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0233.html b/docs/cce/umn/cce_01_0233.html deleted file mode 100644 index 866f8ba1..00000000 --- a/docs/cce/umn/cce_01_0233.html +++ /dev/null @@ -1,133 +0,0 @@ - - -

Workload Affinity

-

Using the CCE Console

Workload affinity determines the pods with which the target workload is deployed in the same topology domain.

-
  1. Log in to the CCE console and choose Workloads > Deployments or Workloads > StatefulSets in the navigation pane.
  2. Click a workload name in the Deployment or StatefulSet list. On the displayed workload details page, click the Scheduling Policies tab and then click Add Custom Scheduling Policy.
  3. In the Pod Affinity area, set the namespace, topology key, and the label requirements to be met.

    There are two types of pod affinity rules: Required (hard rule) and Preferred (soft rule). The label operators include In, NotIn, Exists, and DoesNotExist.

    Table 1 Pod affinity settings

    Parameter

    -

    Description

    -

    Required

    -

    It specifies a rule that must be met in scheduling. It corresponds to requiredDuringSchedulingIgnoredDuringExecution in Kubernetes. You can click Add Rule to add multiple required rules. Ensure that all the labels specified in the rules belong to the same workload. Each rule requires a namespace and a topology key.

    -

    Preferred

    -

    It specifies a preference in scheduling. It corresponds to preferredDuringSchedulingIgnoredDuringExecution in Kubernetes. You can click Add Rule to add multiple preferred rules. The scheduler will try to enforce the rules but does not guarantee that they are met. If the scheduler cannot satisfy any of the rules, the pod will still be scheduled.

    -
    -
    -

  4. Set a rule according to the following table. You can click Add Selector to configure multiple selectors for a rule.

    -

    Table 2 Selector settings

    Parameter

    -

    Description

    -

    Weight

    -
    • This parameter is unavailable for a required rule.
    • Set the weight of a preferred rule. A higher weight indicates a higher priority.
    -

    Namespace

    -

    By default, the namespace of the current pod is used. You can also use another namespace.

    -

    Topology Key

    -

    Key of the worker node label that the system uses to denote a topology domain in which scheduling can be performed. Default and custom node labels can be used.

    -

    Label

    -

    Label of the workload. You can customize the label name.

    -

    Operator

    -

    The following relations are supported: In, NotIn, Exists, and DoesNotExist

    -

    Value

    -

    Label value.

    -

    Operators In and NotIn allow one or more label values. Values are separated with semicolons (;). Operators Exists and DoesNotExist are used to determine whether a label exists, and do not require a label value.

    -

    Operation

    -

    You can click Delete to delete a selector.

    -

    Add Selector

    -

    A selector corresponds to matchExpressions in Kubernetes. You can click Add Selector to add multiple selectors for a scheduling rule. The rule is applied in scheduling only when all its selectors are satisfied.

    -
    -
    -
    Figure 1 Pod affinity scheduling policy
    -

-
-

Using kubectl

This section uses Nginx as an example to describe how to configure pod affinity.

-

Prerequisites

-

A workload that uses the nginx container image has been deployed on a node.

-

Procedure

-

Set Namespace to default and Topology Key to the built-in node label kubernetes.io/hostname, which means that the scheduling scope is a node. Set labels app and type and their values to redis and database, respectively. Set Operator to In and click OK.

-

The YAML of the workload with pod affinity is as follows:

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: nginx
-  namespace: default
-spec:
-  replicas: 2
-  selector:
-    matchLabels:
-      app: nginx
-  template:
-    metadata:
-      labels:
-        app: nginx
-    spec:
-      containers:
-      - image: nginx 
-        imagePullPolicy: Always
-        name: nginx
-      imagePullSecrets:
-      - name: default-secret
-      affinity:
-        nodeAffinity: {}
-        podAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            - labelSelector:
-                matchExpressions:
-                  - key: app
-                    operator: In
-                    values:
-                      - redis
-                  - key: type
-                    operator: In
-                    values:
-                      - database
-              namespaces:
-                - default
-              topologyKey: kubernetes.io/hostname
-
-

In this example, only when a candidate workload (for example, workload A) with both labels app=redis and type=database is found can the workload Nginx be successfully scheduled to the node of the candidate workload.

-
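If co-location is a preference rather than a hard requirement, the same selector can be expressed as a preferred rule with a weight. A sketch:

      affinity:
        podAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100                       # higher weight, higher priority
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values:
                  - redis
              namespaces:
              - default
              topologyKey: kubernetes.io/hostname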
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0234.html b/docs/cce/umn/cce_01_0234.html deleted file mode 100644 index 88593f42..00000000 --- a/docs/cce/umn/cce_01_0234.html +++ /dev/null @@ -1,128 +0,0 @@ - - -

Workload Anti-Affinity

-

Using the CCE Console

Workload anti-affinity determines the pods from which the target workload is kept apart; the target workload is deployed in a different topology domain from these pods.

-
  1. Log in to the CCE console and choose Workloads > Deployments or Workloads > StatefulSets in the navigation pane.
  2. Click a workload name in the Deployment or StatefulSet list. On the displayed workload details page, click the Scheduling Policies tab and then click Add Custom Scheduling Policy.
  3. In the Pod Anti-Affinity area, set the namespace, topology key, and the label requirements to be met.

    There are two types of pod anti-affinity rules: Required (hard rule) and Preferred (soft rule), and the label operators include In, NotIn, Exists, and DoesNotExist.

    Table 1 Workload anti-affinity settings

    Parameter

    -

    Description

    -

    Required

    -

    It specifies a rule that must be met in scheduling. It corresponds to requiredDuringSchedulingIgnoredDuringExecution in Kubernetes. You can add multiple required rules. Ensure that all the labels specified in the rules belong to the same workload. Each rule requires a namespace and a topology key.

    -

    Preferred

    -

    It specifies a preference in scheduling. It corresponds to preferredDuringSchedulingIgnoredDuringExecution in Kubernetes. You can add multiple preferred rules. The scheduler will try to enforce the rules but does not guarantee that they are met. If the scheduler cannot satisfy any of the rules, the pod will still be scheduled.

    -
    -
    -

  4. Set a rule according to the following table. You can click Add Selector to configure multiple selectors for a rule.

    -

    Table 2 Selector settings

    Parameter

    -

    Description

    -

    Weight

    -
    • This parameter is unavailable for a required rule.
    • Set the weight of a preferred rule. A higher weight indicates a higher priority.
    -

    Namespace

    -

    By default, the namespace of the current pod is used. You can also use another namespace.

    -

    Topology Key

    -

    Key of the worker node label that the system uses to denote a topology domain in which scheduling can be performed. Default and custom node labels can be used.

    -

    Label

    -

    Label of the workload. You can customize the label name.

    -

    Operator

    -

    The following relations are supported: In, NotIn, Exists, and DoesNotExist

    -

    Value

    -

    Label value.

    -

    Operators In and NotIn allow one or more label values. Values are separated with semicolons (;). Operators Exists and DoesNotExist are used to determine whether a label exists, and do not require a label value.

    -

    Operation

    -

    You can click Delete to delete a selector.

    -

    Add Selector

    -

    A selector corresponds to matchExpressions in Kubernetes. You can click Add Selector to add multiple selectors for a scheduling rule. The rule is applied in scheduling only when all its selectors are satisfied.

    -
    -
    -
    Figure 1 Pod anti-affinity scheduling policy
    -

-
-

Using kubectl

This section uses Nginx as an example to describe how to configure pod anti-affinity.

-

Prerequisites

-

A workload that uses the nginx container image has been deployed on a node.

-

Procedure

-

Set Namespace to default and Topology Key to the built-in node label kubernetes.io/hostname, which means that the scheduling scope is a node. Set the label app and its value to redis. Set Operator to In and click OK.

-

The YAML of the workload with pod anti-affinity:

-
-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: nginx
-  namespace: default
-spec:
-  replicas: 2
-  selector:
-    matchLabels:
-      app: nginx 
-  template:
-    metadata:
-      labels:
-        app: nginx
-    spec:
-      containers:
-      - image: nginx 
-        imagePullPolicy: Always
-        name: nginx
-      imagePullSecrets:
-      - name: default-secret
-      affinity:
-        nodeAffinity: {}
-        podAffinity: {}
-        podAntiAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            - labelSelector:
-                matchExpressions:
-                  - key: app
-                    operator: In
-                    values:
-                      - redis
-              namespaces:
-                - default
-              topologyKey: kubernetes.io/hostname
-
-
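If the cluster does not have enough eligible nodes, pods restricted by a required anti-affinity rule stay in the Pending state. You can inspect the scheduler events to confirm the cause (a sketch; the pod name is illustrative):

kubectl get pod -n default | grep nginx
kubectl describe pod nginx-6799fc88d8-abcde -n default
# Look for FailedScheduling events that mention the pod anti-affinity rule.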
- -
- diff --git a/docs/cce/umn/cce_01_0247.html b/docs/cce/umn/cce_01_0247.html deleted file mode 100644 index 0a04fdcf..00000000 --- a/docs/cce/umn/cce_01_0247.html +++ /dev/null @@ -1,27 +0,0 @@ - - - -

Services

- -

-
- -
- - - -
- diff --git a/docs/cce/umn/cce_01_0248.html b/docs/cce/umn/cce_01_0248.html deleted file mode 100644 index 7fa196e8..00000000 --- a/docs/cce/umn/cce_01_0248.html +++ /dev/null @@ -1,19 +0,0 @@ - - -

Ingress

-
-
- - - -
- diff --git a/docs/cce/umn/cce_01_0251.html b/docs/cce/umn/cce_01_0251.html deleted file mode 100644 index 13aa656a..00000000 --- a/docs/cce/umn/cce_01_0251.html +++ /dev/null @@ -1,51 +0,0 @@ - - -

Using ELB Ingresses on the Console

-

Prerequisites

  • An ingress provides network access for backend workloads. Ensure that a workload is available in a cluster. If no workload is available, deploy a workload by referring to Creating a Deployment, Creating a StatefulSet, or Creating a DaemonSet.
  • A NodePort Service has been configured for the workload. For details about how to configure the Service, see NodePort.
-
-

Precautions

  • It is recommended that other resources not use the load balancer automatically created by an ingress. Otherwise, the load balancer will be occupied when the ingress is deleted, resulting in residual resources.
  • After an ingress is created, upgrade and maintain the configuration of the selected load balancers on the CCE console. Do not modify the configuration on the ELB console. Otherwise, the ingress service may be abnormal.
  • The URL registered in an ingress forwarding policy must be the same as the URL exposed by the backend Service. Otherwise, a 404 error will be returned.
-
-

Adding an ELB Ingress

This section uses an Nginx workload as an example to describe how to add an ELB ingress.

-
  1. Log in to the CCE console.
  2. In the navigation pane, choose Resource Management > Network. On the Ingresses tab page, select the corresponding cluster and namespace.
  3. Click Create Ingress to access the ingress configuration page.

    Set the ingress parameters as required. The key parameters are as follows:
    • Access Type: Use a load balancer to access Services. Requests can be forwarded only to NodePort Services.
    • Ingress Name: Specify a name of an ingress, for example, ingress-demo.
    • Cluster Name: Select the cluster to which the ingress is to be added.
    • Namespace: Select the namespace to which the ingress is to be added.
    • ELB Configuration: Ingress uses the load balancer of the ELB service to provide layer-7 network access. You can select an existing load balancer or have the system automatically create a new one. To manually create a load balancer, click Create Load Balancer and then click the refresh button.
      • It is recommended that other resources not use the load balancer automatically created by an ingress. Otherwise, the load balancer will be occupied when the ingress is deleted, resulting in residual resources.
      • Dedicated load balancers are supported only when the cluster version is 1.17 or later.
      • To interconnect with an existing dedicated load balancer, ensure that HTTP is supported and the network type supports private networks.
      -
      -

      Elastic Load Balancer: The selected or created load balancer must be in the same VPC as the current cluster, and it must match the load balancer type (private or public network).

      -

      You can create public network or private network load balancers. The default value is Public network.

      -
      • Public network: After you attach an EIP to a load balancer, the load balancer can distribute requests from the Internet to backend servers.
        • Enterprise Project: Select an enterprise project in which the load balancer is created.
        • Change Configuration: When selecting Public network > Automatically created, you can click Change Configuration to modify the name, specifications, billing mode, and bandwidth of the ELB instance to be created.
        -
      • Private network: After you attach a private IP address to a load balancer, the load balancer can distribute requests from the clients in the same VPC to backends.
        • Enterprise Project: Select an enterprise project in which the load balancer is created.
        -
      -
    • Listener Configuration: Ingress configures a listener for the load balancer, which listens to requests from the load balancer and distributes traffic. After the configuration is complete, a listener is created on the load balancer. The default listener name is k8s__<Protocol type>_<Port number>, for example, k8s_HTTP_80.
      • Front-End Protocol: HTTP and HTTPS are available.
      • External Port: Port number that is open to the ELB service address. You can specify any valid port number.
      • Server Certificate: When an HTTPS listener is created for a load balancer, you need to bind a certificate to the load balancer to support encrypted authentication for HTTPS data transmission. For details on how to create a secret, see Creating a Secret.

        If there is already an HTTPS ingress for the chosen port on the load balancer, the certificate of the new HTTPS ingress must be the same as the certificate of the existing ingress. This means that a listener has only one certificate. If two certificates, each with a different ingress, are added to the same listener of the same load balancer, only the certificate added earliest takes effect on the load balancer.

        -
        -
      • SNI: Click to enable the Server Name Indication (SNI) function. SNI is an extended protocol of TLS. It allows multiple TLS-based access domain names to be provided for external systems using the same IP address and port number. Different domain names can use different security certificates. After SNI is enabled, the client is allowed to submit the requested domain name when initiating a TLS handshake request. After receiving the TLS request, the load balancer searches for the certificate based on the domain name in the request. If the certificate corresponding to the domain name is found, the load balancer returns the certificate for authorization. Otherwise, the default certificate (server certificate) is returned for authorization.
        • The SNI option is available only when HTTPS is selected.
        -
        • This function is supported only for clusters of v1.15.11 and later.
        • Specify the domain name for the SNI certificate. Only one domain name can be specified for each certificate. Wildcard-domain certificates are supported.
        -
        -
      • Security Policy: combinations of different TLS versions and supported cipher suites available to HTTPS listeners.

        For details about security policies, see ELB User Guide.

        -
        • Security Policy is available only when HTTPS is selected.
        • This function is supported only for clusters of v1.17.9 and later.
        -
        -
      -
    • Forwarding Policies: When the access address of a request matches the forwarding policy (a forwarding policy consists of a domain name and URL, for example, 10.117.117.117:80/helloworld), the request is forwarded to the corresponding target Service for processing. Click Add Forwarding Policies to add multiple forwarding policies.
      • Domain Name: actual domain name. Ensure that the domain name has been registered and archived. Once a domain name rule is configured, you must use the domain name for access.
      • Rule Matching
        • Prefix match: If the URL is set to /healthz, all URLs starting with this prefix can be accessed, for example, /healthz/v1 and /healthz/v2.
        • Exact match: The URL can be accessed only when it is fully matched. For example, if the URL is set to /healthz, only /healthz can be accessed.
        • Regular expression: The URL is matched based on the regular expression. For example, if the regular expression is /[A-Za-z0-9_.-]+/test, all URLs that comply with this rule can be accessed, for example, /abcA9/test and /v1-Ab/test. Two regular expression standards are supported: POSIX and Perl.
        -
      • URL: access path to be registered, for example, /healthz.
      • Target Service: Select an existing Service or create a Service. Services that do not meet search criteria are automatically filtered out.
      • Service Access Port: Select the access port of the target Service.
      • ELB Settings: If multiple forwarding policies use the same Service, they share the load balancing configuration of that Service.
        • Algorithm Type: Three algorithms are available: weighted round robin, weighted least connections algorithm, or source IP hash. For details about the allocation policies, see LoadBalancer.
        • Sticky Session: This function is disabled by default. After this function is enabled, you need to select a sticky session type and set the sticky session duration.

          ELB cookie: The load balancer generates a cookie after receiving a request from the client. All subsequent requests with the cookie are routed to the same backend server for processing.

          -

          Application cookie: The application deployed on the backend server generates a cookie after receiving the first request from the client. All subsequent requests that contain the cookie are routed to this backend server. This sticky session type is supported by shared load balancers.

          -
        • Health Check: This function is disabled by default. To enable this function, set parameters as prompted. For details about the parameters, see Configuring a Health Check.
        -
      • Operation: Click Delete to delete the configuration.
      -
    -
    -

  4. After the configuration is complete, click Create. After the ingress is created, it is displayed in the ingress list.

    On the ELB console, you can view the ELB automatically created through CCE. The default name is cce-lb-ingress.UID. Click the ELB name to access its details page. On the Listeners tab page, view the route settings of the ingress, including the URL, listener port, and backend server group port.

    -

    After the ingress is created, upgrade and maintain the selected load balancer on the CCE console. Do not maintain the load balancer on the ELB console. Otherwise, the ingress service may be abnormal.

    -
    -

  5. Access the /healthz interface of the workload, for example, workload defaultbackend.

    1. Obtain the access address of the /healthz interface of the workload. The access address consists of the load balancer IP address, external port, and mapping URL, for example, 10.**.**.**:80/healthz.
    2. Enter the URL of the /healthz interface, for example, http://10.**.**.**:80/healthz, in the address box of the browser to access the workload, as shown in Figure 1.
      Figure 1 Accessing the /healthz interface of defaultbackend
      -
    -

-
-
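If you prefer the command line over a browser, the same check can be done with curl (a sketch; replace the placeholder with the load balancer address shown on the console):

curl -i http://<load_balancer_ip>:80/healthz
# An HTTP 200 response indicates that the forwarding policy routes requests to the backend Service.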

Updating an Ingress

After adding an ingress, you can update its port, domain name, and route configuration. The procedure is as follows:

-

You can modify the load balancer settings, including algorithm, sticky session, and health check configurations, after you select a Service in Forwarding Policies on the CCE console. Do not modify these configurations on the ELB console.

-
-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Network. On the Ingresses tab page, filter ingresses by cluster and namespace, and click Update for the ingress to be updated.
  2. On the Update Ingress page, modify the required parameters.

    The parameters are the same as those set during creation.

    -

  3. Click Submit. The ingress will be updated for the workload.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0252.html b/docs/cce/umn/cce_01_0252.html deleted file mode 100644 index b6e957e3..00000000 --- a/docs/cce/umn/cce_01_0252.html +++ /dev/null @@ -1,595 +0,0 @@ - - -

Using kubectl to Create an ELB Ingress

-

Scenario

This section uses an Nginx workload as an example to describe how to create an ELB ingress using kubectl.

- -
-

Prerequisites

  • An ingress provides network access for backend workloads. Ensure that a workload is available in a cluster. If no workload is available, deploy a sample Nginx workload by referring to Creating a Deployment, Creating a StatefulSet, or Creating a DaemonSet.
  • A NodePort Service has been configured for the workload. For details about how to configure the Service, see NodePort.
-
-

Creating an Ingress - Automatically Creating a Load Balancer

The following describes how to run the kubectl command to automatically create a load balancer when creating an ingress.

-
  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Create a YAML file named ingress-test.yaml. The file name can be customized.

    vi ingress-test.yaml

    -
    • For clusters of v1.15 or later, the value of apiVersion is networking.k8s.io/v1beta1.
    • For clusters of v1.13 or earlier, the value of apiVersion is extensions/v1beta1.
    -
    -

    You can create a load balancer as required. The YAML files are as follows:

    -
    Example of using a shared public network load balancer:
    apiVersion: networking.k8s.io/v1beta1
    -kind: Ingress 
    -metadata: 
    -  name: ingress-test
    -  annotations: 
    -    kubernetes.io/elb.class: union
    -    kubernetes.io/ingress.class: cce
    -    kubernetes.io/elb.port: '80'
    -    kubernetes.io/elb.autocreate: 
    -      '{
    -          "type":"public",
    -          "bandwidth_name":"cce-bandwidth-******",
    -          "bandwidth_chargemode":"traffic",
    -          "bandwidth_size":5,
    -          "bandwidth_sharetype":"PER",
    -          "eip_type":"5_bgp"
    -        }'
    -spec:
    -  rules: 
    -  - host: ''
    -    http: 
    -      paths: 
    -      - path: '/'
    -        backend: 
    -          serviceName: <your_service_name>  # Replace it with the name of your target Service.
    -          servicePort: 80
    -        property:
    -          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
    -
    -

    Example of using a dedicated public network load balancer:

    -
    apiVersion: networking.k8s.io/v1beta1
    -kind: Ingress
    -metadata:
    -  name: ingress-test
    -  namespace: default
    -  annotations:
    -    kubernetes.io/elb.class: performance
    -    kubernetes.io/ingress.class: cce
    -    kubernetes.io/elb.port: '80'
    -    kubernetes.io/elb.autocreate: 
    -      '{
    -          "type": "public",
    -          "bandwidth_name": "cce-bandwidth-******",
    -          "bandwidth_chargemode": "traffic",
    -          "bandwidth_size": 5,
    -          "bandwidth_sharetype": "PER",
    -          "eip_type": "5_bgp",
    -          "available_zone": [
    -              "eu-de-01"
    -          ],
    -          "l7_flavor_name": "L7_flavor.elb.s1.small"
    -       }'
    -spec:
    -  rules:
    -  - host: ''
    -    http:
    -      paths:
    -      - path: '/'
    -        backend: 
    -          serviceName: <your_service_name>  # Replace it with the name of your target Service.
    -          servicePort: 80
    -        property:
    -          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
    - -
    Table 1 Key parameters

    Parameter

    -

    Mandatory

    -

    Type

    -

    Description

    -

    kubernetes.io/elb.class

    -

    No

    -

    String

    -

    Select a proper load balancer type.

    -

    The value can be:

    -
    • union: shared load balancer
    • performance: dedicated load balancer.
    -

    The default value is union.

    -

    kubernetes.io/ingress.class

    -

    Yes

    -

    String

    -

    cce: The self-developed ELBIngress is used.

    -

    This parameter is mandatory when an ingress is created by calling the API.

    -

    kubernetes.io/elb.port

    -

    Yes

    -

    Integer

    -

    This parameter indicates the external port registered with the address of the LoadBalancer Service.

    -

    Supported range: 1 to 65535

    -

    kubernetes.io/elb.subnet-id

    -

    -

    -

    String

    -

    ID of the subnet where the cluster is located. The value can contain 1 to 100 characters.

    -
    • Mandatory when a load balancer is to be automatically created in a cluster of v1.11.7-r0 or earlier.
    • Optional for clusters later than v1.11.7-r0. It is left blank by default.
    -

    kubernetes.io/elb.enterpriseID

    -

    No

    -

    String

    -

    Kubernetes clusters of v1.15 and later versions support this field. In Kubernetes clusters earlier than v1.15, load balancers are created in the default project by default.

    -

    ID of the enterprise project in which the load balancer will be created.

    -

    The value contains 1 to 100 characters.

    -

    How to obtain:

    -

    Log in to the management console and choose Enterprise > Project Management on the top menu bar. In the list displayed, click the name of the target enterprise project, and copy the ID on the enterprise project details page.

    -

    kubernetes.io/elb.autocreate

    -

    Yes

    -

    elb.autocreate object

    -

    Whether to automatically create a load balancer associated with an ingress. For details about the field description, see Table 2.

    -

    Example

    -
    • If a public network load balancer will be automatically created, set this parameter to the following value:

      '{"type":"public","bandwidth_name":"cce-bandwidth-******","bandwidth_chargemode":"traffic","bandwidth_size":5,"bandwidth_sharetype":"PER","eip_type":"5_bgp","name":"james"}'

      -
    • If a private network load balancer will be automatically created, set this parameter to the following value:

      {"type":"inner","name":"A-location-d-test"}

      -
    -

    host

    -

    No

    -

    String

    -

    Domain name for accessing the Service. By default, this parameter is left blank, and the domain name needs to be fully matched.

    -

    path

    -

    Yes

    -

    String

    -

    User-defined route path. All external access requests must match host and path.

    -

    serviceName

    -

    Yes

    -

    String

    -

    Name of the target Service bound to the ingress.

    -

    servicePort

    -

    Yes

    -

    Integer

    -

    Access port of the target Service.

    -

    ingress.beta.kubernetes.io/url-match-mode

    -

    No

    -

    String

    -

    Route matching policy.

    -

    Default: STARTS_WITH (prefix match)

    -

    Options:

    -
    • EQUAL_TO: exact match
    • STARTS_WITH: prefix match
    • REGEX: regular expression match
    -
    -
    - -
    Table 2 Data structure of the elb.autocreate field

    Parameter

    -

    Mandatory

    -

    Type

    -

    Description

    -

    type

    -

    No

    -

    String

    -

    Network type of the load balancer.

    -
    • public: public network load balancer
    • inner: private network load balancer
    -

    The default value is inner.

    -

    bandwidth_name

    -

    Yes for public network load balancers

    -

    String

    -

    Bandwidth name. The default value is cce-bandwidth-******.

    -

    Value range: a string of 1 to 64 characters, including lowercase letters, digits, and underscores (_). The value must start with a lowercase letter and end with a lowercase letter or digit.

    -

    bandwidth_chargemode

    -

    Yes

    -

    String

    -

    Bandwidth billing mode.

    -
    • traffic: billed by traffic
    -

    bandwidth_size

    -

    Yes for public network load balancers

    -

    Integer

    -

    Bandwidth size. The value ranges from 1 Mbit/s to 2000 Mbit/s by default. The actual range varies depending on the configuration in each region.

    -
    • The minimum increment for bandwidth adjustment varies depending on the bandwidth range. The details are as follows:
      • The minimum increment is 1 Mbit/s if the allowed bandwidth ranges from 0 Mbit/s to 300 Mbit/s (with 300 Mbit/s included).
      • The minimum increment is 50 Mbit/s if the allowed bandwidth ranges from 300 Mbit/s to 1000 Mbit/s.
      • The minimum increment is 500 Mbit/s if the allowed bandwidth is greater than 1000 Mbit/s.
      -
    -

    bandwidth_sharetype

    -

    Yes for public network load balancers

    -

    String

    -

    Bandwidth type.

    -

    PER: dedicated bandwidth

    -

    eip_type

    -

    Yes for public network load balancers

    -

    String

    -

    EIP type, which may vary depending on sites. For details, see the type parameter specified when creating an EIP.

    -
    • 5_bgp: dynamic BGP
    • 5_gray: dedicated load balancer
    -

    name

    -

    No

    -

    String

    -

    Name of the automatically created load balancer.

    -

    Value range: a string of 1 to 64 characters, including lowercase letters, digits, and underscores (_). The value must start with a lowercase letter and end with a lowercase letter or digit.

    -

    Default value: cce-lb+ingress.UID

    -
    -
    -

  3. Create an ingress.

    kubectl create -f ingress-test.yaml

    -

    If information similar to the following is displayed, the ingress has been created.

    -
    ingress/ingress-test created
    -

    kubectl get ingress

    -

    If information similar to the following is displayed, the ingress has been created successfully and the workload is accessible.

    -
    NAME             HOSTS     ADDRESS          PORTS   AGE
    -ingress-test     *         121.**.**.**     80      10s
    -

  4. Enter http://121.**.**.**:80 in the address box of the browser to access the workload (for example, Nginx workload).

    121.**.**.** indicates the IP address of the load balancer.

    -

-
-
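If the ADDRESS column stays empty or the workload is unreachable, inspecting the ingress usually reveals the cause (a sketch using standard kubectl commands):

kubectl describe ingress ingress-test
# Check the Annotations section and the Events at the bottom for errors such as
# a failed load balancer creation or an invalid annotation value.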

Creating an Ingress - Interconnecting with an Existing Load Balancer

CCE allows you to connect to an existing load balancer when creating an ingress.
  • For clusters of v1.15 or later, the value of apiVersion is networking.k8s.io/v1beta1.
  • For clusters of v1.13 or earlier, the value of apiVersion is extensions/v1beta1.
  • To interconnect with an existing dedicated load balancer, ensure that HTTP is supported and the network type supports private networks.
-
-
-

If the cluster version is 1.15 or later, the YAML file configuration is as follows:

-
apiVersion: networking.k8s.io/v1beta1
-kind: Ingress 
-metadata: 
-  name: ingress-test
-  annotations: 
-    kubernetes.io/elb.class: performance                               # Load balancer type
-    kubernetes.io/elb.id: <your_elb_id>  # Replace it with the ID of your existing load balancer.
-    kubernetes.io/elb.ip: <your_elb_ip>  # Replace it with your existing load balancer IP.
-    kubernetes.io/elb.port: '80'
-    kubernetes.io/ingress.class: cce
-spec:
-  rules: 
-  - host: ''
-    http: 
-      paths: 
-      - path: '/'
-        backend: 
-          serviceName: <your_service_name>  # Replace it with the name of your target Service.
-          servicePort: 80
-        property:
-          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
- -
Table 3 Key parameters

Parameter

-

Mandatory

-

Type

-

Description

-

kubernetes.io/elb.class

-

No

-

String

-

Select a proper load balancer type.

-

The value can be:

-
  • union: shared load balancer
  • performance: dedicated load balancer.
-

Defaults to union.

-

kubernetes.io/elb.id

-

Yes

-

String

-

This parameter indicates the ID of a load balancer. The value can contain 1 to 100 characters.

-

How to obtain:

-

On the management console, click Service List, and choose Networking > Elastic Load Balance. Click the name of the target load balancer. On the Summary tab page, find and copy the ID.

-

kubernetes.io/elb.ip

-

Yes

-

String

-

This parameter indicates the service address of a load balancer. The value can be the public IP address of a public network load balancer or the private IP address of a private network load balancer.

-
-
-
-

Configuring HTTPS Certificates

Ingress supports TLS certificate configuration and provides security services in HTTPS mode.

-
  • If a Service needs to be exposed using HTTPS, you must configure the TLS certificate in the ingress. For details on how to create a secret, see Creating a Secret.
  • If multiple ingresses use HTTPS on the same port of the same load balancer, they must use the same certificate.
-
-
  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following command to create a YAML file named ingress-test-secret.yaml (the file name can be customized):

    vi ingress-test-secret.yaml

    -
    The YAML file is configured as follows:
    apiVersion: v1
    -data:
    -  tls.crt: LS0******tLS0tCg==
    -  tls.key: LS0tL******0tLS0K
    -kind: Secret
    -metadata:
    -  annotations:
    -    description: test for ingressTLS secrets
    -  name: ingress-test-secret
    -  namespace: default
    -type: IngressTLS
    -
    -

    In the preceding information, tls.crt and tls.key are only examples. Replace them with the actual files. The values of tls.crt and tls.key are the file content encoded using Base64 (see the sketch after this procedure).

    -
    -

  3. Create a secret.

    kubectl create -f ingress-test-secret.yaml

    -

    If information similar to the following is displayed, the secret has been created:

    -
    secret/ingress-test-secret created
    -

    View the created secrets.

    -

    kubectl get secrets

    -

    If information similar to the following is displayed, the secret has been created successfully:

    -
    NAME                         TYPE                                  DATA      AGE
    -ingress-test-secret          IngressTLS                            2         13s
    -

  4. Create a YAML file named ingress-test.yaml. The file name can be customized.

    vi ingress-test.yaml

    -

    Security policy (kubernetes.io/elb.tls-ciphers-policy) is supported only in clusters of v1.17.11 or later.

    -
    -

    Example YAML file to associate an existing load balancer:

    -
    apiVersion: networking.k8s.io/v1beta1
    -kind: Ingress 
    -metadata: 
    -  name: ingress-test
    -  annotations: 
    -    kubernetes.io/elb.class: performance                               # Load balancer type
    -    kubernetes.io/elb.id: <your_elb_id>  # Replace it with the ID of your existing load balancer.
    -    kubernetes.io/elb.ip: <your_elb_ip>  # Replace it with the IP of your existing load balancer.
    -    kubernetes.io/ingress.class: cce
    -    kubernetes.io/elb.port: '443'
    -    kubernetes.io/elb.tls-ciphers-policy: tls-1-2
    -spec:
    -  tls: 
    -  - secretName: ingress-test-secret
    -  rules: 
    -  - host: ''
    -    http: 
    -      paths: 
    -      - path: '/'
    -        backend: 
    -          serviceName: <your_service_name>  # Replace it with the name of your target Service.
    -          servicePort: 80
    -        property:
    -          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
    - -
    Table 4 Key parameters

    Parameter

    -

    Mandatory

    -

    Type

    -

    Description

    -

    kubernetes.io/elb.tls-ciphers-policy

    -

    No

    -

    String

    -

    The default value is tls-1-2, which is the security policy used by the listener and takes effect only when the HTTPS protocol is used.

    -

    Options:

    -
    • tls-1-0
    • tls-1-1
    • tls-1-2
    • tls-1-2-strict
    -

    For details of cipher suites for each security policy, see Table 5.

    -

    tls

    -

    No

    -

    Array of strings

    -

    This parameter is mandatory if HTTPS is used. Multiple independent domain names and certificates can be added to this parameter. For details, see Configuring the Server Name Indication (SNI).

    -

    secretName

    -

    No

    -

    String

    -

    This parameter is mandatory if HTTPS is used. Set this parameter to the name of the created secret.

    -
    -
    - -
    Table 5 tls_ciphers_policy parameter description

    Security Policy

    -

    TLS Version

    -

    Cipher Suite

    -

    tls-1-0

    -

    TLS 1.2

    -

    TLS 1.1

    -

    TLS 1.0

    -

    ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:AES128-SHA256:AES256-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:AES128-SHA:AES256-SHA

    -

    tls-1-1

    -

    TLS 1.2

    -

    TLS 1.1

    -

    tls-1-2

    -

    TLS 1.2

    -

    tls-1-2-strict

    -

    TLS 1.2

    -

    ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:AES128-SHA256:AES256-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384

    -
    -
    -

  5. Create an ingress.

    kubectl create -f ingress-test.yaml

    -

    If information similar to the following is displayed, the ingress has been created.

    -
    ingress/ingress-test created
    -

    View the created ingress.

    -

    kubectl get ingress

    -

    If information similar to the following is displayed, the ingress has been created successfully and the workload is accessible.

    -
    NAME             HOSTS     ADDRESS          PORTS   AGE
    -ingress-test     *         121.**.**.**     80      10s
    -

  6. Enter https://121.**.**.**:443 in the address box of the browser to access the workload (for example, Nginx workload).

    121.**.**.** indicates the IP address of the load balancer.

    -

-
-
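As noted in the secret step above, the certificate and private key must be Base64-encoded before they are placed into tls.crt and tls.key. A minimal sketch using the GNU coreutils base64 command (file names are illustrative; -w0 disables line wrapping):

base64 -w0 server.crt    # paste the output into tls.crt
base64 -w0 server.key    # paste the output into tls.key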

Configuring the Server Name Indication (SNI)

SNI allows multiple TLS-based access domain names to be provided for external systems using the same IP address and port number. Different domain names can use different security certificates.
  • Only one domain name can be specified for each SNI certificate. Wildcard-domain certificates are supported.
  • Security policy (kubernetes.io/elb.tls-ciphers-policy) is supported only in clusters of v1.17.11 or later.
-
-
-
You can enable SNI when the preceding conditions are met. The following example interconnects with an existing load balancer. In this example, sni-test-secret-1 and sni-test-secret-2 are SNI certificates. The domain names specified in the hosts field must be the same as those in the certificates.
apiVersion: networking.k8s.io/v1beta1
-kind: Ingress 
-metadata: 
-  name: ingress-test
-  annotations: 
-    kubernetes.io/elb.class: performance                               # Load balancer type
-    kubernetes.io/elb.id: <your_elb_id>  # Replace it with the ID of your existing load balancer.
-    kubernetes.io/elb.ip: <your_elb_ip>  # Replace it with the IP of your existing load balancer.
-    kubernetes.io/ingress.class: cce
-    kubernetes.io/elb.port: '443'
-    kubernetes.io/elb.tls-ciphers-policy: tls-1-2
-spec:
-  tls: 
-  - secretName: ingress-test-secret
-  - hosts:
-      - example.top  # Domain name for which the certificate is issued
-    secretName: sni-test-secret-1  
-  - hosts:
-      - example.com  # Domain name for which the certificate is issued
-    secretName: sni-test-secret-2
-  rules: 
-  - host: ''
-    http: 
-      paths: 
-      - path: '/'
-        backend: 
-          serviceName: <your_service_name>  # Replace it with the name of your target Service.
-          servicePort: 80
-        property:
-          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
-
-
-
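To confirm that the load balancer returns the expected certificate for each domain name, you can inspect the TLS handshake (a sketch using openssl; replace the placeholder with the load balancer address):

openssl s_client -connect <load_balancer_ip>:443 -servername example.top </dev/null | openssl x509 -noout -subject
openssl s_client -connect <load_balancer_ip>:443 -servername example.com </dev/null | openssl x509 -noout -subject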

Accessing Multiple Services

Ingresses can route requests to multiple backend Services based on different matching policies. The spec field in the YAML file is set as below. You can access www.example.com/foo, www.example.com/bar, and foo.example.com/ to route to three different backend Services.

-

The URL registered in an ingress forwarding policy must be the same as the URL exposed by the backend Service. Otherwise, a 404 error will be returned.

-
-
spec:
-  rules: 
-  - host: 'www.example.com'
-    http: 
-      paths: 
-      - path: '/foo'
-        backend: 
-          serviceName: <your_service_name>  # Replace it with the name of your target Service.
-          servicePort: 80
-        property:
-          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
-      - path: '/bar'
-        backend:
-          serviceName: <your_service_name>  # Replace it with the name of your target Service.
-          servicePort: 80
-        property:
-          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
-  - host: 'foo.example.com'
-    http:
-      paths:
-      - path: '/'
-        backend:
-          serviceName: <your_service_name>  # Replace it with the name of your target Service.
-          servicePort: 80
-        property:
-          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
-
-
-
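Without configuring DNS, you can exercise the three forwarding rules by passing the Host header explicitly (a sketch; replace the placeholder with the load balancer address):

curl -H "Host: www.example.com" http://<load_balancer_ip>/foo
curl -H "Host: www.example.com" http://<load_balancer_ip>/bar
curl -H "Host: foo.example.com" http://<load_balancer_ip>/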
- -
- diff --git a/docs/cce/umn/cce_01_0254.html b/docs/cce/umn/cce_01_0254.html deleted file mode 100644 index b806cd0a..00000000 --- a/docs/cce/umn/cce_01_0254.html +++ /dev/null @@ -1,151 +0,0 @@ - - -

Using EVS Volumes

-

Prerequisites

You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

-
-

Notes and Constraints

  • EVS disks cannot be attached across AZs and cannot be used by multiple workloads, multiple pods of the same workload, or multiple jobs.
  • Data in a shared disk cannot be shared between nodes in a CCE cluster. If the same EVS disk is attached to multiple nodes, read and write conflicts and data cache conflicts may occur. When creating a Deployment, you are advised to create only one pod if you want to use EVS disks.
  • For clusters earlier than v1.19.10, if an HPA policy is used to scale out a workload with EVS volumes mounted, the existing pods cannot be read or written when a new pod is scheduled to another node.

    For clusters of v1.19.10 and later, if an HPA policy is used to scale out a workload with EVS volume mounted, a new pod cannot be started because EVS disks cannot be attached.

    -
  • When you create a StatefulSet and add a cloud storage volume, existing EVS volumes cannot be used.
  • EVS disks that have partitions or have non-ext4 file systems cannot be imported.
  • Container storage in CCE clusters of Kubernetes 1.13 or later version supports encryption. Currently, E2E encryption is supported only in certain regions.
  • EVS volumes cannot be created in specified enterprise projects. Only the default enterprise project is supported.
-
-

Creating an EVS Disk

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. Click Create EVS Disk.
  2. Configure basic disk information. Table 1 describes the parameters.

    -

    Table 1 Configuring basic disk information

    Parameter

    -

    Description

    -

    * PVC Name

    -

    New PVC Name: name of the PVC to be created. A storage volume is automatically created when a PVC is created. One PVC corresponds to one storage volume. The storage volume name is automatically generated when the PVC is created.

    -

    Cluster Name

    -

    Cluster where the EVS disk is deployed.

    -

    Namespace

    -

    Select the namespace where the EVS disk is deployed. If you do not need to select a namespace, retain the default value.

    -

    Volume Capacity (GB)

    -

    Size of the storage to be created.

    -

    Access Mode

    -

    Access permissions of user applications on storage resources (PVs).

    -
    • ReadWriteOnce (RWO): The volume can be mounted as read-write by a single node, and data reading and writing are supported based on a non-shared EVS volume. EVS volumes in RWO mode are supported since v1.13.10-r1.
    -

    Primary AZ

    -

    AZ to which the volume belongs.

    -

    Type

    -

    Type of the new EVS disk.

    -
    • Common I/O: uses Serial Advanced Technology Attachment (SATA) drives to store data.
    • High I/O: uses serial attached SCSI (SAS) drives to store data.
    • Ultra-high I/O: uses solid state disk (SSD) drives to store data.
    -

    Storage Format

    -

    The default value is CSI and cannot be changed.

    -

    Encryption

    -

    KMS Encryption is deselected by default.

    -

    After KMS Encryption is selected, Key Management Service (KMS), an easy-to-use and highly secure cloud service for your keys, will be used for EVS disks. If no agency has been created, click Create Agency and set the following parameters:

    -
    • Agency Name: Agencies can be used to assign permissions to trusted accounts or cloud services for a specific period of time. If no agency is created, click Create Agency. The agency name EVSAccessKMS indicates that EVS is granted the permission to access KMS. After EVS is authorized successfully, it can obtain KMS keys to encrypt and decrypt EVS disks.
    • Key Name: After a key is created, it can be loaded and used in containerized applications.
    • Key ID: generated by default.
    -

    This function is supported only for clusters of v1.13.10 and later in certain regions.

    -
    -
    -

  3. Review your order, click Submit, and wait until the creation is successful.

    The EVS disk is displayed in the list. When its status becomes Normal, the EVS disk is created successfully.

    -

  4. Click the volume name to view detailed information about the volume.
-
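Behind this console form, a PersistentVolumeClaim (PVC) is created for the everest CSI plug-in. A minimal sketch of an equivalent PVC is shown below (values are illustrative; the region, AZ, and disk type must match your environment, and csi-disk is the storage class used in the StatefulSet example later in this document):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-evs-example          # illustrative PVC name
  namespace: default
  labels:
    failure-domain.beta.kubernetes.io/region: eu-de     # region of the cluster
    failure-domain.beta.kubernetes.io/zone: eu-de-01    # AZ where the disk is created
  annotations:
    everest.io/disk-volume-type: SAS                    # High I/O disk type
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  storageClassName: csi-disk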
-

Adding an EVS Volume

  1. Create a workload or job by referring to Creating a Deployment, Creating a StatefulSet, or Creating a Job. During creation, expand Data Storage after adding a container. On the Cloud Volume tab page, click Add Cloud Volume.
  2. Set the storage volume type to EVS.

    -

    Table 2 Parameters required for mounting an EVS volume

    Parameter

    -

    Description

    -

    Type

    -

    EVS: You can use EVS disks the same way you use traditional hard disks on servers. EVS disks deliver higher data reliability and I/O throughput and are easy to use. They can be used for file systems, databases, or other system software and applications that require block storage resources.

    -
    CAUTION:
    • To attach an EVS disk to a workload, you must set the number of pods to 1 when creating the workload. If multiple pods are configured, you cannot attach EVS disks.
    • When you create a StatefulSet and add a cloud storage volume, existing EVS volumes cannot be used.
    • EVS disks cannot be attached across AZs and cannot be used by multiple workloads, multiple pods of the same workload, or multiple jobs.
    -
    -

    Allocation Mode

    -

    Manual

    -

    Select a created disk. If no disk is available, follow the prompts to create one.

    -

    For the same cluster and namespace, you can use an existing storage volume when creating a Deployment (with Allocation Mode set to Manual).

    -

    When creating a StatefulSet, you can only use a volume automatically allocated by the system (only Automatic is available for Allocation Mode).

    -

    Automatic

    -

    If you select Automatic, you need to configure the following items:

    -
    1. Access Mode: permissions of user applications on storage resources (PVs).
      • ReadWriteOnce (RWO): The volume can be mounted as read-write by a single node, and data reading and writing are supported based on a non-shared EVS volume. EVS volumes in RWO mode are supported since v1.13.10-r1.
      -
    2. Availability Zone: AZ where the storage volume is located. Only the AZ where the node is located can be selected.
    3. Sub-Type: Select a storage subtype.
      • Common I/O: uses Serial Advanced Technology Attachment (SATA) drives to store data.
      • High I/O: uses serial attached SCSI (SAS) drives to store data.
      • Ultra-high I/O: uses solid state disk (SSD) drives to store data.
      -
    4. Storage Capacity: Enter the storage capacity in the unit of GB. Ensure that the storage capacity quota is not exceeded; otherwise, creation will fail.
    5. Storage Format: The default value is CSI.

      The container storage interface (CSI) is used to establish a set of standard storage management interfaces between Kubernetes and external storage systems to provide storage services for containers.

      -
    6. After you select KMS Encryption, Key Management Service (KMS), an easy-to-use and highly secure service, will be enabled for EVS disks. This function is supported only for clusters of v1.13.10 and later in certain regions. If no agency has been created, click Create Agency and set the following parameters:
      • Agency Name: Agencies can be used to assign permissions to trusted accounts or cloud services for a specific period of time. If no agency is created, click Create Agency. The agency name EVSAccessKMS indicates that EVS is granted the permission to access KMS. After EVS is authorized successfully, it can obtain KMS keys to encrypt and decrypt EVS disks.
      • Key Name: After a key is created, it can be loaded and used in containerized applications.
      • Key ID: generated by default.
      -
    -

    Add Container Path

    -
    1. Click Add Container Path.
    2. Container Path: Enter the container path to which the data volume is mounted.
      NOTICE:
      • Do not mount a data volume to a system directory such as / or /var/run; this action may cause a container error to occur. You are advised to mount the volume to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
      • If the volume is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.
      -
      -
    3. Set permissions.
      • Read-only: You can only read the data volumes mounted to the path.
      • Read/Write: You can modify the data volumes mounted to the path. Newly written data is not migrated if the container is migrated, which may cause data loss.
      -
    -
    -
    -

  3. Click OK.
-
-

Importing an EVS Disk

CCE allows you to import existing EVS disks.

-

An EVS disk can be imported into only one namespace. If an EVS disk has been imported into a namespace, it is invisible in other namespaces and cannot be imported again. If you want to import an EVS disk that has been formatted with an ext4 file system, ensure that no partition has been created on the disk. Otherwise, data may be lost.

-
-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. On the EVS tab page, click Import.
  2. Select one or more EVS disks that you want to import. Then, click OK.
-
-
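Before importing a disk that already holds data, you can confirm from a server where the disk is attached that it carries an ext4 file system and no partition table (a sketch; the device name is illustrative):

lsblk -f /dev/vdb
# FSTYPE should be ext4 and the device should have no child partitions (vdb1, vdb2, ...).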

Unbinding an EVS Disk

After an EVS volume is successfully created or imported, the EVS volume is automatically bound to the current cluster and cannot be used by other clusters. After the volume is unbound from the cluster, other clusters can use it.

-

If the EVS volume has been mounted to a workload, it cannot be unbound from the cluster.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. In the EVS disk list, click Unbind next to the target EVS disk.
  2. Confirm the unbinding, and click OK.
-
-

Related Operations

After an EVS volume is created, you can perform operations described in Table 3. -
Table 3 Other operations

Operation

-

Description

-

Deleting an EVS volume

-
  1. Select the EVS volume to be deleted and click Delete in the Operation column.
  2. Follow the prompts to delete the EVS volume.
-
-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0257.html b/docs/cce/umn/cce_01_0257.html deleted file mode 100644 index e0fa479a..00000000 --- a/docs/cce/umn/cce_01_0257.html +++ /dev/null @@ -1,207 +0,0 @@ - - -

Creating a Pod Mounted with an EVS Volume

-

Scenario

After an EVS volume is created or imported to CCE, you can mount it to a workload.

-

EVS disks cannot be attached across AZs. Before mounting a volume, you can run the kubectl get pvc command to query the available PVCs in the AZ where the current cluster is located.

-
-
-

Prerequisites

You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.15 or later.

-
-

Using EVS Volumes for Deployments

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the evs-deployment-example.yaml file, which is used to create a Deployment.

    touch evs-deployment-example.yaml

    -

    vi evs-deployment-example.yaml

    -
    Example of mounting an EVS volume to a Deployment (PVC-based, shared volume):
    apiVersion: apps/v1 
    -kind: Deployment 
    -metadata: 
    -  name: evs-deployment-example 
    -  namespace: default 
    -spec: 
    -  replicas: 1 
    -  selector: 
    -    matchLabels: 
    -      app: evs-deployment-example 
    -  template: 
    -    metadata: 
    -      labels: 
    -        app: evs-deployment-example 
    -    spec: 
    -      containers: 
    -      - image: nginx
    -        name: container-0 
    -        volumeMounts: 
    -        - mountPath: /tmp 
    -          name: pvc-evs-example 
    -      imagePullSecrets:
    -        - name: default-secret
    -      restartPolicy: Always 
    -      volumes: 
    -      - name: pvc-evs-example 
    -        persistentVolumeClaim: 
    -          claimName: pvc-evs-auto-example
    -
    - -
    Table 1 Key parameters

    Parent Parameter

    -

    Parameter

    -

    Description

    -

    spec.template.spec.containers.volumeMounts

    -

    name

    -

    Name of the volume mounted to the container.

    -

    spec.template.spec.containers.volumeMounts

    -

    mountPath

    -

    Mount path of the container. In this example, the volume is mounted to the /tmp directory.

    -

    spec.template.spec.volumes

    -

    name

    -

    Name of the volume.

    -

    spec.template.spec.volumes.persistentVolumeClaim

    -

    claimName

    -

    Name of an existing PVC.

    -
    -
    -

    spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

    -
    -

  3. Run the following command to create the workload:

    kubectl create -f evs-deployment-example.yaml

    -

-
-
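After the Deployment is created, you can verify the mount in the same way as in the verification section below (a sketch; the generated pod name is illustrative):

kubectl get pod | grep evs-deployment-example
kubectl exec evs-deployment-example-5d4f8c7b9c-abcde -- df -h /tmp
# The EVS disk should appear as the file system backing the /tmp mount path.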

Using EVS Volumes for StatefulSets

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the evs-statefulset-example.yaml file, which is used to create a StatefulSet.

    touch evs-statefulset-example.yaml

    -

    vi evs-statefulset-example.yaml

    -

    Mounting an EVS volume to a StatefulSet (PVC template-based, non-shared volume):

    -
    Example YAML:
    apiVersion: apps/v1
    -kind: StatefulSet
    -metadata:
    -  name: evs-statefulset-example
    -  namespace: default
    -spec:
    -  replicas: 1
    -  selector:
    -    matchLabels:
    -      app: evs-statefulset-example
    -  template:
    -    metadata:
    -      labels:
    -        app: evs-statefulset-example
    -    spec:
    -      containers:
    -        - name: container-0
    -          image: 'nginx:latest'
    -          volumeMounts:
    -            - name: pvc-evs-auto-example
    -              mountPath: /tmp
    -      restartPolicy: Always
    -      imagePullSecrets:
    -        - name: default-secret
    -  volumeClaimTemplates:
    -    - metadata:
    -        name: pvc-evs-auto-example
    -        namespace: default
    -        labels:
    -          failure-domain.beta.kubernetes.io/region: eu-de
    -          failure-domain.beta.kubernetes.io/zone: eu-de-01
    -        annotations:
    -          everest.io/disk-volume-type: SAS
    -      spec:
    -        accessModes:
    -          - ReadWriteOnce
    -        resources:
    -          requests:
    -            storage: 10Gi
    -        storageClassName: csi-disk   
    -  serviceName: evs-statefulset-example-headless
    -  updateStrategy:
    -    type: RollingUpdate
    - -
    Table 2 Key parameters

    Parent Parameter

    -

    Parameter

    -

    Description

    -

    metadata

    -

    name

    -

    Name of the created workload.

    -

    spec.template.spec.containers

    -

    image

    -

    Image of the workload.

    -

    spec.template.spec.containers.volumeMounts

    -

    mountPath

    -

    Mount path of the container. In this example, the volume is mounted to the /tmp directory.

    -

    spec

    -

    serviceName

    -

    Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.

    -
    -
    -

    spec.template.spec.containers.volumeMounts.name and spec.volumeClaimTemplates.metadata.name must be consistent because they have a mapping relationship.

    -
    -
    -

  3. Run the following command to create the workload:

    kubectl create -f evs-statefulset-example.yaml

    -

-
-

Verifying Persistent Storage of an EVS Volume

  1. Query the pod of the deployed workload (for example, evs-statefulset-example) and the files on the mounted EVS volume.

    1. Run the following command to query the pod name of the workload:
      kubectl get po | grep evs-statefulset-example
      -

      Expected outputs:

      -
      evs-statefulset-example-0   1/1     Running   0          22h
      -
    2. Run the following command to check whether an EVS volume is mounted to the /tmp directory:
      kubectl exec evs-statefulset-example-0 -- df tmp
      -

      Expected outputs:

      -
      /dev/sda        10255636 36888  10202364   1% /tmp
      -
    -

  2. Run the following command to create a file named test in the /tmp directory:

    kubectl exec evs-statefulset-example-0 -- touch /tmp/test
    -

  3. Run the following command to view the file in the /tmp directory:

    kubectl exec evs-statefulset-example-0 -- ls -l /tmp
    -

    Expected outputs:

    -
    -rw-r--r-- 1 root root     0 Jun  1 02:50 test
    -

  4. Run the following command to delete the pod named evs-statefulset-example-0:

    kubectl delete po evs-statefulset-example-0
    -

  5. Check whether the file still exists after the pod is rebuilt.

    1. Run the following command to query the name of the rebuilt pod:
      kubectl get po
      -

      Expected outputs:

      -
      evs-statefulset-example-0   1/1     Running   0          2m
      -
    2. Run the following command to view the file in the /tmp directory:
      kubectl exec evs-statefulset-example-0 -- ls -l /tmp
      -

      Expected outputs:

      -
      -rw-r--r-- 1 root root     0 Jun  1 02:50 test
      -
    3. The test file still exists after the pod is rebuilt, indicating that the data in the EVS volume can be persistently stored.
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0259.html b/docs/cce/umn/cce_01_0259.html deleted file mode 100644 index 214b986b..00000000 --- a/docs/cce/umn/cce_01_0259.html +++ /dev/null @@ -1,141 +0,0 @@ - - -

Using SFS Volumes

-

Prerequisites

You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

-
-

Notes and Constraints

  • SFS volumes are available only in certain regions.
  • Container storage in CCE clusters of Kubernetes 1.13 or later version supports encryption. Currently, E2E encryption is supported only in certain regions.
  • Volumes cannot be created in specified enterprise projects. Only the default enterprise project is supported.
-
-

Creating an SFS Volume

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage.
  2. On the SFS tab, click Create SFS File System.
  3. Configure basic information, as shown in Table 1.

    -

    Table 1 Parameters for creating an SFS volume

    Parameter

    -

    Parameter Description

    -

    * PVC Name

    -

    Name of the new PVC, which is different from the volume name. The actual volume name is automatically generated when the PV is created by the PVC.

    -

    Cluster Name

    -

    Cluster to which the file system volume belongs.

    -

    Namespace

    -

    Namespace in which the volume is created.

    -

    Total Capacity

    -

    The total capacity is the capacity of a single volume. Fees are charged by actual usage.

    -

    Access Mode

    -

    Access permissions of user applications on storage resources (PVs).

    -
    • ReadWriteMany (RWX): The SFS volume can be mounted as read-write by multiple nodes.
    -

    Storage Format

    -

    The default value is CSI and cannot be changed.

    -

    Encryption

    -

    KMS Encryption is deselected by default.

    -

    After KMS Encryption is selected, Key Management Service (KMS), an easy-to-use and highly secure key service, will be used for SFS file systems. If no agency has been created, click Create Agency and set the following parameters:

    -
    • Agency Name: Agencies can be used to assign permissions to trusted accounts or cloud services for a specific period of time. If no agency is created, click Create Agency. The agency name SFSAccessKMS indicates that SFS is granted the permission to access KMS. After SFS is authorized successfully, it can obtain KMS keys to encrypt and decrypt file systems.
    • Key Name: After a key is created, it can be loaded and used in containerized applications.
    • Key ID: generated by default.
    -

    This function is supported only for clusters of v1.13.10 and later in certain regions.

    -
    -
    -

  4. Click Create.

    The volume is displayed in the list. When PVC Status becomes Bound, the volume is created successfully.

    -

  5. Click the volume name to view detailed information about the volume.
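If you prefer kubectl to the console, the same request can be declared as a PVC manifest. The following is a minimal sketch, assuming the everest-provided csi-nas StorageClass used elsewhere in this document and a hypothetical PVC named pvc-sfs-example; encryption-related settings are omitted:

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: pvc-sfs-example            # Hypothetical PVC name
      namespace: default
    spec:
      accessModes:
        - ReadWriteMany                # SFS volumes are mounted read-write by multiple nodes
      resources:
        requests:
          storage: 10Gi                # Requested capacity
      storageClassName: csi-nas        # SFS storage class provided by everest

Create the PVC with kubectl create -f pvc-sfs-example.yaml and wait until its status becomes Bound.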
-
-

Adding an SFS Volume

  1. Create a workload or job by referring to Creating a Deployment, Creating a StatefulSet, Creating a DaemonSet, or Creating a Job. During creation, expand Data Storage after adding a container. On the Cloud Volume tab page, click Add Cloud Volume.
  2. Set the storage class to SFS.

    -

    Table 2 Parameters for mounting an SFS volume

    Parameter

    -

    Parameter Description

    -

    Type

    -

    File Storage (NFS): This type applies to a wide range of scenarios, including media processing, content management, big data, and application analysis.

    -

    Allocation Mode

    -

    Manual

    -
    • Name: Select a created file system. You need to create a file system in advance. For details about how to create a file system, see Creating an SFS Volume.
    • Sub-Type: subtype of the created file storage.
    • Storage Capacity: This field is a PVC attribute. If the storage capacity has been expanded on the IaaS side, it is normal for the PVC capacity to differ from the actual storage capacity. The two values stay consistent only in CCE clusters of v1.13 or later, which support end-to-end container storage capacity expansion.
    -

    Automatic

    -

    An SFS volume is created automatically. You need to enter the storage capacity.

    -
    • Sub-Type: Select NFS.
    • Storage Capacity: Specify the total storage capacity, in GB. Ensure that the storage capacity quota is not exceeded; otherwise, creation will fail.
    • Storage Format: The default value is CSI.

      The container storage interface (CSI) is used to establish a set of standard storage management interfaces between Kubernetes and external storage systems to provide storage services for containers.

      -
    • After you select KMS Encryption, Key Management Service (KMS), an easy-to-use and highly secure service, will be enabled for file systems. This function is supported only for clusters of v1.13.10 and later in certain regions. If no agency has been created, click Create Agency and set the following parameters:
      • Agency Name: Agencies can be used to assign permissions to trusted accounts or cloud services for a specific period of time. If no agency is created, click Create Agency. The agency name SFSAccessKMS indicates that SFS is granted the permission to access KMS. After SFS is authorized successfully, it can obtain KMS keys to encrypt and decrypt file systems.
      • Key Name: After a key is created, it can be loaded and used in containerized applications.
      • Key ID: generated by default.
      -
    -

    Add Container Path

    -

    Configure the following parameters:

    -
    1. subPath: Enter the subpath of the file storage, for example, /tmp.

      If this parameter is not specified, the root path of the data volume is used by default. Currently, only file storage is supported. The value must be a relative path and cannot start with a slash (/) or ../.

      -
    2. Container Path: Enter the path of the container, for example, /tmp.
      The container path must not be a system directory, such as / and /var/run. Otherwise, an exception occurs. You are advised to mount the volume to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
      NOTICE:

      If the volume is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.

      -
      -
      -
    3. Set permissions.
      • Read-only: You can only read the data volumes mounted to the path.
      • Read/Write: You can modify the data volumes mounted to the path. Newly written data is not migrated if the container is migrated, which may cause a data loss.
      -
    -

    Click Add Container Path to add multiple settings. Then, click OK.

    -
    -
    -

  3. Click OK.
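For orientation, the console settings in Table 2 correspond to standard Kubernetes fields. The following is a minimal pod sketch, assuming a hypothetical existing PVC named pvc-sfs-example; the pod and container names are illustrative:

    apiVersion: v1
    kind: Pod
    metadata:
      name: sfs-mount-example            # Hypothetical pod name
    spec:
      containers:
      - name: container-0
        image: 'nginx:latest'
        volumeMounts:
        - name: pvc-sfs-example          # Must match the volume name under spec.volumes
          mountPath: /tmp                # Container Path
          subPath: tmp                   # Relative subpath inside the file storage (must not start with / or ../)
          readOnly: false                # false corresponds to Read/Write, true to Read-only
      imagePullSecrets:
      - name: default-secret
      volumes:
      - name: pvc-sfs-example
        persistentVolumeClaim:
          claimName: pvc-sfs-example     # Hypothetical existing PVC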
-
-

Importing an SFS Volume

CCE allows you to import existing SFS volumes.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. On the SFS tab page, click Import.
  2. Select one or more SFS volumes that you want to attach.
  3. Select the target cluster and namespace. Then, click OK.
-
-

Unbinding an SFS Volume

When an SFS volume is successfully created or imported, the volume is automatically bound to the current cluster. Other clusters can also use the volume. When the SFS volume is unbound from the cluster, other clusters can still import and use the volume.

-

If the SFS volume has been attached to a workload, the volume cannot be unbound from the cluster.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. In the SFS volume list, click Unbind next to the target volume.
  2. Confirm the unbinding, and click OK.
-
-

Related Operations

After an SFS volume is created, you can perform the operations described in Table 3.
Table 3 Other operations

Operation

-

Description

-

Deleting an SFS volume

-
  1. Select the SFS volume to be deleted and click Delete in the Operation column.
  2. Follow the prompts to delete the SFS volume.
-

Importing an SFS volume

-

CCE allows you to import existing SFS volumes.

-
  1. On the SFS tab page, click Import.
  2. Select one or more SFS volumes that you want to attach.
  3. Select the target cluster and namespace.
  4. Click Yes.
-
-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0262.html b/docs/cce/umn/cce_01_0262.html deleted file mode 100644 index 8b44b14b..00000000 --- a/docs/cce/umn/cce_01_0262.html +++ /dev/null @@ -1,149 +0,0 @@ - - -

Creating a StatefulSet Mounted with an SFS Volume

-

Scenario

CCE allows you to use an existing SFS volume to create a StatefulSet (by using a PVC).

-
-

Prerequisites

You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.15 or later.

-
-

Procedure

  1. Create an SFS volume by referring to PersistentVolumeClaims (PVCs) and record the volume name.
  2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  3. Create a YAML file for creating the workload. Assume that the file name is sfs-statefulset-example.yaml.

    touch sfs-statefulset-example.yaml

    -

    vi sfs-statefulset-example.yaml

    -

    Configuration example:

    -
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: sfs-statefulset-example
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: sfs-statefulset-example
      template:
        metadata:
          labels:
            app: sfs-statefulset-example
        spec:
          volumes:
          - name: pvc-sfs-example
            persistentVolumeClaim:
              claimName: pvc-sfs-example
          containers:
          - name: container-0
            image: 'nginx:latest'
            volumeMounts:
              - name: pvc-sfs-example
                mountPath: /tmp
          restartPolicy: Always
          imagePullSecrets:
          - name: default-secret
      serviceName: sfs-statefulset-example-headless
      updateStrategy:
        type: RollingUpdate
    Table 1 Key parameters

    Parent Parameter

    -

    Parameter

    -

    Description

    -

    spec

    -

    replicas

    -

    Number of pods.

    -

    metadata

    -

    name

    -

    Name of the new workload.

    -

    spec.template.spec.containers

    -

    image

    -

    Image used by the workload.

    -

    spec.template.spec.containers.volumeMounts

    -

    mountPath

    -

    Mount path of a container.

    -

    spec

    -

    serviceName

    -

    Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.

    -

    spec.template.spec.volumes.persistentVolumeClaim

    -

    claimName

    -

    Name of an existing PVC.

    -
    -
    -

    Example of mounting an SFS volume to a StatefulSet (PVC template-based, dedicated volume):

    -
    Example YAML file:
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: sfs-statefulset-example
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: sfs-statefulset-example
      template:
        metadata:
          labels:
            app: sfs-statefulset-example
        spec:
          containers:
            - name: container-0
              image: 'nginx:latest'
              volumeMounts:
                - name: pvc-sfs-auto-example
                  mountPath: /tmp
          restartPolicy: Always
          imagePullSecrets:
            - name: default-secret
      volumeClaimTemplates:
        - metadata:
            name: pvc-sfs-auto-example
            namespace: default
          spec:
            accessModes:
              - ReadWriteMany
            resources:
              requests:
                storage: 10Gi
            storageClassName: csi-nas
      serviceName: sfs-statefulset-example-headless
      updateStrategy:
        type: RollingUpdate
    -
    -

    spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

    -
    -

  4. Create a StatefulSet.

    kubectl create -f sfs-statefulset-example.yaml

    -
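    To confirm that the StatefulSet started and its volume is bound, you might run checks such as the following (assuming the PVC name pvc-sfs-example from the example above):

    # Check that the pod is running
    kubectl get pod | grep sfs-statefulset-example

    # Check that the PVC is bound to an SFS volume
    kubectl get pvc pvc-sfs-example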

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0263.html b/docs/cce/umn/cce_01_0263.html deleted file mode 100644 index 9899c4ac..00000000 --- a/docs/cce/umn/cce_01_0263.html +++ /dev/null @@ -1,52 +0,0 @@ - - -

Creating a Deployment Mounted with an SFS Volume

-

Scenario

After an SFS volume is created or imported to CCE, you can mount the volume to a workload.

-
-

Prerequisites

You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.15 or later.

-
-

Procedure

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the sfs-deployment-example.yaml file, which is used to create a pod.

    touch sfs-deployment-example.yaml

    -

    vi sfs-deployment-example.yaml

    -
    Example of mounting an SFS volume to a Deployment (PVC-based, shared volume):
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: sfs-deployment-example                     # Workload name
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: sfs-deployment-example
      template:
        metadata:
          labels:
            app: sfs-deployment-example
        spec:
          containers:
          - image: nginx
            name: container-0
            volumeMounts:
            - mountPath: /tmp                          # Mount path
              name: pvc-sfs-example
          imagePullSecrets:
            - name: default-secret
          restartPolicy: Always
          volumes:
          - name: pvc-sfs-example
            persistentVolumeClaim:
              claimName: pvc-sfs-auto-example          # PVC name
    -
    -

    spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

    -
    -

  3. Run the following command to create the workload:

    kubectl create -f sfs-deployment-example.yaml

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0265.html b/docs/cce/umn/cce_01_0265.html deleted file mode 100644 index 3c53fe2b..00000000 --- a/docs/cce/umn/cce_01_0265.html +++ /dev/null @@ -1,158 +0,0 @@ - - -

Using OBS Volumes

-

Prerequisites

You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

-
-

Notes and Constraints

  • CCE clusters of v1.7.3-r8 and earlier do not support OBS volumes. You need to upgrade these clusters or create clusters of a later version that supports OBS.
  • Volumes cannot be created in specified enterprise projects. Only the default enterprise project is supported.
-
-

Preparations

To reliably and stably mount OBS buckets as volumes, you must create an AK/SK before you create OBS volumes.

-

The procedure for configuring the AK/SK is as follows:

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage.
  2. On the OBS tab page, click AK/SK in the notice.
    Figure 1 Configuring the AK/SK
    -
  3. Click the upload button, select a key file, and click Upload to upload the key file.
  4. Select the corresponding workload and click Restart.
-

When creating an OBS volume, you must use the AK/SK. If the key file is not uploaded, the volume will fail to mount, so the pod cannot start or OBS data access will be abnormal.

-
-
-

Creating an OBS Volume

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage.
  2. Click the OBS tab and click Create OBS Bucket.
  3. Configure basic information, as shown in Table 1.

    -

    Table 1 Parameters for creating an OBS volume

    Parameter

    -

    Parameter Description

    -

    * PVC Name

    -

    Name of the new PVC, which is different from the volume name. The actual volume name is automatically generated when the PV is created by the PVC.

    -

    The name contains 3 to 55 characters (excluding the prefix). It must contain lowercase letters, digits, and hyphens (-), and cannot start or end with a hyphen (-).

    -

    Cluster Name

    -

    Cluster to which the OBS volume belongs.

    -

    Namespace

    -

    Namespace to which the volume belongs. The default value is default.

    -

    Instance Type

    -

    Type of the storage instance created on OBS.

    -
    • Parallel file system: If the cluster version is v1.15 or later and the everest add-on version is 1.0.2 or later, parallel file systems that can be mounted by obsfs can be created.
    • Object bucket: A bucket is a container for storing objects in OBS. OBS provides flat storage in the form of buckets and objects. Unlike the conventional multi-layer directory structure of file systems, all objects in a bucket are stored at the same logical layer.
    -
    NOTE:

    Parallel file systems are optimized OBS objects. You are advised to use parallel file systems instead of object buckets to mount OBS volumes to containers.

    -
    -

    Storage Class

    -

    This parameter is displayed when you select Object bucket for Instance Type.

    -

    This parameter indicates the storage classes supported by OBS.

    -
    • Standard: applicable to scenarios where a large number of hotspot files or small-sized files need to be accessed frequently (multiple times per month on average) and require fast access response.
    • Infrequent access: applicable to scenarios where data is not frequently accessed (less than 12 times per year on average) but requires fast access response.
    -

    Storage Policy

    -

    Object storage has the following policies:

    -

    Private: Only the bucket owner has full control over the bucket. Unauthorized users do not have permissions to access the bucket.

    -

    Access Mode

    -

    Access permissions of user applications on storage resources (PVs).

    -
    • ReadWriteMany (RWX): The volume is mounted as read-write by multiple nodes.
    -

    Storage Format

    -

    The default type is CSI.

    -

    The container storage interface (CSI) is used to establish a set of standard storage management interfaces between Kubernetes and external storage systems to provide storage services for containers.

    -
    -
    -

  4. Click Create.

    After the OBS volume is successfully created, it is displayed in the OBS volume list. Click the PVC name to view detailed information about the OBS volume.

    -
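    Alternatively, an OBS volume can be requested with a PVC manifest and kubectl. The following is a minimal sketch, assuming the csi-obs StorageClass and the everest.io/obs-volume-type annotation that appear in the StatefulSet example later in this section, and a hypothetical PVC named pvc-obs-example:

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: pvc-obs-example                    # Hypothetical PVC name
      namespace: default
      annotations:
        everest.io/obs-volume-type: STANDARD   # Standard or Infrequent access
    spec:
      accessModes:
        - ReadWriteMany                        # OBS volumes are mounted read-write by multiple nodes
      resources:
        requests:
          storage: 1Gi                         # Nominal capacity requested by the PVC
      storageClassName: csi-obs                # OBS storage class provided by everest

    Create it with kubectl create -f pvc-obs-example.yaml and check that the PVC becomes Bound.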

-
-

Adding an OBS Volume

  1. Create a workload or job by referring to Creating a Deployment, Creating a StatefulSet, Creating a DaemonSet, or Creating a Job. After you have added a container, choose Data Storage > Cloud Volume, and then click Add Cloud Volume.
  2. Set Type to OBS.

    -

    Table 2 OBS volume parameters

    Parameter

    -

    Description

    -

    Type

    -

    Select OBS.

    -

    OBS: Standard and Infrequent Access OBS buckets are supported. OBS buckets are commonly used for big data analytics, cloud native applications, static website hosting, and backup/active archiving.

    -

    Allocation Mode

    -

    Manual

    -

    Name: Select a created OBS volume.

    -

    Sub-Type: class of the selected volume. The value can be Standard or Infrequent access, and you do not need to set this parameter.

    -

    Automatic

    -

    Type of the storage instance created on OBS.

    -
    • Parallel file system: If the cluster version is v1.15 or later and the everest add-on version is 1.0.2 or later, parallel file systems that can be mounted by obsfs can be created.

      Storage Format: The default value is CSI.

      -
    • Object bucket: A bucket is a container for storing objects in OBS.

      Sub-Type: Select Standard or Infrequent access.

      -

      Storage Format: The default value is CSI.

      -
    -
    NOTE:

    Parallel file systems are optimized OBS objects. You are advised to use parallel file systems instead of object buckets to mount OBS volumes to containers.

    -
    -

    Add Container Path

    -

    Configure the following parameters:

    -
    1. Container Path: Enter the mount path in the container, for example, /tmp.
      The mount path must not be a system directory, such as / and /var/run. Otherwise, an exception occurs. You are advised to mount the volume to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
      NOTICE:

      If the volume is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.

      -
      -
      -
    2. Set permissions.
      • Read-only: You can only read the data in the mounted volumes.
      • Read/Write: You can modify the data in the mounted volumes. Newly written data is not migrated if the container is migrated, which causes a data loss.
      -
    -

    Click Add Container Path to add multiple settings. Then, click OK.

    -
    -
    -

  3. Click OK.
-
-

Importing an OBS Volume

CCE allows you to import existing OBS volumes.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. On the OBS tab page, click Import.
  2. Select one or more OBS volumes that you want to import.

    Parallel file systems are optimized OBS objects. You are advised to use parallel file systems instead of object buckets to mount OBS volumes to containers.

    -
    -

  3. Select the target cluster and namespace.
  4. Click OK.
-
-

Unbinding an OBS Volume

When an OBS volume is successfully created, the OBS volume is automatically bound to the current cluster. Other clusters can also use the OBS volume. When the volume is unbound from the cluster, other clusters can still use the volume.

-

If the volume has been mounted to a workload, the volume cannot be unbound from the cluster.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. In the OBS volume list, click Unbind next to the target OBS volume.
  2. In the dialog box displayed, click Yes.
-
-

Related Operations

After an OBS volume is created, you can perform the operations described in Table 3.
Table 3 Other Operations

Operation

-

Description

-

Deleting an OBS volume

-
  1. Select the OBS volume to be deleted and click Delete in the Operation column.
  2. Follow the prompts to delete the volume.
-
-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0268.html b/docs/cce/umn/cce_01_0268.html deleted file mode 100644 index 2dc8d80c..00000000 --- a/docs/cce/umn/cce_01_0268.html +++ /dev/null @@ -1,152 +0,0 @@ - - -

Creating a StatefulSet Mounted with an OBS Volume

-

Scenario

CCE allows you to use an existing OBS volume to create a StatefulSet through a PVC.

-
-

Prerequisites

You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.15 or later.

-
-

Procedure

  1. Create an OBS volume by referring to PersistentVolumeClaims (PVCs) and obtain the PVC name.
  2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  3. Create a YAML file for creating the workload. Assume that the file name is obs-statefulset-example.yaml.

    touch obs-statefulset-example.yaml

    -

    vi obs-statefulset-example.yaml

    -

    Configuration example:

    -
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: obs-statefulset-example
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: obs-statefulset-example
      template:
        metadata:
          labels:
            app: obs-statefulset-example
        spec:
          volumes:
          - name: pvc-obs-example
            persistentVolumeClaim:
              claimName: pvc-obs-example
          containers:
          - name: container-0
            image: 'nginx:latest'
            volumeMounts:
              - name: pvc-obs-example
                mountPath: /tmp
          restartPolicy: Always
          imagePullSecrets:
          - name: default-secret
      serviceName: obs-statefulset-example-headless    # Name of the headless Service
    Table 1 Key parameters

    Parameter

    -

    Description

    -

    replicas

    -

    Number of pods.

    -

    name

    -

    Name of the new workload.

    -

    image

    -

    Image used by the workload.

    -

    mountPath

    -

    Mount path of a container.

    -

    serviceName

    -

    Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.

    -

    claimName

    -

    Name of an existing PVC.

    -
    -
    -

    Example of mounting an OBS volume to a StatefulSet (PVC template-based, dedicated volume):

    -

    Example YAML:

    -
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: obs-statefulset-example
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: obs-statefulset-example
      template:
        metadata:
          labels:
            app: obs-statefulset-example
        spec:
          containers:
            - name: container-0
              image: 'nginx:latest'
              volumeMounts:
                - name: pvc-obs-auto-example
                  mountPath: /tmp
          restartPolicy: Always
          imagePullSecrets:
            - name: default-secret
      volumeClaimTemplates:
        - metadata:
            name: pvc-obs-auto-example
            namespace: default
            annotations:
              everest.io/obs-volume-type: STANDARD
          spec:
            accessModes:
              - ReadWriteMany
            resources:
              requests:
                storage: 1Gi
            storageClassName: csi-obs
      serviceName: obs-statefulset-example-headless
    -

  4. Create a StatefulSet.

    kubectl create -f obs-statefulset-example.yaml

    -

-
-

Verifying Persistent Storage of an OBS Volume

  1. Query the pod and OBS volume of the deployed workload (for example, obs-statefulset-example).

    1. Run the following command to query the pod name of the workload:
      kubectl get po | grep obs-statefulset-example
      -

      Expected outputs:

      -
      obs-statefulset-example-0   1/1     Running   0          2m5s
      -
    2. Run the following command to check whether an OBS volume is mounted to the /tmp directory:
      kubectl exec obs-statefulset-example-0 -- mount|grep /tmp
      -

      Expected outputs:

      -
      s3fs on /tmp type fuse.s3fs (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other)
      -
    -

  2. Run the following command to create a file named test in the /tmp directory:

    kubectl exec obs-statefulset-example-0 -- touch /tmp/test
    -

  3. Run the following command to view the file in the /tmp directory:

    kubectl exec obs-statefulset-example-0 -- ls -l /tmp
    -

    Expected outputs:

    -
    -rw-r--r-- 1 root root     0 Jun  1 02:50 test
    -

  4. Run the following command to delete the pod named obs-statefulset-example-0:

    kubectl delete po obs-statefulset-example-0
    -

  5. Check whether the file still exists after the pod is rebuilt.

    1. Run the following command to query the name of the rebuilt pod:
      kubectl get po
      -

      Expected outputs:

      -
      obs-statefulset-example-0   1/1     Running   0          2m
      -
    2. Run the following command to view the file in the /tmp directory:
      kubectl exec obs-statefulset-example-0 -- ls -l /tmp
      -

      Expected outputs:

      -
      -rw-r--r-- 1 root root     0 Jun  1 02:50 test
      -
    3. The test file still exists after the pod is rebuilt, indicating that the data in the OBS volume can be persistently stored.
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0269.html b/docs/cce/umn/cce_01_0269.html deleted file mode 100644 index 51b68e11..00000000 --- a/docs/cce/umn/cce_01_0269.html +++ /dev/null @@ -1,52 +0,0 @@ - - -

Creating a Deployment Mounted with an OBS Volume

-

Scenario

After an OBS volume is created or imported to CCE, you can mount the volume to a workload.

-
-

Prerequisites

You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.15 or later.

-
-

Procedure

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the obs-deployment-example.yaml file, which is used to create a pod.

    touch obs-deployment-example.yaml

    -

    vi obs-deployment-example.yaml

    -
    Example of mounting an OBS volume to a Deployment (PVC-based, shared volume):
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: obs-deployment-example                 # Workload name
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: obs-deployment-example
      template:
        metadata:
          labels:
            app: obs-deployment-example
        spec:
          containers:
          - image: nginx
            name: container-0
            volumeMounts:
            - mountPath: /tmp                      # Mount path
              name: pvc-obs-example
          restartPolicy: Always
          imagePullSecrets:
            - name: default-secret
          volumes:
          - name: pvc-obs-example
            persistentVolumeClaim:
              claimName: pvc-obs-auto-example      # PVC name
    -
    -

    spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

    -
    -

  3. Run the following command to create the workload:

    kubectl create -f obs-deployment-example.yaml

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0271.html b/docs/cce/umn/cce_01_0271.html deleted file mode 100644 index 8d473d86..00000000 --- a/docs/cce/umn/cce_01_0271.html +++ /dev/null @@ -1,58 +0,0 @@ - - -

Using SFS Turbo Volumes

-

Prerequisites

You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

-
-

Notes and Constraints

  • SFS Turbo volumes are available only in certain regions.
  • Currently, SFS Turbo file systems cannot be directly created on CCE.
  • Only an SFS Turbo file system in the same VPC as the cluster and in the same subnet as the node can be imported.
  • Inbound ports (111, 445, 2049, 2051, and 20048) must be enabled for the security group to which the SFS Turbo file system belongs.
-
-

Importing an SFS Turbo Volume

CCE allows you to import existing SFS Turbo volumes.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. On the SFS Turbo tab page, click Import.
  2. Select one or more SFS Turbo volumes that you want to import.
  3. Select the cluster and namespace to which you want to import the volumes.
  4. Click Next. The volumes are displayed in the list. When PVC Status becomes Bound, the volumes are imported successfully.
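After the import, a quick way to confirm the binding from the command line is to list the PVCs and PVs with kubectl, assuming the volumes were imported into the default namespace:

    # PVC Status should be Bound for each imported SFS Turbo volume
    kubectl get pvc -n default

    # The corresponding PVs should also be Bound
    kubectl get pv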
-
-

Adding an SFS Turbo Volume

  1. Create a workload or job by referring to Creating a Deployment, Creating a StatefulSet, Creating a DaemonSet, or Creating a Job. After you have added a container, choose Data Storage > Cloud Volume, and then click Add Cloud Volume.
  2. Set the storage volume type to SFS Turbo.

    -

    Table 1 Parameters for configuring an SFS Turbo volume

    Parameter

    -

    Parameter Description

    -

    Type

    -

    SFS Turbo: applicable to DevOps, containerized microservices, and enterprise office applications.

    -

    Allocation Mode

    -

    Manual

    -

    Select an existing SFS Turbo volume. You need to import SFS Turbo volumes in advance. For details, see Importing an SFS Turbo Volume.

    -

    Add Container Path

    -

    Configure the following parameters:

    -
    1. subPath: Enter the subpath of the file storage, for example, /tmp.

      This parameter specifies a subpath inside the referenced volume instead of its root. If this parameter is not specified, the root path is used. Currently, only file storage is supported. The value must be a relative path and cannot start with a slash (/) or ../.

      -
    2. Container Path: Enter the mount path in the container, for example, /tmp.
      The mount path must not be a system directory, such as / and /var/run. Otherwise, an exception occurs. You are advised to mount the volume to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
      NOTICE:

      If the volume is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.

      -
      -
      -
    3. Set permissions.
      • Read-only: You can only read the data in the mounted volumes.
      • Read/Write: You can modify the data in the mounted volumes. Newly written data is not migrated if the container is migrated, which causes a data loss.
      -
    -

    Click Add Container Path to add multiple settings. Then, click OK.

    -
    -
    -

  3. Click OK.
-
-

Unbinding an SFS Turbo Volume

When an SFS Turbo volume is successfully imported to a cluster, the volume is bound to the cluster. The volume can also be imported to other clusters. When the volume is unbound from the cluster, other clusters can still import and use the volume.

-

If the SFS Turbo volume has been mounted to a workload, the volume cannot be unbound from the cluster.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. In the SFS Turbo volume list, click Unbind next to the target volume.
  2. In the dialog box displayed, click OK.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0273.html b/docs/cce/umn/cce_01_0273.html deleted file mode 100644 index 9b64e95b..00000000 --- a/docs/cce/umn/cce_01_0273.html +++ /dev/null @@ -1,117 +0,0 @@ - - -

Creating a StatefulSet Mounted with an SFS Turbo Volume

-

Scenario

CCE allows you to use an existing SFS Turbo volume to create a StatefulSet.

-
-

Prerequisites

You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.15 or later.

-
-

Procedure

  1. Create an SFS Turbo volume and record the volume name.
  2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  3. Create a YAML file for creating the workload. Assume that the file name is sfsturbo-statefulset-example.yaml.

    touch sfsturbo-statefulset-example.yaml

    -

    vi sfsturbo-statefulset-example.yaml

    -

    Configuration example:

    -
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: sfsturbo-statefulset-example
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: sfsturbo-statefulset-example
      template:
        metadata:
          labels:
            app: sfsturbo-statefulset-example
        spec:
          volumes:
          - name: pvc-sfsturbo-example
            persistentVolumeClaim:
              claimName: pvc-sfsturbo-example
          containers:
          - name: container-0
            image: 'nginx:latest'
            volumeMounts:
              - name: pvc-sfsturbo-example
                mountPath: /tmp
          restartPolicy: Always
          imagePullSecrets:
          - name: default-secret
      serviceName: sfsturbo-statefulset-example-headless
      updateStrategy:
        type: RollingUpdate
    Table 1 Key parameters

    Parameter

    -

    Description

    -

    replicas

    -

    Number of pods.

    -

    name

    -

    Name of the new workload.

    -

    image

    -

    Image used by the workload.

    -

    mountPath

    -

    Mount path of a container.

    -

    serviceName

    -

    Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.

    -

    claimName

    -

    Name of an existing PVC.

    -
    -
    -

    spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

    -
    -

  4. Create the StatefulSet.

    kubectl create -f sfsturbo-statefulset-example.yaml

    -

-
-

Verifying Persistent Storage of an SFS Turbo Volume

  1. Query the pod and SFS Turbo volume of the deployed workload (for example, sfsturbo-statefulset-example).

    1. Run the following command to query the pod name of the workload:
      kubectl get po | grep sfsturbo-statefulset-example
      -

      Expected outputs:

      -
      sfsturbo-statefulset-example-0   1/1     Running   0          2m5s
      -
    2. Run the following command to check whether an SFS Turbo volume is mounted to the /tmp directory:
      kubectl exec sfsturbo-statefulset-example-0 -- mount|grep /tmp
      -

      Expected outputs:

      -
      192.168.0.108:/ on /tmp type nfs (rw,relatime,vers=3,rsize=1048576,wsize=1048576,namlen=255,hard,nolock,noresvport,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=192.168.0.108,mountvers=3,mountport=20048,mountproto=tcp,local_lock=all,addr=192.168.0.108)
      -
    -

  2. Run the following command to create a file named test in the /tmp directory:

    kubectl exec sfsturbo-statefulset-example-0 -- touch /tmp/test
    -

  3. Run the following command to view the file in the /tmp directory:

    kubectl exec sfsturbo-statefulset-example-0 -- ls -l /tmp
    -

    Expected outputs:

    -
    -rw-r--r-- 1 root root     0 Jun  1 02:50 test
    -

  4. Run the following command to delete the pod named sfsturbo-statefulset-example-0:

    kubectl delete po sfsturbo-statefulset-example-0
    -

  5. Check whether the file still exists after the pod is rebuilt.

    1. Run the following command to query the name of the rebuilt pod:
      kubectl get po
      -

      Expected outputs:

      -
      sfsturbo-statefulset-example-0   1/1     Running   0          2m
      -
    2. Run the following command to view the file in the /tmp directory:
      kubectl exec sfsturbo-statefulset-example-0 -- ls -l /tmp
      -

      Expected outputs:

      -
      -rw-r--r-- 1 root root     0 Jun  1 02:50 test
      -

      The test file still exists after the pod is rebuilt, indicating that the data in the SFS Turbo volume can be persistently stored.

      -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0274.html b/docs/cce/umn/cce_01_0274.html deleted file mode 100644 index 2cd8428e..00000000 --- a/docs/cce/umn/cce_01_0274.html +++ /dev/null @@ -1,77 +0,0 @@ - - -

Creating a Deployment Mounted with an SFS Turbo Volume

-

Scenario

After an SFS Turbo volume is created or imported to CCE, you can mount the volume to a workload.

-
-

Prerequisites

You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.15 or later.

-
-

Procedure

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the sfsturbo-deployment-example.yaml file, which is used to create a Deployment:

    touch sfsturbo-deployment-example.yaml

    -

    vi sfsturbo-deployment-example.yaml

    -

    Example of mounting an SFS Turbo volume to a Deployment (PVC-based, shared volume):

    -
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: sfsturbo-deployment-example            # Workload name
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: sfsturbo-deployment-example
      template:
        metadata:
          labels:
            app: sfsturbo-deployment-example
        spec:
          containers:
          - image: nginx
            name: container-0
            volumeMounts:
            - mountPath: /tmp                      # Mount path
              name: pvc-sfsturbo-example
          restartPolicy: Always
          imagePullSecrets:
            - name: default-secret
          volumes:
          - name: pvc-sfsturbo-example
            persistentVolumeClaim:
              claimName: pvc-sfsturbo-example      # PVC name
    Table 1 Key parameters

    Parameter

    -

    Description

    -

    name

    -

    Name of the created Deployment.

    -

    app

    -

    Label of the Deployment. The selector uses this label to match the Deployment's pods.

    -

    mountPath

    -

    Mount path of the container. In this example, the mount path is /tmp.

    -
    -
    -

    spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

    -
    -

  3. Run the following command to create the workload:

    kubectl create -f sfsturbo-deployment-example.yaml

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0276.html b/docs/cce/umn/cce_01_0276.html deleted file mode 100644 index 2712ee4a..00000000 --- a/docs/cce/umn/cce_01_0276.html +++ /dev/null @@ -1,51 +0,0 @@ - - -

Performing Rolling Upgrade for Nodes

-

Scenario

In a rolling upgrade, a new node is created, existing workloads are migrated to the new node, and then the old node is deleted. Figure 1 shows the migration process.

-
Figure 1 Workload migration
-
-

Notes and Constraints

  • The original node and the target node to which the workload is to be migrated must be in the same cluster.
  • The cluster must be of v1.13.10 or later.
  • The default node pool DefaultPool does not support this configuration.
-
-

Scenario 1: The Original Node Is in DefaultPool

  1. Create a node.

    1. Log in to the CCE console. In the navigation pane, choose Resource Management > Node Pools.
    2. Select the cluster to which the original node belongs.
    3. Click Create Node Pool, set the following parameters, and modify other parameters as required. For details about the parameters, see Creating a Node Pool.
      1. Name: Enter the name of the new node pool, for example, nodepool-demo.
      2. Nodes: In this example, add one node.
      3. Specifications: Select node specifications that best suit your needs.
      4. OS: Select the operating system (OS) of the nodes to be created.
      5. Login Mode:
        • If the login mode is Key pair, select a key pair for logging in to the node and select the check box to acknowledge that you have obtained the key file and that without this file you will not be able to log in to the node.

          A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create a key pair.

          -
        -
      -
    4. Click Next: Confirm. Confirm the node pool configuration and click Submit.

      Go back to the node pool list. In the node list, you can view that the new node pool has been created and is in the Normal state.

      -
    -

  2. Click the name of the node pool. The IP address of the new node is displayed in the node list.
  3. Install and configure kubectl.

    1. In the navigation pane of the CCE console, choose Resource Management > Clusters, and click Command Line Tool > Kubectl under the cluster where the original node is located.
    2. On the Kubectl tab page of the cluster details page, connect to the cluster as prompted.
    -

  4. Migrate the workload.

    1. Add a taint to the node where the workload needs to be migrated out.

      kubectl taint node [node] key=value:[effect]

      -

      In the preceding command, [node] indicates the IP address of the node where the workload to be migrated is located. The value of [effect] can be NoSchedule, PreferNoSchedule, or NoExecute. In this example, set this parameter to NoSchedule.

      -
      • NoSchedule: Pods that do not tolerate this taint are not scheduled on the node; existing pods are not evicted from the node.
      • PreferNoSchedule: Kubernetes tries to avoid scheduling pods that do not tolerate this taint onto the node.
      • NoExecute: A pod is evicted from the node if it is already running on the node, and is not scheduled onto the node if it is not yet running on the node.
      -

      To reset a taint, run the kubectl taint node [node] key:[effect]- command to remove the taint.

      -
      -
    2. Safely evict the workloads from the node.

      kubectl drain [node]

      -

      In the preceding command, [node] indicates the IP address of the node where the workload to be migrated is located.

      -
    3. In the navigation pane of the CCE console, choose Workloads > Deployments. In the workload list, the status of the workload to be migrated changes from Running to Unready. If the workload status changes to Running again, the migration is successful.
    -

    During workload migration, if node affinity is configured for the workload, the workload keeps displaying a message indicating that the workload is not ready. In this case, click the workload name to go to the workload details page. On the Scheduling Policies tab page, delete the affinity configuration of the original node and click Add Simple Scheduling Policy to configure the affinity and anti-affinity policies of the new node. For details, see Simple Scheduling Policies.

    -
    -

    After the workload is successfully migrated, you can view that the workload is migrated to the node created in 1 on the Pods tab page of the workload details page.

    -

  5. Delete the original node.

    After the workload is successfully migrated and is running properly, choose Resource Management > Nodes to delete the original node.

    -
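    As a concrete illustration of the taint-and-drain sequence in the workload migration step above, assuming the original node's IP address is 192.168.0.100 and an illustrative taint key/value of upgrade=true (drain flags may vary with your kubectl version):

    # Prevent new pods from being scheduled onto the original node
    kubectl taint node 192.168.0.100 upgrade=true:NoSchedule

    # Safely evict the workloads running on the node
    kubectl drain 192.168.0.100 --ignore-daemonsets

    # Remove the taint when it is no longer needed
    kubectl taint node 192.168.0.100 upgrade:NoSchedule-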

-
-

Scenario 2: The Original Node Is Not in DefaultPool

  1. Copy the node pool and add nodes to it.

    1. Log in to the CCE console. In the navigation pane, choose Resource Management > Node Pools.
    2. Select the cluster to which the original node belongs.

      In the node pool list, locate the node pool to which the original node belongs.

      -
    3. Click More > Copy next to the node pool name. On the Create Node Pool page, set the following parameters and modify other parameters as required. For details about the parameters, see Creating a Node Pool.
      • Name: Enter the name of the new node pool, for example, nodepool-demo.
      • Nodes: In this example, add one node.
      • Specifications: Select node specifications that best suit your needs.
      • OS: Select the operating system (OS) of the nodes to be created.
      • Login Mode:
        • If the login mode is Key pair, select a key pair for logging in to the node and select the check box to acknowledge that you have obtained the key file and that without this file you will not be able to log in to the node.

          A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create a key pair.

          -
        -
      -
    4. Click Next: Confirm. Confirm the node pool configuration and click Submit.

      Go back to the node pool list. In the node list, you can view that the new node pool has been created and is in the Normal state.

      -
    -

  2. Click the name of the node pool. The IP address of the new node is displayed in the node list.
  3. Migrate the workload.

    1. Click Edit on the right of nodepool-demo and set Taints.
    2. Click Add Taint, set Key and Value, and set Effect to NoExecute. The value options of Effect include NoSchedule, PreferNoSchedule, or NoExecute.
      • NoSchedule: Pods that do not tolerate this taint are not scheduled on the node; existing pods are not evicted from the node.
      • PreferNoSchedule: Kubernetes tries to avoid scheduling pods that do not tolerate this taint onto the node.
      • NoExecute: A pod is evicted from the node if it is already running on the node, and is not scheduled onto the node if it is not yet running on the node.
      -

      If you need to reset the taint, enter the new values or click Delete.

      -
      -
    3. Click Save.
    4. In the navigation pane of the CCE console, choose Workloads > Deployments. In the workload list, the status of the workload to be migrated changes from Running to Unready. If the workload status changes to Running again, the migration is successful.
    -

    During workload migration, if node affinity is configured for the workload, the workload keeps displaying a message indicating that the workload is not ready. In this case, click the workload name to go to the workload details page. On the Scheduling Policies tab page, delete the affinity configuration of the original node and click Add Simple Scheduling Policy to configure the affinity and anti-affinity policies of the new node. For details, see Simple Scheduling Policies.

    -
    -

    After the workload is successfully migrated, you can view that the workload is migrated to the node created in 1 on the Pods tab page of the workload details page.

    -

  4. Delete the original node.

    After the workload is successfully migrated and is running properly, choose Resource Management > Node Pools to delete the original node.

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0277.html b/docs/cce/umn/cce_01_0277.html deleted file mode 100644 index 9ab00308..00000000 --- a/docs/cce/umn/cce_01_0277.html +++ /dev/null @@ -1,51 +0,0 @@ - - -

Overview

-

CCE provides multiple types of add-ons to extend cluster functions and meet feature requirements. You can install add-ons as required.

Table 1 Add-on list

Add-on Name

-

Introduction

-

coredns

-

The coredns add-on is a DNS server that provides domain name resolution services for Kubernetes clusters. coredns chains plug-ins to provide additional features.

-

storage-driver

-

storage-driver is a FlexVolume driver used to support IaaS storage services such as EVS, SFS, and OBS.

-

everest

-

Everest is a cloud native container storage system. Based on CSI, clusters of Kubernetes v1.15.6 and later can connect to storage services such as EVS, OBS, SFS, and SFS Turbo.

-

autoscaler

-

The autoscaler add-on resizes a cluster based on pod scheduling status and resource usage.

-

metrics-server

-

metrics-server is an aggregator for monitoring data of core cluster resources.

-

gpu-beta

-

gpu-beta is a device management add-on that supports GPUs in containers. It supports only NVIDIA drivers.

-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0278.html b/docs/cce/umn/cce_01_0278.html deleted file mode 100644 index 0001142d..00000000 --- a/docs/cce/umn/cce_01_0278.html +++ /dev/null @@ -1,66 +0,0 @@ - - -

Creating a Namespace

-

When to Use Namespaces

A namespace is a collection of resources and objects. Multiple namespaces can be created inside a cluster and isolated from each other, so different namespaces can share the services of the same cluster without affecting each other.

-

For example, you can deploy workloads in a development environment into one namespace, and deploy workloads in a testing environment into another namespace.

-
-

Prerequisites

At least one cluster has been created. For details, see Creating a CCE Cluster.

-
-

Notes and Constraints

A maximum of 6,000 Services can be created in each namespace. The Services mentioned here indicate the Kubernetes Service resources added for workloads.

-
-

Namespace Types

Namespaces can be created in either of the following ways:

-
  • Created automatically: When a cluster is up, the default, kube-public, kube-system, and kube-node-lease namespaces are created by default.
    • default: All objects for which no namespace is specified are allocated to this namespace.
    • kube-public: Resources in this namespace can be accessed by all users (including unauthenticated users), such as public add-ons and container charts.
    • kube-system: All resources created by Kubernetes are in this namespace.
    • kube-node-lease: Each node has an associated Lease object in this namespace. The object is periodically updated by the node. Both NodeStatus and NodeLease are considered as heartbeats from a node. In versions earlier than v1.13, only NodeStatus is available. The NodeLease feature is introduced in v1.13. NodeLease is more lightweight than NodeStatus. This feature significantly improves the cluster scalability and performance.
    -
  • Created manually: You can create namespaces to serve separate purposes. For example, you can create three namespaces, one for a development environment, one for joint debugging environment, and one for test environment. You can also create one namespace for login services and one for game services.
-
-

Creating a Namespace

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Namespaces. Click Create Namespace.
  2. Set the parameters listed in Table 1. The parameters marked with an asterisk (*) are mandatory.

    -

    Table 1 Parameters for creating a namespace

    Parameter

    -

    Description

    -

    * Namespace

    -

    Unique name of the created namespace.

    -

    * Cluster

    -

    Cluster to which the namespace belongs.

    -

    Node Affinity

    -

    If this parameter is set to on, workloads in this namespace will be scheduled only to nodes with specified labels. To add labels to a node, choose Resource Management > Nodes > Manage Labels.

    -

    This parameter is displayed only for clusters of v1.13.10-r0 and later.

    -

    Description

    -

    Description about the namespace.

    -

    Set Resource Quotas

    -

    Resource quotas can limit the amount of resources available in namespaces, achieving resource allocation by namespace.

    -
    NOTICE:

    You are advised to set resource quotas in the namespace as required to prevent cluster or node exceptions caused by resource overload.

    -

    For example, the default number of pods that can be created on each node in a cluster is 110. If you create a cluster with 50 nodes, you can create a maximum of 5,500 pods. Therefore, you can set a resource quota to ensure that the total number of pods in all namespaces does not exceed 5,500.

    -
    -

    Quotas can be configured for the following resources:

    -
    • CPU (cores)
    • Memory (MiB)
    • StatefulSet
    • Deployment
    • Job
    • Cron job
    • Pod
    • Service
    -

    Enter an integer. If the quota of a resource is set to 0, no limit is imposed on the resource.

    -

    If you want to limit the CPU or memory quota, you must specify the CPU or memory request value when creating a workload.

    -
    -
    -

  3. When the configuration is complete, click OK.
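The same namespace, together with a resource quota, can also be created declaratively with kubectl. The following is a minimal sketch, assuming a hypothetical namespace named dev-env and illustrative quota values:

    apiVersion: v1
    kind: Namespace
    metadata:
      name: dev-env                  # Hypothetical namespace name
    ---
    apiVersion: v1
    kind: ResourceQuota
    metadata:
      name: dev-env-quota
      namespace: dev-env
    spec:
      hard:
        pods: "100"                  # Maximum number of pods in the namespace
        requests.cpu: "20"           # Total CPU requests allowed, in cores
        requests.memory: 40Gi        # Total memory requests allowed

Apply it with kubectl create -f namespace-quota.yaml. As noted above, CPU and memory quotas take effect only if workloads specify request values.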
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0281.html b/docs/cce/umn/cce_01_0281.html deleted file mode 100644 index f606b2a9..00000000 --- a/docs/cce/umn/cce_01_0281.html +++ /dev/null @@ -1,105 +0,0 @@ - - -

Overview

-

The container network assigns IP addresses to pods in a cluster and provides networking services. In CCE, you can select the following network models for your cluster:

-
  • Container tunnel network
  • VPC network
  • Cloud Native Network 2.0
-

Network Model Comparison

Table 1 describes the differences of network models supported by CCE.

-

After a cluster is created, the network model cannot be changed.

-
-
Table 1 Network model comparison

Dimension

-

Tunnel Network

-

VPC Network

-

Cloud Native Network 2.0

-

Core technology

-

OVS

-

IPvlan and VPC route

-

VPC ENI/sub-ENI

-

Applicable clusters

-

CCE cluster

-

CCE cluster

-

CCE Turbo cluster

-

Network isolation

-

Yes. For details, see Network Policies.

-

No

-

Yes. For details, see SecurityGroups.

-

Passthrough networking

-

No

-

No

-

Yes

-

IP address management

-
  • The container CIDR block is allocated separately.
  • CIDR blocks are divided by node and can be dynamically allocated (CIDR blocks can be dynamically added after being allocated.)
-
  • The container CIDR block is allocated separately.
  • CIDR blocks are divided by node and statically allocated (the CIDR block cannot be changed after a node is created).
-

The container CIDR block is divided from the VPC subnet and does not need to be allocated separately.

-

Network performance

-

Performance loss due to VXLAN encapsulation

-

No tunnel encapsulation. Cross-node packets are forwarded through VPC routers, delivering performance equivalent to that of the host network.

-

The container network is integrated with the VPC network, eliminating performance loss.

-

Networking scale

-

A maximum of 2,000 nodes are supported.

-

By default, 200 nodes are supported.

-

Each time a node is added to the cluster, a route is added to the VPC routing table. Therefore, the cluster scale is limited by the VPC route table.

-

A maximum of 2,000 nodes are supported.

-

Application scenarios

-
  • Common container service scenarios
  • Scenarios that do not have high requirements on network latency and bandwidth
-
  • Scenarios that have high requirements on network latency and bandwidth
  • Containers can communicate with VMs using a microservice registration framework, such as Dubbo and CSE.
-
  • Scenarios that have high requirements on network latency, bandwidth, and performance
  • Containers can communicate with VMs using a microservice registration framework, such as Dubbo and CSE.
-
-
-
  1. The scale of a cluster that uses the VPC network model is limited by the custom routes of the VPC. Therefore, you need to estimate the number of required nodes before creating a cluster.
  2. The scale of a cluster that uses the Cloud Native Network 2.0 model depends on the size of the VPC subnet CIDR block selected for the network attachment definition. Before creating a cluster, evaluate the scale of your cluster.
  3. By default, the VPC network model supports direct communication between containers and hosts in the same VPC. If a peering connection is configured between the VPC and another VPC, containers can directly communicate with hosts in the peer VPC. In addition, in hybrid networking scenarios such as Direct Connect and VPN, communication between containers and hosts on the peer end can also be achieved with proper planning.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0284.html b/docs/cce/umn/cce_01_0284.html deleted file mode 100644 index 30b11480..00000000 --- a/docs/cce/umn/cce_01_0284.html +++ /dev/null @@ -1,79 +0,0 @@ - - -

Cloud Native Network 2.0

-

Model Definition

Developed by CCE, Cloud Native Network 2.0 deeply integrates Elastic Network Interfaces (ENIs) and sub-ENIs of Virtual Private Cloud (VPC). Container IP addresses are allocated from the VPC CIDR block. ELB passthrough networking is supported to direct access requests to containers. Security groups and elastic IPs (EIPs) are bound to deliver high performance.

-
Figure 1 Cloud Native Network 2.0
-

Pod-to-pod communication

-
  • On the same node: Packets are forwarded through the VPC ENI or sub-ENI.
  • Across nodes: Packets are forwarded through the VPC ENI or sub-ENI.
-
-

Notes and Constraints

This network model is available only to CCE Turbo clusters.

-
-

Advantages and Disadvantages

Advantages

-
  • As the container network directly uses the VPC, network problems are easy to locate and the highest performance can be achieved.
  • External networks in a VPC can be directly connected to container IP addresses.
  • The load balancing, security group, and EIP capabilities provided by VPC can be used directly.
-

Disadvantages

-

The container network directly uses VPC, which occupies the VPC address space. Therefore, you must properly plan the container CIDR block before creating a cluster.

-
-

Application Scenarios

  • High performance requirements and use of other VPC network capabilities: Cloud Native Network 2.0 directly uses VPC, which delivers almost the same performance as the VPC network. Therefore, it is applicable to scenarios that have high requirements on bandwidth and latency, such as online live broadcast and e-commerce seckill.
  • Large-scale networking: Cloud Native Network 2.0 supports a maximum of 2000 ECS nodes and 100,000 containers.
-
-

Container IP Address Management

In the Cloud Native Network 2.0 model, BMS nodes use ENIs and ECS nodes use sub-ENIs. The following figure shows how IP addresses are managed on these nodes.

-
Figure 2 IP address management in Cloud Native Network 2.0
-
  • Pod IP addresses are allocated from the pod subnet you configure in the VPC.
  • ENIs and sub-ENIs bound to an ECS node = Number of ENIs used to bear sub-ENIs + Number of sub-ENIs currently used by pods + Number of pre-bound sub-ENIs
  • ENIs bound to a BMS node = Number of ENIs currently used by pods + Number of pre-bound ENIs
  • Pre-binding policy: The system periodically (every 2 minutes by default) checks the total number of ENIs on the node. If the number is below the low threshold, the system pre-binds ENIs; if it exceeds the high threshold, the system releases ENIs. A worked example follows this list.
  • On an ECS node, when the number of pre-bound sub-ENIs plus the number of sub-ENIs currently used by the pods is smaller than the number of sub-ENIs at the low threshold (sub-ENI quota on the node x low threshold), the system pre-binds sub-ENIs to make the numbers equal.
  • On an ECS node, when the number of pre-bound sub-ENIs plus the number of sub-ENIs currently used by the pods is larger than the number of sub-ENIs at the high threshold (sub-ENI quota on the node x high threshold), the system releases sub-ENIs to make the numbers equal.
  • On a BMS node, when the number of pre-bound ENIs plus the number of ENIs currently used by the pods is smaller than the number of ENIs at the low threshold (ENI quota on the node x low threshold), the system pre-binds ENIs to make the numbers equal.
  • On a BMS node, when the number of pre-bound ENIs plus the number of ENIs currently used by the pods is larger than the number of ENIs at the high threshold (ENI quota on the node x high threshold), the system releases ENIs to make the numbers equal.
-
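To make the thresholds concrete, consider a hypothetical example: if the sub-ENI quota of an ECS node is 128 and the low and high thresholds are 0.2 and 0.8 (illustrative values; the actual quota and thresholds depend on the node flavor and add-on settings), the system keeps the sum of pre-bound and in-use sub-ENIs between 128 x 0.2 = 25.6 (about 26) and 128 x 0.8 = 102.4 (about 102). If the sum drops below the lower value, more sub-ENIs are pre-bound; if it exceeds the upper value, idle sub-ENIs are released.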
-

Recommendation for CIDR Block Planning

As described in Cluster Network Structure, network addresses in a cluster can be divided into three parts: node network, container network, and service network. When planning network addresses, consider the following aspects:

-
  • The three CIDR blocks cannot overlap. Otherwise, a conflict occurs. All subnets (including those created from the secondary CIDR block) in the VPC where the cluster resides cannot conflict with the container and Service CIDR blocks.
  • Ensure that each CIDR block has sufficient IP addresses.
    • The IP addresses in the node CIDR block must match the cluster scale. Otherwise, nodes cannot be created due to insufficient IP addresses.
    • The IP addresses in the container CIDR block must match the service scale. Otherwise, pods cannot be created due to insufficient IP addresses.
    -
-

In the Cloud Native Network 2.0 model, the container CIDR block and node CIDR block share the network addresses in a VPC. It is recommended that containers and nodes use different subnets. Otherwise, containers or nodes may fail to be created due to insufficient IP resources.

-

In addition, a subnet can be added to the container CIDR block after a cluster is created to increase the number of available IP addresses. In this case, ensure that the added subnet does not conflict with other subnets in the container CIDR block.

-
Figure 3 Configuring CIDR blocks
-
-

Example of Cloud Native Network 2.0 Access

Create a CCE Turbo cluster, which contains three ECS nodes.

-
Figure 4 Cluster network
-

Access the details page of one node. You can see that the node has one primary NIC and one extended NIC, and both of them are ENIs. The extended NIC belongs to the container CIDR block and is used to mount a sub-ENI to the pod.

-

Create a Deployment on the cluster.

-
kind: Deployment
-apiVersion: apps/v1
-metadata:
-  name: example
-  namespace: default
-spec:
-  replicas: 6
-  selector:
-    matchLabels:
-      app: example
-  template:
-    metadata:
-      labels:
-        app: example
-    spec:
-      containers:
-        - name: container-0
-          image: 'nginx:perl'
-          resources:
-            limits:
-              cpu: 250m
-              memory: 512Mi
-            requests:
-              cpu: 250m
-              memory: 512Mi
-      imagePullSecrets:
-        - name: default-secret
-

View the created pod.

-
$ kubectl get pod -owide
-NAME                       READY   STATUS    RESTARTS   AGE   IP            NODE         NOMINATED NODE   READINESS GATES
-example-5bdc5699b7-54v7g   1/1     Running   0          7s    10.1.18.2     10.1.0.167   <none>           <none>
-example-5bdc5699b7-6dzx5   1/1     Running   0          7s    10.1.18.216   10.1.0.186   <none>           <none>
-example-5bdc5699b7-gq7xs   1/1     Running   0          7s    10.1.16.63    10.1.0.144   <none>           <none>
-example-5bdc5699b7-h9rvb   1/1     Running   0          7s    10.1.16.125   10.1.0.167   <none>           <none>
-example-5bdc5699b7-s9fts   1/1     Running   0          7s    10.1.16.89    10.1.0.144   <none>           <none>
-example-5bdc5699b7-swq6q   1/1     Running   0          7s    10.1.17.111   10.1.0.167   <none>           <none>
-

The IP address of each pod belongs to a sub-ENI, and the sub-ENIs are mounted to the ENI (extended NIC) of the node.

-

For example, the extended NIC of node 10.1.0.167 is 10.1.17.172. On the Network Interfaces page of the Network Console, you can see that three sub-ENIs are mounted to the extended NIC 10.1.17.172, and each sub-ENI IP address is the IP address of a pod on that node.

-

In the VPC, the IP address of the pod can be successfully accessed.
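For example, you can log in to another ECS in the same VPC and send a request to one of the pod IP addresses listed above (a minimal check; the IP address comes from the kubectl output in this example, and the Deployment uses an Nginx image listening on port 80):

# Run on any ECS in the same VPC as the cluster.
curl 10.1.18.2:80

If the HTML of the Nginx default page is returned, the pod IP address is directly reachable from the VPC.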

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0285.html b/docs/cce/umn/cce_01_0285.html deleted file mode 100644 index 58f52f6f..00000000 --- a/docs/cce/umn/cce_01_0285.html +++ /dev/null @@ -1,26 +0,0 @@ - - -

Managing Namespaces

-

Selecting a Namespace

  • When creating a workload, you can select a namespace to isolate resources or users.
  • When querying workloads, you can select a namespace to view all workloads in the namespace.
-
-

Isolating Namespaces

  • Isolating namespaces by environment

    An application generally goes through the development, joint debugging, and testing stages before it is launched. In this process, the same set of workloads is deployed in each environment (stage), and they need to be logically separated. There are two ways to separate them:

    -
    • Group them in different clusters for different environments.

      Resources cannot be shared among different clusters. In addition, services in different environments can access each other only through load balancing.

      -
    • Group them in different namespaces for different environments.

      Workloads in the same namespace can access each other by using the Service name. Cross-namespace access can be implemented by appending the namespace name to the Service name (service-name.namespace-name). See the example after this list.

      -

      The following figure shows namespaces created for the development, joint debugging, and testing environments, respectively.

      -
      Figure 1 One namespace for one environment
      -
    -
  • Isolating namespaces by application

    You are advised to use this method if a large number of workloads are deployed in the same environment. For example, in the following figure, different namespaces (APP1 and APP2) are created to logically manage workloads as different groups. Workloads in the same namespace access each other using the Service name, and workloads in different namespaces access each other using the Service name plus the namespace name.

    -
    Figure 2 Grouping workloads into different namespaces
    -
-
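As a minimal illustration of this naming scheme (the Service name, namespace, and port below are hypothetical), a workload can reach a Service named backend in the namespace test as follows:

# From a pod in the same namespace (test), the Service name alone is enough.
curl http://backend:8080
# From a pod in a different namespace, append the namespace name to the Service name.
curl http://backend.test:8080
# The fully qualified DNS name also works from any namespace.
curl http://backend.test.svc.cluster.local:8080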
-

Deleting a Namespace

If a namespace is deleted, all resources (such as workloads, jobs, and ConfigMaps) in this namespace will also be deleted. Exercise caution when deleting a namespace.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Namespaces.
  2. Select the cluster to which the namespace belongs from the Clusters drop-down list.
  3. Select the namespace to be deleted and click Delete.

    Follow the prompts to delete the namespace. The default namespaces cannot be deleted.

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0286.html b/docs/cce/umn/cce_01_0286.html deleted file mode 100644 index 1a1f9de3..00000000 --- a/docs/cce/umn/cce_01_0286.html +++ /dev/null @@ -1,34 +0,0 @@ - - -

Configuring a Namespace-level Network Policy

-

You can configure a namespace-level network policy after enabling network isolation.

-

By default, Network Isolation is disabled for namespaces. For example, if network isolation is off for namespace default, all workloads in the current cluster can access the workloads in namespace default.

-

To prevent other workloads from accessing the workloads in namespace default, perform the following steps:

-

Only clusters that use the tunnel network model support network isolation.

-
-

Prerequisites

-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Namespaces.
  2. Select the cluster to which the namespace belongs from the Clusters drop-down list.
  3. At the row of a namespace (for example, default), switch on Network Isolation.

    After network isolation is enabled, workloads in namespace default can access each other but they cannot be accessed by workloads in other namespaces.

    -
    Figure 1 Namespace-level network policy
    -

-
-

Network Isolation Description

Enabling network isolation creates a network policy in the namespace. The network policy selects all pods in the namespace and prevents pods in other namespaces from accessing them.

-
kind: NetworkPolicy
-apiVersion: networking.k8s.io/v1
-metadata:
-    name: deny-default
-    namespace: default
-spec:
-    ingress:
-        - from:
-          - podSelector: {}
-    podSelector: {}                     # {} indicates that all pods are selected.
-

You can also customize a network policy. For details, see Network Policies.
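As a sketch of such a customization (the label value is hypothetical), the following policy allows the pods in namespace default to be accessed only by pods labeled role=frontend in the same namespace, instead of by all pods in the namespace:

kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-frontend-only
  namespace: default
spec:
  podSelector: {}                     # Apply the policy to all pods in the namespace.
  ingress:
    - from:
        - podSelector:                # Allow ingress only from pods with this label.
            matchLabels:
              role: frontend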

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0287.html b/docs/cce/umn/cce_01_0287.html deleted file mode 100644 index 92504987..00000000 --- a/docs/cce/umn/cce_01_0287.html +++ /dev/null @@ -1,127 +0,0 @@ - - -

Setting a Resource Quota

-

Namespace-level resource quotas limit the amount of resources available to teams or users when these teams or users use the same cluster. The quotas include the total number of a type of objects and the total amount of compute resources (CPU and memory) consumed by the objects.

-

Quotas can be set only in clusters of v1.9 or later.

-
-

Prerequisites

-
-

Usage

By default, running pods can use the CPUs and memory of a node without restrictions. This means the pods in a namespace may exhaust all resources of the cluster.

-

Kubernetes provides namespaces for you to group workloads in a cluster. By setting resource quotas for each namespace, you can prevent resource exhaustion and ensure cluster reliability.

-

You can configure quotas for resources such as CPU, memory, and the number of pods in a namespace. For more information, see Resource Quotas.
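Under the hood, a namespace resource quota is a standard Kubernetes ResourceQuota object. A minimal sketch of such an object (the name and values are examples only) is as follows:

apiVersion: v1
kind: ResourceQuota
metadata:
  name: quota-example
  namespace: default
spec:
  hard:
    limits.cpu: "4"        # Total CPU limit of all pods in the namespace
    limits.memory: 8Gi     # Total memory limit of all pods in the namespace
    pods: "100"            # Maximum number of pods in the namespace
    services: "20"         # Maximum number of Services in the namespace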

-

The following table lists the recommended maximum number of pods for clusters of different sizes.

- -
- - - - - - - - - - - - - - - - -

Cluster Scale

-

Recommended Number of Pods

-

50 nodes

-

2,500 pods

-

200 nodes

-

10,000 pods

-

1,000 nodes

-

30,000 pods

-

2,000 nodes

-

50,000 pods

-
-
-

For clusters of v1.21 and later, default resource quotas are created when a namespace is created. Table 1 lists the resource quotas based on cluster specifications. You can modify them according to your service requirements.

- -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Table 1 Default resource quotas

Cluster Scale

-

Pod

-

Deployment

-

Secret

-

ConfigMap

-

Service

-

50 nodes

-

2000

-

1000

-

1000

-

1000

-

1000

-

200 nodes

-

2000

-

1000

-

1000

-

1000

-

1000

-

1,000 nodes

-

5000

-

2000

-

2000

-

2000

-

2000

-

2,000 nodes

-

5000

-

2000

-

2000

-

2000

-

2000

-
-
-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Namespaces.
  2. Select the cluster to which the namespace belongs from the Clusters drop-down list.
  3. In the Operation column of a namespace, click Manage Quota.

    This operation cannot be performed on system namespaces kube-system and kube-public.

    -

  4. Set the resource quotas and click OK.

    • CPU (cores): maximum number of CPU cores that can be allocated to workload pods in the namespace.
    • Memory (MiB): maximum amount of memory that can be allocated to workload pods in the namespace.
    • StatefulSet: maximum number of StatefulSets that can be created in the namespace.
    • Deployment: maximum number of Deployments that can be created in the namespace.
    • Job: maximum number of one-off jobs that can be created in the namespace.
    • Cron Job: maximum number of cron jobs that can be created in the namespace.
    • Pod: maximum number of pods that can be created in the namespace.
    • Service: maximum number of Services that can be created in the namespace.
    -
    • After setting CPU and memory quotas for a namespace, you must specify the request and limit values of CPU and memory resources when creating a workload. Otherwise, the workload cannot be created. If the quota of a resource is set to 0, the resource usage is not limited.
    • Accumulated quota usage includes the resources used by CCE to create default components, such as the Kubernetes Services (which can be viewed using kubectl) created under the default namespace. Therefore, you are advised to set a resource quota greater than expected to reserve resources for creating default components.
    -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0288.html b/docs/cce/umn/cce_01_0288.html deleted file mode 100644 index 047271c2..00000000 --- a/docs/cce/umn/cce_01_0288.html +++ /dev/null @@ -1,196 +0,0 @@ - - -

SecurityGroups

-

When the Cloud Native Network 2.0 model is used, pods use VPC ENIs or sub-ENIs for networking. You can directly bind security groups and EIPs to pods. CCE provides a custom resource object named SecurityGroup for you to associate security groups with pods in CCE. You can customize workloads with specific security isolation requirements using SecurityGroups.

-

Notes and Constraints

  • This function is supported for CCE Turbo clusters of v1.19 and later. Upgrade your CCE Turbo clusters if their versions are earlier than v1.19.
  • A workload can be bound to a maximum of five security groups.
-
-

Using the Console

  1. In the navigation pane of the CCE console, choose Resource Management > Network.
  2. On the SecurityGroup tab page, select the target cluster in the upper right corner and click Create.
  3. Set the parameters as described in Table 1.

    -

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Table 1 Configuration parameters

    Parameter

    -

    Description

    -

    Example Value

    -

    SecurityGroup Name

    -

    Enter a SecurityGroup name.

    -

    Enter 4 to 63 characters. The value must start with a lowercase letter and cannot end with a hyphen (-). Only lowercase letters, digits, and hyphens (-) are allowed.

    -

    security-group

    -

    Cluster Name

    -

    Select a cluster.

    -

    cce-turbo

    -

    Namespace

    -

    Select a namespace. If the namespace is not created, click Create Namespace.

    -

    default

    -

    Workload

    -

    Select a workload.

    -

    nginx

    -

    Security Group

    -

    The selected security group will be bound to the ENI or supplementary ENI of the selected workload. A maximum of five security groups can be selected from the drop-down list. You must select one or multiple security groups to create a SecurityGroup.

    -

    If no security group has been created yet, click Create Security Group. After the security group is created, click the refresh button.

    -
    NOTICE:
    • A maximum of 5 security groups can be selected.
    • Hover the cursor on the security group name, and you can view details about the security group.
    -
    -

    64566556-bd6f-48fb-b2c6-df8f44617953

    -

    5451f1b0-bd6f-48fb-b2c6-df8f44617953

    -
    -
    -

  4. After setting the parameters, click Create.

    After the SecurityGroup is created, the system automatically returns to the SecurityGroup list page. You can see that the newly added SecurityGroup is in the list.

    -

-
-

Using kubectl

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Create a description file named securitygroup-demo.yaml.

    vi securitygroup-demo.yaml

    -

    For example, create the following SecurityGroup to bind all nginx workloads with two security groups 64566556-bd6f-48fb-b2c6-df8f44617953 and 5451f1b0-bd6f-48fb-b2c6-df8f44617953 that have been created in advance. An example is as follows:

    -
    apiVersion: crd.yangtse.cni/v1
    -kind: SecurityGroup
    -metadata:
    -  name: demo
    -  namespace: default
    -spec:
    -  podSelector:
    -    matchLabels:
    -      app: nginx    
    -  securityGroups:
    -  - id: 64566556-bd6f-48fb-b2c6-df8f44617953
    -  - id: 5451f1b0-bd6f-48fb-b2c6-df8f44617953
    -
    Table 2 describes the parameters in the YAML file. -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Table 2 Description

    Field

    -

    Description

    -

    Mandatory

    -

    apiVersion

    -

    API version. The value is crd.yangtse.cni/v1.

    -

    Yes

    -

    kind

    -

    Type of the object to be created.

    -

    Yes

    -

    metadata

    -

    Metadata definition of the resource object.

    -

    Yes

    -

    name

    -

    Name of the SecurityGroup.

    -

    Yes

    -

    namespace

    -

    Name of the namespace.

    -

    Yes

    -

    spec

    -

    Detailed description of the SecurityGroup.

    -

    Yes

    -

    podSelector

    -

    Used to define the workload to be associated with security groups in the SecurityGroup.

    -

    Yes

    -

    securityGroups

    -

    Security group ID.

    -

    Yes

    -
    -
    -
    -

  3. Run the following command to create the SecurityGroup:

    kubectl create -f securitygroup-demo.yaml

    -

    If the following information is displayed, the SecurityGroup has been created.

    -
    securitygroup.crd.yangtse.cni/demo created
    -

  4. Run the following command to view the SecurityGroup:

    kubectl get sg

    -

    If a SecurityGroup named demo is displayed in the command output, the SecurityGroup is created successfully.

    -
    NAME                       POD-SELECTOR                      AGE
    -all-no                     map[matchLabels:map[app:nginx]]   4h1m
    -s001test                   map[matchLabels:map[app:nginx]]   19m
    -demo                       map[matchLabels:map[app:nginx]]   2m9s
    -

-
-

Other Operations

-
- - - - - - - - - - - - - - - - -
Table 3 Other operations

Operation

-

Procedure

-

Deletion

-
  1. In the navigation pane of the CCE console, choose Resource Management > Network.
  2. On the SecurityGroup tab page, select the target SecurityGroup.
  3. Click Delete to delete the SecurityGroup.
-

Update

-
  1. In the navigation pane of the CCE console, choose Resource Management > Network.
  2. On the SecurityGroup tab page, click Update at the same row as the SecurityGroup.

    You can update the SecurityGroup ID and associated workload.

    -
-

Viewing the YAML file

-
  1. In the navigation pane of the CCE console, choose Resource Management > Network.
  2. On the SecurityGroup tab page, click View YAML at the same row as the SecurityGroup.

    You can view, copy, and download the YAML file.

    -
-

Viewing events

-
  1. In the navigation pane of the CCE console, choose Resource Management > Network.
  2. On the SecurityGroup tab page, click View Event.

    You can query the event information.

    -
-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0291.html b/docs/cce/umn/cce_01_0291.html deleted file mode 100644 index e26e81aa..00000000 --- a/docs/cce/umn/cce_01_0291.html +++ /dev/null @@ -1,20 +0,0 @@ - - -

Scaling a Cluster/Node

-

-
-
- - - -
- diff --git a/docs/cce/umn/cce_01_0293.html b/docs/cce/umn/cce_01_0293.html deleted file mode 100644 index 08e78262..00000000 --- a/docs/cce/umn/cce_01_0293.html +++ /dev/null @@ -1,22 +0,0 @@ - - -

Scaling a Workload

-

-
-
- - - -
- diff --git a/docs/cce/umn/cce_01_0296.html b/docs/cce/umn/cce_01_0296.html deleted file mode 100644 index a148d81d..00000000 --- a/docs/cce/umn/cce_01_0296.html +++ /dev/null @@ -1,27 +0,0 @@ - - -

Node Scaling Mechanisms

-

Kubernetes HPA is designed for pods. However, if cluster resources are insufficient, the only option is to add nodes. Scaling cluster nodes used to be laborious, but on the cloud you can add or delete nodes by simply calling APIs.

-

autoscaler is a component provided by Kubernetes for auto scaling of cluster nodes based on the pod scheduling status and resource usage.

-

Prerequisites

Before using the node scaling function, you must install the autoscaler add-on of v1.13.8 or later.

-
-

How autoscaler Works

The cluster autoscaler (CA) goes through two processes.

-
  • Scale-out: The CA checks all unschedulable pods every 10 seconds and selects a node group that meets the requirements for scale-out based on the policy you set.
  • Scale-in: The CA scans all nodes every 10 seconds. If the number of pod requests on a node is less than the user-defined percentage for scale-in, the CA simulates whether the pods on the node can be migrated to other nodes. If yes, the node will be removed after an idle time window.
-

As described above, if a cluster node is idle for a period of time (10 minutes by default), scale-in is triggered, and the idle node is removed.

-

However, a node cannot be removed from a cluster if the following pods exist:

-
  1. Pods that do not meet specific requirements set in PodDisruptionBudget
  2. Pods that cannot be scheduled to other nodes due to constraints such as affinity and anti-affinity policies
  3. Pods that have the "cluster-autoscaler.kubernetes.io/safe-to-evict": "false" annotation (see the example after this list)
  4. Pods (except those created by kube-system DaemonSet) that exist in the kube-system namespace on the node
  5. Pods that are not created by the controller (Deployment/ReplicaSet/job/StatefulSet)
-
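A minimal sketch of the annotation mentioned in item 3, added to the pod template of a Deployment so that the autoscaler never evicts its pods during scale-in (the workload name and image are placeholders):

apiVersion: apps/v1
kind: Deployment
metadata:
  name: critical-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: critical-app
  template:
    metadata:
      labels:
        app: critical-app
      annotations:
        # Prevents the autoscaler from removing the node that hosts this pod.
        cluster-autoscaler.kubernetes.io/safe-to-evict: "false"
    spec:
      containers:
        - name: container-0
          image: nginx:perl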
-

autoscaler Architecture

Figure 1 shows the autoscaler architecture and its core modules:

-
Figure 1 autoscaler architecture
-

Description

-
  • Estimator: Evaluates the number of nodes to be added to each node pool to host unschedulable pods.
  • Simulator: Finds the nodes that meet the scale-in conditions in the scale-in scenario.
  • Expander: Selects an optimal node from the node pool picked out by the Estimator based on the user-defined policy in the scale-out scenario. Currently, the Expander has the following policies:
    • Random: Selects a node pool randomly. If you have not specified a policy, Random is set by default.
    • most-pods: Selects the node pool that can host the largest number of unschedulable pods after the scale-out. If multiple node pools meet the requirement, a random node pool will be selected.
    • least-waste: Selects the node pool that has the least CPU or memory resource waste after scale-out.
    • price: Selects the node pool in which the to-be-added nodes cost least for scale-out.
    • priority: Selects the node pool with the highest weight. The weights are user-defined.
    -
-

Currently, CCE supports all policies except price. By default, CCE add-ons use the least-waste policy.

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0298.html b/docs/cce/umn/cce_01_0298.html deleted file mode 100644 index b9862b21..00000000 --- a/docs/cce/umn/cce_01_0298.html +++ /dev/null @@ -1,170 +0,0 @@ - - -

Creating a CCE Turbo Cluster

-

CCE Turbo clusters run on a cloud native infrastructure that features software-hardware synergy to support passthrough networking, high security and reliability, and intelligent scheduling.

-

CCE Turbo clusters are paired with the Cloud Native Network 2.0 model for large-scale, high-performance container deployment. Containers are assigned IP addresses from the VPC CIDR block. Containers and nodes can belong to different subnets. Access requests from external networks in a VPC can be directly routed to container IP addresses, which greatly improves networking performance. It is recommended that you read Cloud Native Network 2.0 to understand its features and how to plan each CIDR block before creating a cluster.

-

Notes and Constraints

  • During the node creation, software packages are downloaded from OBS using the domain name. You need to use a private DNS server to resolve the OBS domain name, and configure the subnet where the node resides with a private DNS server address. When you create a subnet, the private DNS server is used by default. If you change the subnet DNS, ensure that the DNS server in use can resolve the OBS domain name.
  • You can create a maximum of 50 clusters in a single region.
  • CCE Turbo clusters support only Cloud Native Network 2.0. For details about this network model, see Cloud Native Network 2.0.
  • Nodes in a CCE Turbo cluster must be the models developed on the QingTian architecture that features software-hardware synergy.
  • CCE Turbo clusters are available only in certain regions.
-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Clusters. Click Create next to CCE Turbo Cluster.

    Figure 1 Creating a CCE Turbo cluster
    -

  2. On the page displayed, set the following parameters:

    Basic configuration

    -
    Specify the basic cluster configuration. -
    - - - - - - - - - - - - - -
    Table 1 Basic parameters for creating a cluster

    Parameter

    -

    Description

    -

    Cluster Name

    -

    Name of the cluster to be created. The cluster name must be unique under the same account and cannot be changed after the cluster is created.

    -

    A cluster name contains 4 to 128 characters, starting with a letter and not ending with a hyphen (-). Only lowercase letters, digits, and hyphens (-) are allowed.

    -

    Version

    -

    Version of Kubernetes to use for the cluster.

    -

    Management Scale

    -

    Maximum number of worker nodes that can be managed by the master nodes of the cluster. You can select 200 nodes, 1,000 nodes, or 2,000 nodes for your cluster.

    -

    Master node specifications change with the cluster management scale you choose, and you will be charged accordingly.

    -
    -
    -
    -

    Networking configuration

    -
    Select the CIDR blocks used by nodes and containers in the cluster. If IP resources in the CIDR blocks are insufficient, nodes and containers cannot be created. -
    - - - - - - - - - - - - - - - - -
    Table 2 Networking parameters

    Parameter

    -

    Description

    -

    Network Model

    -

    Cloud Native Network 2.0: This network model deeply integrates the native elastic network interfaces (ENIs) of VPC, uses the VPC CIDR block to allocate container addresses, and supports direct traffic distribution to containers through a load balancer to deliver high performance.

    -

    VPC

    -

    Select the VPC used by nodes and containers in the cluster. The VPC cannot be changed after the cluster is created.

    -

    A VPC provides a secure and logically isolated network environment.

    -

    If no VPC is available, create one on the VPC console. After the VPC is created, click the refresh icon.

    -

    Node Subnet

    -

    This parameter is available after you select a VPC.

    -

    The subnet you select is used by nodes in the cluster and determines the maximum number of nodes in the cluster. This subnet will be the default subnet where your nodes are created. When creating a node, you can select other subnets in the same VPC.

    -

    A node subnet provides dedicated network resources that are logically isolated from other networks for higher security.

    -

    If no node subnet is available, click Create Subnet to create a subnet. After the subnet is created, click the refresh icon. For details about the relationship between VPCs, subnets, and clusters, see Cluster Overview.

    -

    During the node creation, software packages are downloaded from OBS using the domain name. You need to use a private DNS server to resolve the OBS domain name, and configure the subnet where the node resides with a private DNS server address. When you create a subnet, the private DNS server is used by default. If you change the subnet DNS, ensure that the DNS server in use can resolve the OBS domain name.

    -

    The selected subnet cannot be changed after the cluster is created.

    -

    Pod Subnet

    -

    This parameter is available after you select a VPC.

    -

    The subnet you select is used by pods in the cluster and determines the maximum number of pods in the cluster. The subnet cannot be changed after the cluster is created.

    -

    IP addresses used by pods will be allocated from this subnet.

    -
    NOTE:

    If the pod subnet is the same as the node subnet, pods and nodes share the remaining IP addresses in the subnet. As a result, pods or nodes may fail to be created due to insufficient IP addresses.

    -
    -
    -
    -
    -

    Advanced Settings

    -
    Configure enhanced capabilities for your CCE Turbo cluster. -
    - - - - - - - - - - - - - -
    Table 3 Networking parameters

    Parameter

    -

    Description

    -

    Service Network Segment

    -

    An IP range from which IP addresses are allocated to Kubernetes Services. After the cluster is created, the CIDR block cannot be changed. The Service CIDR block cannot conflict with the created routes. If they conflict, select another CIDR block.

    -

    The default value is 10.247.0.0/16. You can change the CIDR block and mask according to your service requirements. The mask determines the maximum number of Service IP addresses available in the cluster.

    -

    After you set the mask, the console will provide an estimated maximum number of Services you can create in this CIDR block.

    -

    kube-proxy Mode

    -

    Load balancing between Services and their backend pods. The value cannot be changed after the cluster is created.

    -
    • IPVS: optimized kube-proxy mode to achieve higher throughput and faster speed, ideal for large-sized clusters. This mode supports incremental updates and can keep connections uninterrupted during Service updates.

      In this mode, when the ingress and Service use the same ELB instance, the ingress cannot be accessed from the nodes and containers in the cluster.

      -
    • iptables: Use iptables rules to implement Service load balancing. In this mode, too many iptables rules will be generated when many Services are deployed. In addition, non-incremental updates will cause a latency and even tangible performance issues in the case of service traffic spikes.
    -
    NOTE:
    • IPVS provides better scalability and performance for large clusters.
    • Compared with iptables, IPVS supports more complex load balancing algorithms such as least load first (LLF) and weighted least connections (WLC).
    • IPVS supports server health check and connection retries.
    -
    -

    CPU Policy

    -
    • On: Exclusive CPU cores can be allocated to workload pods. Select On if your workload is sensitive to latency in CPU cache and scheduling.
    • Off: Exclusive CPU cores will not be allocated to workload pods. Select Off if you want a large pool of shareable CPU cores.
    -
    -
    -
    -

  3. Click Next: Confirm to review the configurations and change them if required.
  4. Click Submit.

    It takes about 6 to 10 minutes to create a cluster. You can click Back to Cluster List to perform other operations on the cluster or click Go to Cluster Events to view the cluster details.

    -

  5. If the cluster status is Available, the CCE Turbo cluster is successfully created, and Turbo is displayed next to the cluster name.

    -

-
-

Related Operations

-
  • Creating a namespace: You can create multiple namespaces in a cluster and organize resources in the cluster into different namespaces. These namespaces serve as logical groups and can be managed separately. For details about how to create a namespace for a cluster, see Namespaces.
  • Creating a workload: Once the cluster is created, you can use an image to create an application that can be accessed from public networks. For details, see Creating a Deployment, Creating a StatefulSet, or Creating a DaemonSet.
  • Viewing cluster details: Click the cluster name to view cluster details. -
    - - - - - - - - - - - - - - - - - - - - - - -
    Table 4 Details about the created cluster

    Tab

    -

    Description

    -

    Basic Information

    -

    You can view the details and running status of the cluster.

    -

    Monitoring

    -

    You can view the CPU and memory allocation rates of all nodes in the cluster (that is, the maximum allocated amount), as well as the CPU usage, memory usage, and specifications of the master node(s).

    -

    Events

    -
    • View cluster events.
    • Set search criteria, such as the event name or the time segment during which an event is generated, to filter events.
    -

    Auto Scaling

    -

    You can configure auto scaling to add or reduce worker nodes in a cluster to meet service requirements. For details, see Setting Cluster Auto Scaling.

    -

    Clusters of v1.17 do not support auto scaling using AOM. You can use node pools for auto scaling. For details, see Node Pool Overview.

    -

    kubectl

    -

    To access a Kubernetes cluster from a PC, you need to use the Kubernetes command line tool kubectl. For details, see Connecting to a Cluster Using kubectl.

    -

    Resource Tags

    -

    Resource tags can be added to classify resources.

    -

    You can create predefined tags in Tag Management Service (TMS). Predefined tags are visible to all service resources that support the tagging function. You can use predefined tags to improve tag creation and resource migration efficiency.

    -

    CCE will automatically create the "CCE-Dynamic-Provisioning-Node=Node ID" tag. A maximum of 5 tags can be added.

    -
    -
    -
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0300.html b/docs/cce/umn/cce_01_0300.html index c81c8574..a1798429 100644 --- a/docs/cce/umn/cce_01_0300.html +++ b/docs/cce/umn/cce_01_0300.html @@ -8,14 +8,29 @@ -

2022-08-27

+

2023-02-10

+ + + + +

2022-12-20

+ + + + +

2022-11-21

+ +

Added Best Practice.

+ + +

2022-08-27

EulerOS 2.9 is supported. For details, see OS Patch Notes for Cluster Nodes.

2022-07-13

-

Supported egress rules. For details, see Network Policies.

+

Supported egress rules. For details, see Network Policies.

2022-05-24

@@ -30,33 +45,33 @@

2022-04-14

-

Allowed cluster upgrade from v1.19 to v1.21. For details, see Performing In-place Upgrade.

+

Allowed cluster upgrade from v1.19 to v1.21. For details, see Performing In-place Upgrade.

2022-03-24

- +

2022-02-17

Supported the creation of CCE Turbo Cluster.

- +

2021-12-14

-

The validity period of the certificate of cluster can be configured. For details, see Obtaining a Cluster Certificate.

+

The validity period of the certificate of cluster can be configured. For details, see Obtaining a Cluster Certificate.

2021-11-30

- +

2021-11-15

- +

2021-06-23

@@ -72,7 +87,7 @@

2021-01-30

- +

2020-11-02

@@ -97,7 +112,7 @@

2020-02-21

-

Updated Namespaces.

+

Updated Namespaces.

2019-10-30

@@ -122,7 +137,7 @@

2019-07-30

- +

2019-07-29

@@ -237,7 +252,7 @@

2018-09-15

- +

2018-09-05

diff --git a/docs/cce/umn/cce_01_0301.html b/docs/cce/umn/cce_01_0301.html deleted file mode 100644 index 11d41108..00000000 --- a/docs/cce/umn/cce_01_0301.html +++ /dev/null @@ -1,39 +0,0 @@ - - -

Performing In-place Upgrade (v1.15 and Later)

-

Scenario

On the CCE console, you can perform an in-place cluster upgrade to use new cluster features.

-

Before the upgrade, learn about the target versions to which your CCE cluster can be upgraded, the available upgrade methods, and the upgrade impacts. For details, see Overview and Before You Start.

-
-

Description

  • An in-place upgrade updates the Kubernetes components on cluster nodes, without changing their OS version.
  • Data plane nodes are upgraded in batches. By default, they are prioritized based on their CPU, memory, and PodDisruptionBudgets (PDBs). You can also set the priorities according to your service requirements.
-
-

Precautions

  • During the cluster upgrade, the system will automatically upgrade add-ons to a version compatible with the target cluster version. Do not uninstall or reinstall add-ons during the cluster upgrade.
  • Before the upgrade, ensure that all add-ons are running. If an add-on fails to be upgraded, rectify the fault and try again.
  • During the upgrade, CCE checks the add-on running status. Some add-ons (such as coredns) require at least two nodes to run normally. In this case, at least two nodes must be available for the upgrade.
-

For more information, see Before You Start.

-
-

Procedure

This section describes how to upgrade a CCE cluster of v1.15 or later. For other versions, see Performing Replace/Rolling Upgrade (v1.13 and Earlier).

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Clusters. In the cluster list, view the cluster version.
  2. Click More for the cluster you want to upgrade, and select Upgrade from the drop-down menu.

    Figure 1 Upgrading a cluster
    -
    • If your cluster version is up-to-date, the Upgrade button is grayed out.
    • If the cluster status is Unavailable, the upgrade flag in the upper right corner of the cluster card view will be grayed out. Check the cluster status by referring to Before You Start.
    -
    -

  3. (Optional) On the cluster upgrade confirmation page, click Backup to back up the entire master node. This backup mode is recommended.

    A manual confirmation is required for backing up the entire master node. The backup process uses the Cloud Backup and Recovery (CBR) service and takes about 20 minutes. If there are many cloud backup tasks at the current site, the backup time may be prolonged. You are advised to back up the master node.

    -
    Figure 2 Determining whether to back up the entire master node
    -

  4. Check the version information, last update/upgrade time, available upgrade version, and upgrade history of the current cluster.

    The cluster upgrade goes through pre-upgrade check, add-on upgrade/uninstallation, master node upgrade, worker node upgrade, and post-upgrade processing.

    -
    Figure 3 Cluster upgrade page
    -

  5. Click Upgrade on the right. Set the upgrade parameters.

    • Available Versions: Select v1.19 in this example.
    • Cluster Backup: A manual confirmation is required for backing up the entire master node. The backup process uses the Cloud Backup and Recovery (CBR) service and takes about 20 minutes. If there are many cloud backup tasks at the current site, the backup time may be prolonged.
    • Add-on Upgrade Configuration: Add-ons that have been installed in your cluster are listed. During the cluster upgrade, the system automatically upgrades the add-ons to be compatible with the target cluster version. You can click Set to re-define the add-on parameters.

      If a red dot is displayed on the right of an add-on, the add-on is incompatible with the target cluster version. During the upgrade, the add-on will be uninstalled and then re-installed. Ensure that the add-on parameters are correctly configured.

      -
      -
    • Node Upgrade Configuration: Before setting the node upgrade priority, you need to select a node pool. Nodes and node pools will be upgraded according to the priorities you specify. You can set the maximum number of nodes to be upgraded in batch, or set priorities for nodes to be upgraded. If you do not set this parameter, the system will determine the nodes to upgrade in batches based on specific conditions.
      • Add Upgrade Priority: Add upgrade priorities for node pools.
      • Add Node Priority: After adding a node pool priority, you can set the upgrade sequence of nodes in the node pool. The system upgrades nodes in the sequence you specify. If you skip this setting, the system upgrades nodes based on the default policy.
      -
    -
    Figure 4 Configuring upgrade parameters
    -

  6. Read the upgrade instructions carefully, and select I have read the upgrade instructions. Click Upgrade.

    Figure 5 Final step before upgrade
    -

  7. After you click Upgrade, the cluster upgrade starts. You can view the upgrade process in the lower part of the page.

    During the upgrade, you can click Suspend on the right to suspend the cluster upgrade. To continue the upgrade, click Continue.

    -

    -
    Figure 6 Cluster upgrade in process
    -

  8. When the upgrade progress reaches 100%, the cluster is upgraded. The version information will be properly displayed, and no upgrade is required.

    Figure 7 Upgrade completed
    -

  9. After the upgrade is complete, verify the cluster Kubernetes version on the Clusters page.

    Figure 8 Verifying the upgrade success
    -
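    You can also confirm the node versions with kubectl, assuming kubectl has been connected to the cluster. The VERSION column should show the target Kubernetes version for every node:

    kubectl get nodes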

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0302.html b/docs/cce/umn/cce_01_0302.html deleted file mode 100644 index cbbc3e0c..00000000 --- a/docs/cce/umn/cce_01_0302.html +++ /dev/null @@ -1,191 +0,0 @@ - - -

Before You Start

-

Before the upgrade, you can check whether your cluster can be upgraded and which versions are available on the CCE console. For details, see Overview.

-

Precautions

  • Upgraded clusters cannot be rolled back. Therefore, perform the upgrade during off-peak hours to minimize the impact on your services.
  • Do not shut down or restart nodes during cluster upgrade. Otherwise, the upgrade fails.
  • Before upgrading a cluster, disable auto scaling policies to prevent node scaling during the upgrade. Otherwise, the upgrade fails.
  • If you locally modify the configuration of a cluster node, the cluster upgrade may fail or the configuration may be lost after the upgrade. Therefore, modify the configurations on the CCE console (cluster or node pool list page) so that they will be automatically inherited during the upgrade.
  • During the cluster upgrade, the running workload services will not be interrupted, but access to the API server will be temporarily interrupted.
  • Before upgrading the cluster, check whether the cluster is healthy.
  • To ensure data security, you are advised to back up data before upgrading the cluster. During the upgrade, you are not advised to perform any operations on the cluster.
  • CCE 1.17 and later versions do not support workload scaling using the AOM service. Before and after the upgrade, switch scaling policies by referring to Switching from AOM to HPA for Auto Scaling.
-
-

Notes and Constraints

  • Currently, only CCE clusters consisting of VM nodes can be upgraded.
  • If initContainer or Istio is used in the in-place upgrade of a cluster of v1.15, pay attention to the following restrictions:

    In kubelet 1.16 and later versions, QoS classes are calculated differently from earlier versions. In kubelet 1.15 and earlier, only containers in spec.containers are counted; in kubelet 1.16 and later, containers in both spec.containers and spec.initContainers are counted. If the QoS class of a pod changes after the upgrade, the containers in the pod restart. You are advised to align the resource settings of the init and service containers before the upgrade to avoid this problem. For details, see Table 1 and the example after the table.

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Table 1 QoS class changes before and after the upgrade

    Init Container (Calculated Based on spec.initContainers)

    -

    Service Container (Calculated Based on spec.containers)

    -

    Pod (Calculated Based on spec.containers and spec.initContainers)

    -

    Impacted or Not

    -

    Guaranteed

    -

    Besteffort

    -

    Burstable

    -

    Yes

    -

    Guaranteed

    -

    Burstable

    -

    Burstable

    -

    No

    -

    Guaranteed

    -

    Guaranteed

    -

    Guaranteed

    -

    No

    -

    Besteffort

    -

    Besteffort

    -

    Besteffort

    -

    No

    -

    Besteffort

    -

    Burstable

    -

    Burstable

    -

    No

    -

    Besteffort

    -

    Guaranteed

    -

    Burstable

    -

    Yes

    -

    Burstable

    -

    Besteffort

    -

    Burstable

    -

    Yes

    -

    Burstable

    -

    Burstable

    -

    Burstable

    -

    No

    -

    Burstable

    -

    Guaranteed

    -

    Burstable

    -

    Yes

    -
    -
    -
-
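    As an illustration of the Besteffort/Guaranteed row in Table 1 (a hypothetical workload), the pod below is Guaranteed in kubelet 1.15, because only the service container with equal requests and limits is counted, but becomes Burstable in kubelet 1.16 and later, because the init container without resource settings is also counted. Adding the same requests and limits to the init container before the upgrade keeps the pod Guaranteed and avoids the restart.

    apiVersion: v1
    kind: Pod
    metadata:
      name: qos-example
    spec:
      initContainers:
        - name: init
          image: busybox
          command: ['sh', '-c', 'echo init done']
          # No resources here: this init container is BestEffort, which changes
          # the pod QoS class to Burstable in kubelet 1.16 and later.
      containers:
        - name: app
          image: nginx:perl
          resources:               # Equal requests and limits: Guaranteed container
            requests:
              cpu: 250m
              memory: 512Mi
            limits:
              cpu: 250m
              memory: 512Mi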
-

Performing Pre-upgrade Check

Before upgrading a cluster, check the health status of the cluster and nodes and ensure that they are available.

-

Method 1: Use the console.

-

On the CCE console, click Resource Management in the navigation pane, and click Clusters and Nodes separately to check whether the cluster and nodes are normal.

-

Method 2: Run kubectl commands.

-
  1. Run the following command to verify that all cluster modules are in the Healthy state:

    kubectl get cs

    -
    Information similar to the following is displayed:
     NAME                 STATUS    MESSAGE              ERROR
    - scheduler            Healthy   ok
    - controller-manager   Healthy   ok
    - etcd-0               Healthy   {"health": "true"}
    - etcd-1               Healthy   {"health": "true"}
    - etcd-2               Healthy   {"health": "true"}
    -
    -

    In the command output, the value of STATUS must be Healthy for all items.

    -
    -

  2. Run the following command to verify that all nodes are in the Ready state:

    kubectl get nodes

    All nodes must be in the Ready state.

    -
    -
     NAME                   STATUS    ROLES     AGE       VERSION
    - xxx.xxx.xx.xx   Ready     <none>    38d       v1.9.7-r1
    - xxx.xxx.xx.xx   Ready     <none>    38d       v1.9.7-r1
    - xxx.xxx.xx.xx   Ready     <none>    38d       v1.9.7-r1
    -
    -

-
-

Pre-upgrade Checklist

Before upgrading a cluster, follow the pre-upgrade checklist to identify risks and problems in advance.

- -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Table 2 Cluster upgrade check items

Module

-

Item

-

Cluster

-

Check whether the node IP addresses (including EIPs) of the current cluster are used in other configurations or whitelists.

-

Perform the pre-upgrade check.

-

Workload

-

Record the number and status of workloads for comparison after the upgrade.

-

For the databases you use (such as Direct Connect, Redis, and MongoDB), you need to consider the changes in their whitelists, routes, or security group policies in advance.

-

Storage

-

Record the storage status to check whether storage resources are lost after the upgrade.

-

Networking

-

Check and back up the load balancing services and ingresses.

-

If Direct Connect is used, check whether the upgrade causes changes in the IP addresses of nodes or pods where services are deployed. To handle changes, you need to enable routes on Direct Connect in advance.

-

Add-on

-

When Kubernetes 1.9 is upgraded to 1.11, the kube-dns of the cluster is uninstalled and replaced with CoreDNS. Back up the DNS address configured in kube-dns so that you can use it in CoreDNS when the domain name resolution is abnormal.

-

O&M

-

Private configurations: Check whether data plane passwords, certificates, and environment variables are configured for nodes or containers in the cluster before the upgrade. If a container is restarted (for example, the node is abnormal and the pod is re-scheduled), the configurations will be lost and your service will be abnormal.

-

Check and back up kernel parameters or system configurations.

-
-
-
-

Upgrade Backup

Currently, there are two backup modes for cluster upgrade:

-
  • etcd database backup: CCE automatically backs up the etcd database during the cluster upgrade.
  • Master node backup (recommended, manual confirmation required): On the upgrade confirmation page, click Backup to back up the entire master node of the cluster. The backup process uses the Cloud Backup and Recovery (CBR) service and takes about 20 minutes. If there are many cloud backup tasks at the current site, the backup time may be prolonged.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0305.html b/docs/cce/umn/cce_01_0305.html deleted file mode 100644 index cdbdbd86..00000000 --- a/docs/cce/umn/cce_01_0305.html +++ /dev/null @@ -1,22 +0,0 @@ - - -

Storage (FlexVolume)

-

-
-
- -
- diff --git a/docs/cce/umn/cce_01_0306.html b/docs/cce/umn/cce_01_0306.html deleted file mode 100644 index 9fb116a9..00000000 --- a/docs/cce/umn/cce_01_0306.html +++ /dev/null @@ -1,60 +0,0 @@ - - -

FlexVolume Overview

-

In container storage, you can use different types of volumes and mount them to containers in pods as many as you want.

-

In CCE, container storage is backed both by Kubernetes-native objects, such as emptyDir/hostPath volumes, secrets, and ConfigMaps, and by storage services.

-

CCE clusters of 1.13 and earlier versions use the storage-driver add-on to connect to storage services to support Kubernetes FlexVolume driver for container storage. The FlexVolume driver has been deprecated in favor of the Container Storage Interface (CSI). The everest add-on for CSI is installed in CCE clusters of 1.15 and later versions by default. For details, see Overview.

-
  • In CCE clusters earlier than Kubernetes 1.13, end-to-end capacity expansion of container storage is not supported, and the PVC capacity is inconsistent with the storage capacity.
  • In a cluster of v1.13 or earlier, when an upgrade or bug fix is available for storage functionalities, you only need to install or upgrade the storage-driver add-on. Upgrading the cluster or creating a cluster is not required.
-
-

Notes and Constraints

  • For clusters created in CCE, Kubernetes v1.15.11 is a transitional version in which the FlexVolume plug-in (storage-driver) is compatible with the CSI plug-in (everest). Clusters of v1.17 and later versions do not support FlexVolume any more. You need to use the everest add-on. For details about CSI and FlexVolume, see Differences Between CSI and FlexVolume Plug-ins.
  • The FlexVolume plug-in will be maintained by Kubernetes developers, but new functionality will only be added to CSI. You are advised not to create storage that connects to the FlexVolume plug-in (storage-driver) in CCE any more. Otherwise, the storage resources may not function normally.
-
-

Differences Between CSI and FlexVolume Plug-ins

-
- - - - - - - - - - - - - - - - -
Table 1 CSI and FlexVolume

Kubernetes Solution

-

CCE Add-on

-

Feature

-

Usage

-

CSI

-

everest

-

CSI was developed as a standard for exposing arbitrary block and file storage systems to containerized workloads. Using CSI, third-party storage providers can deploy plugins exposing new storage systems in Kubernetes without having to touch the core Kubernetes code. In CCE, the everest add-on is installed by default in clusters of Kubernetes v1.15 and later to connect to storage services (EVS, OBS, SFS, and SFS Turbo).

-

The everest add-on consists of two parts:

-
  • everest-csi-controller for storage volume creation, deletion, capacity expansion, and cloud disk snapshots
  • everest-csi-driver for mounting, unmounting, and formatting storage volumes on nodes
-

For details, see everest.

-

The everest add-on is installed by default in clusters of v1.15 and later. CCE will mirror the Kubernetes community by providing continuous support for updated CSI capabilities.

-

FlexVolume

-

storage-driver

-

FlexVolume is an out-of-tree plugin interface that has existed in Kubernetes since version 1.2 (before CSI). CCE provided FlexVolume volumes through the storage-driver add-on installed in clusters of Kubernetes v1.13 and earlier versions. This add-on connects clusters to storage services (EVS, OBS, SFS, and SFS Turbo).

-

For details, see storage-driver.

-

For clusters of v1.13 or earlier that have been created, the installed FlexVolume plug-in (the storage-driver add-on in CCE) can still be used. CCE stops providing update support for this add-on, and you are advised to upgrade these clusters.

-
-
-
  • A cluster can use only one type of storage plug-ins.
  • The FlexVolume plug-in cannot be replaced by the CSI plug-in in clusters of v1.13 or earlier. You can only upgrade these clusters. For details, see Cluster Upgrade Between Major Versions.
-
-
-

Notice on Using Add-ons

  • To use the CSI plug-in (the everest add-on in CCE), your cluster must be using Kubernetes 1.15 or later. This add-on is installed by default when you create a cluster of v1.15 or later. The FlexVolume plug-in (the storage-driver add-on in CCE) is installed by default when you create a cluster of v1.13 or earlier.
  • If your cluster is upgraded from v1.13 to v1.15, storage-driver is replaced by everest (v1.1.6 or later) for container storage. The takeover does not affect the original storage functions.
  • In version 1.2.0 of the everest add-on, key authentication is optimized when OBS is used. After the everest add-on is upgraded from a version earlier than 1.2.0, you need to restart all workloads that use OBS in the cluster. Otherwise, workloads may not be able to use OBS.
-
-
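For example, after upgrading the everest add-on from a version earlier than 1.2.0, a workload that mounts an OBS volume can be restarted from the command line. The following is a minimal sketch; obs-consumer is a hypothetical Deployment name and default is only an example namespace.

# Trigger a rolling restart of a Deployment that uses an OBS volume.
kubectl -n default rollout restart deployment obs-consumer
# Wait until all pods have been recreated.
kubectl -n default rollout status deployment obs-consumer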

Checking Storage Add-ons

  1. Log in to the CCE console.
  2. In the navigation tree on the left, click Add-ons.
  3. Click the Add-on Instance tab.
  4. Select a cluster in the upper right corner. The default storage add-on installed during cluster creation is displayed.
-
-
-
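In addition to the console procedure above, you can check which storage add-on is running by listing its pods in the kube-system namespace. This is a sketch that relies only on the component names described in this section (everest-csi-controller, everest-csi-driver, and storage-driver).

# List storage add-on pods; everest components appear in clusters of v1.15 and later,
# storage-driver components appear in clusters of v1.13 and earlier.
kubectl get pods -n kube-system | grep -E 'everest|storage-driver'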
- -
- diff --git a/docs/cce/umn/cce_01_0307.html b/docs/cce/umn/cce_01_0307.html deleted file mode 100644 index 55f4274d..00000000 --- a/docs/cce/umn/cce_01_0307.html +++ /dev/null @@ -1,235 +0,0 @@ - - -

Overview

-

Volume

On-disk files in a container are ephemeral: they are lost when the container crashes, and they are difficult to share between containers running together in a pod. The Kubernetes volume abstraction solves both of these problems. Volumes cannot be created independently; they are defined in the pod spec.

-

All containers in a pod can access its volumes, provided the volumes are mounted into those containers. A volume can be mounted to any directory in a container.

-

The following figure shows how a storage volume is used between containers in a pod.

-

-

A volume will no longer exist if the pod to which it is mounted does not exist. However, files in the volume may outlive the volume, depending on the volume type.

-
-

Volume Types

Volumes can be classified into local volumes and cloud volumes.

-
  • Local volumes
    CCE supports the following types of local volumes. For details about how to use them, see Using Local Disks as Storage Volumes.
    • emptyDir: an empty volume used for temporary storage
    • hostPath: mounts a directory on a host (node) to your container for reading data from the host.
    • ConfigMap: references the data stored in a ConfigMap for use by containers.
    • Secret: references the data stored in a secret for use by containers.
    -
    -
  • Cloud volumes

    CCE supports the following types of cloud volumes:

    -
    • EVS
    • SFS Turbo
    • OBS
    • SFS
    -
-
-
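As an illustration of the local volume types listed above, the following is a minimal sketch of a pod that uses an emptyDir volume for temporary data shared between two containers. The container names and images are examples only.

apiVersion: v1
kind: Pod
metadata:
  name: emptydir-example
spec:
  containers:
  - name: web
    image: nginx
    volumeMounts:
    - name: cache-volume          # The same volume can be mounted by every container in the pod.
      mountPath: /cache
  - name: sidecar
    image: busybox
    command: ["sh", "-c", "sleep 3600"]
    volumeMounts:
    - name: cache-volume
      mountPath: /data
  volumes:
  - name: cache-volume
    emptyDir: {}                  # Deleted together with the pod; use it only for temporary data.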

CSI

You can use Kubernetes Container Storage Interface (CSI) to develop plug-ins to support specific storage volumes.

-

CCE developed the storage add-on everest for you to use cloud storage services, such as EVS and OBS. You can install this add-on when creating a cluster.

-
-

PV and PVC

Kubernetes provides PersistentVolumes (PVs) and PersistentVolumeClaims (PVCs) to abstract details of how storage is provided from how it is consumed. You can request a specific amount of storage when needed, just as pods can request specific levels of resources (CPU and memory).

-
  • PV: A PV is a persistent storage volume in a cluster. Like a node, a PV is a cluster-level resource.
  • PVC: A PVC describes a workload's request for storage resources. This request consumes existing PVs in the cluster. If there is no PV available, underlying storage and PVs are dynamically created. When creating a PVC, you need to describe the attributes of the requested persistent storage, such as the size of the volume and the read/write permissions.
-

You can mount a PVC in a pod so that the pod can use the storage resources of the PV bound to that PVC. The following figure shows the relationship between PVs and PVCs.

-
Figure 1 PVC-to-PV binding
-

PVs describe storage resources in the cluster. PVCs are requests for those resources. The following sections describe how to use kubectl to connect to storage resources.

-

If you do not want to create storage resources or PVs manually, you can use StorageClasses.

-
-
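For reference, a PVC can request a specific, manually created PV by name. The following minimal sketch shows only the binding fields; pv-example is a hypothetical PV name, and the capacity, access mode, and storage class must be compatible with that PV.

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-static-example
  namespace: default
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi              # Must not exceed the capacity of the target PV.
  storageClassName: sas          # Must match the storageClassName of the target PV (example value).
  volumeName: pv-example         # Name of the existing PV to bind to.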

StorageClass

A StorageClass describes the class of storage that can be used in the cluster. You need to specify a StorageClass when creating a PVC or PV. Currently, CCE provides storage classes such as csi-disk, csi-nas, and csi-obs by default. When defining a PVC, you can use a storageClassName to create a PV of the corresponding type and have the underlying storage resources created automatically.

-

You can run the following command to query the storage classes that CCE supports. You can also use the CSI plug-in provided by CCE to customize a storage class, which functions similarly to the default storage classes in CCE.

-
# kubectl get sc
-NAME                PROVISIONER                     AGE
-csi-disk            everest-csi-provisioner         17d          # Storage class for EVS disks
-csi-disk-topology   everest-csi-provisioner         17d          # Storage class for EVS disks with delayed binding
-csi-nas             everest-csi-provisioner         17d          # Storage class for SFS file systems
-csi-obs             everest-csi-provisioner         17d          # Storage class for OBS buckets
-

After a StorageClass is set, PVs can be automatically created and maintained. You only need to specify the StorageClass when creating a PVC, which greatly reduces the workload.

-
-
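For example, a PVC that dynamically provisions an EVS disk through the csi-disk storage class shown above might look like the following. This is a minimal sketch that assumes the everest add-on is installed; the region and AZ labels reuse the example values used elsewhere in this guide, and additional annotations may be required depending on the cluster version.

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-evs-dynamic
  namespace: default
  annotations:
    everest.io/disk-volume-type: SAS                   # EVS disk type (assumes the everest add-on)
  labels:
    failure-domain.beta.kubernetes.io/region: eu-de    # Example region
    failure-domain.beta.kubernetes.io/zone: eu-de-01   # AZ must match the node that will mount the disk
spec:
  accessModes:
  - ReadWriteOnce                                      # EVS disks are non-shared block storage.
  resources:
    requests:
      storage: 10Gi
  storageClassName: csi-disk                           # One of the default storage classes listed above.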

Cloud Services for Container Storage

CCE allows you to mount local and cloud storage volumes listed in Volume Types to your pods. Their features are described below.

-
Figure 2 Volume types supported by CCE
- -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Table 1 Detailed description of cloud storage services

Dimension

-

EVS

-

SFS

-

OBS

-

SFS Turbo

-

Definition

-

EVS offers scalable block storage for cloud servers. With high reliability, high performance, and rich specifications, EVS disks can be used for distributed file systems, dev/test environments, data warehouses, and high-performance computing (HPC) applications.

-

Expandable to petabytes, SFS provides fully hosted shared file storage, highly available and stable to handle data- and bandwidth-intensive applications in HPC, media processing, file sharing, content management, and web services.

-

-

OBS is a stable, secure, and easy-to-use object storage service that lets you inexpensively store data of any format and size. You can use it in enterprise backup/archiving, video on demand (VoD), video surveillance, and many other scenarios.

-

Expandable to 320 TB, SFS Turbo provides a fully hosted shared file storage, highly available and stable to support small files and applications requiring low latency and high IOPS. You can use SFS Turbo in high-traffic websites, log storage, compression/decompression, DevOps, enterprise OA, and containerized applications.

-

Data storage logic

-

Stores raw block data and cannot store files directly. To store files, you need to create a file system on the disk first.

-

Stores files and sorts and displays data in the hierarchy of files and folders.

-

Stores objects. System metadata is automatically generated for stored files and can also be customized by users.

-

Stores files and sorts and displays data in the hierarchy of files and folders.

-

Services

-

Accessible only after being mounted to ECSs or BMSs and initialized.

-

Mounted to ECSs or BMSs using network protocols. A network address must be specified or mapped to a local directory for access.

-

Accessible through the Internet or Direct Connect (DC). You need to specify the bucket address and use transmission protocols such as HTTP and HTTPS.

-

Supports the Network File System (NFS) protocol (NFSv3 only). You can seamlessly integrate existing applications and tools with SFS Turbo.

-

Static provisioning

-

Supported

-

Supported

-

Supported

-

Supported

-

Dynamic provisioning

-

Supported

-

Supported

-

Supported

-

Not supported

-

Features

-

Non-shared storage. Each volume can be mounted to only one node.

-

Shared storage featuring high performance and throughput

-

Shared, user-mode file system

-

Shared storage featuring high performance and bandwidth

-

Usage

-

HPC, enterprise core cluster applications, enterprise application systems, and dev/test

-
NOTE:

The HPC applications referred to here require high-speed, high-IOPS storage, for example, industrial design and energy exploration.

-
-

HPC, media processing, content management, web services, big data, and analysis applications

-
NOTE:

The HPC applications referred to here require high-bandwidth, shared file storage, for example, gene sequencing and image rendering.

-
-

Big data analysis, static website hosting, online video on demand (VoD), gene sequencing, intelligent video surveillance, backup and archiving, and enterprise cloud boxes (web disks)

-

High-traffic websites, log storage, DevOps, and enterprise OA

-

Capacity

-

TB

-

PB

-

EB

-

TB

-

Latency

-

1-2 ms

-

3-10 ms

-

10 ms

-

1-2 ms

-

IOPS/TPS

-

33,000 for a single disk

-

10,000 for a single file system

-

Tens of millions

-

100,000

-

Bandwidth

-

MB/s

-

GB/s

-

TB/s

-

GB/s

-
-
-
-

Notes and Constraints

Secure containers do not support OBS volumes.

-
  • A single user can create a maximum of 100 OBS buckets on the console. If you have a large number of CCE workloads and want to mount an OBS bucket to every workload, you may easily run out of buckets. In this scenario, you are advised to access OBS through the OBS API or SDK instead of mounting OBS buckets to workloads on the console.
  • For clusters earlier than v1.19.10, if an HPA policy is used to scale out a workload with EVS volumes mounted, the existing pods cannot be read or written when a new pod is scheduled to another node.

    For clusters of v1.19.10 and later, if an HPA policy is used to scale out a workload with EVS volume mounted, a new pod cannot be started because EVS disks cannot be attached.

    -
  • When you unmount a subPath volume in a cluster of v1.19 or earlier, all folders in the subpath are traversed. If there are a large number of folders, the traversal takes a long time and so does the volume unmount. You are advised not to create too many folders in the subpath.
  • The maximum size of a single file in an OBS bucket mounted to a CCE cluster is far smaller than the limit defined by obsfs.
-
-

Notice on Using Add-ons

  • To use the CSI plug-in (the everest add-on in CCE), your cluster must be using Kubernetes 1.15 or later. This add-on is installed by default when you create a cluster of v1.15 or later. The FlexVolume plug-in (the storage-driver add-on in CCE) is installed by default when you create a cluster of v1.13 or earlier.
  • If your cluster is upgraded from v1.13 to v1.15, storage-driver is replaced by everest (v1.1.6 or later) for container storage. The takeover does not affect the original storage functions.
  • In version 1.2.0 of the everest add-on, key authentication is optimized when OBS is used. After the everest add-on is upgraded from a version earlier than 1.2.0, you need to restart all workloads that use OBS in the cluster. Otherwise, workloads may not be able to use OBS.
-
-

Differences Between CSI and FlexVolume Plug-ins

-
- - - - - - - - - - - - - - - - -
Table 2 CSI and FlexVolume

Kubernetes Solution

-

CCE Add-on

-

Feature

-

Recommendation

-

CSI

-

Everest

-

CSI was developed as a standard for exposing arbitrary block and file storage systems to containerized workloads. Using CSI, third-party storage providers can deploy plugins exposing new storage systems in Kubernetes without having to touch the core Kubernetes code. In CCE, the everest add-on is installed by default in clusters of Kubernetes v1.15 and later to connect to storage services (EVS, OBS, SFS, and SFS Turbo).

-

The everest add-on consists of two parts:

-
  • everest-csi-controller for storage volume creation, deletion, capacity expansion, and cloud disk snapshots
  • everest-csi-driver for mounting, unmounting, and formatting storage volumes on nodes
-

For details, see everest.

-

The everest add-on is installed by default in clusters of v1.15 and later. CCE will mirror the Kubernetes community by providing continuous support for updated CSI capabilities.

-

FlexVolume

-

storage-driver

-

FlexVolume is an out-of-tree plugin interface that has existed in Kubernetes since version 1.2 (before CSI). CCE provided FlexVolume volumes through the storage-driver add-on installed in clusters of Kubernetes v1.13 and earlier versions. This add-on connects clusters to storage services (EVS, OBS, SFS, and SFS Turbo).

-

For details, see storage-driver.

-

For clusters of v1.13 or earlier that have been created, the installed FlexVolume plug-in (the storage-driver add-on in CCE) can still be used. CCE stops providing update support for this add-on, and you are advised to upgrade these clusters.

-
-
-
  • A cluster can use only one type of storage plug-ins.
  • The FlexVolume plug-in cannot be replaced by the CSI plug-in in clusters of v1.13 or earlier. You can only upgrade these clusters. For details, see Cluster Upgrade Between Major Versions.
-
-
-

Checking Storage Add-ons

  1. Log in to the CCE console.
  2. In the navigation tree on the left, click Add-ons.
  3. Click the Add-on Instance tab.
  4. Select a cluster in the upper right corner. The default storage add-on installed during cluster creation is displayed.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0310.html b/docs/cce/umn/cce_01_0310.html deleted file mode 100644 index e76e197b..00000000 --- a/docs/cce/umn/cce_01_0310.html +++ /dev/null @@ -1,14 +0,0 @@ - - -

Overview

-

To achieve persistent storage, CCE allows you to mount storage volumes created from Elastic Volume Service (EVS) disks to a path in a container. When the container is migrated, the mounted EVS volumes are migrated along with it. Because the data is stored on the EVS disk rather than inside the container, it is preserved even when the container is deleted.

-
Figure 1 Mounting EVS volumes to CCE
-

Description

  • User-friendly: Similar to formatting disks for on-site servers in traditional layouts, you can format block storage (disks) mounted to cloud servers, and create file systems on them.
  • Data isolation: Each server uses an independent block storage device (disk).
  • Private network: Users can access data only over the private networks of data centers.
  • Capacity and performance: The capacity of a single volume is limited (TB-level), but the performance is excellent (ms-level read/write I/O latency).
  • Restriction: EVS disks that have partitions or have non-ext4 file systems cannot be imported.
  • Applications: HPC, enterprise core applications running in clusters, enterprise application systems, and development and testing. These volumes are often used by single-pod Deployments and jobs, or exclusively by each pod in a StatefulSet. EVS disks are non-shared storage and cannot be attached to multiple nodes at the same time. If two pods are configured to use the same EVS disk and the two pods are scheduled to different nodes, one pod cannot be started because the EVS disk cannot be attached to it.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0311.html b/docs/cce/umn/cce_01_0311.html deleted file mode 100644 index c4f20439..00000000 --- a/docs/cce/umn/cce_01_0311.html +++ /dev/null @@ -1,144 +0,0 @@ - - -

Using EVS Volumes

-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

  • EVS disks cannot be attached across AZs and cannot be used by multiple workloads, multiple pods of the same workload, or multiple jobs.
  • Data in a shared disk cannot be shared between nodes in a CCE cluster. If the same EVS disk is attached to multiple nodes, read and write conflicts and data cache conflicts may occur. When creating a Deployment, you are advised to create only one pod if you want to use EVS disks.
  • When you create a StatefulSet and add a cloud storage volume, existing EVS volumes cannot be used.
  • EVS disks that have partitions or have non-ext4 file systems cannot be imported.
  • Container storage in CCE clusters of Kubernetes 1.13 or later versions supports encryption. Currently, E2E encryption is supported only in certain regions.
  • Volumes cannot be created in specified enterprise projects. Only the default enterprise project is supported.
  • The following operations apply to clusters of Kubernetes 1.13 or earlier.
-
-

Buying an EVS Disk

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. Click Create EVS Disk.
  2. Configure basic disk information. Table 1 describes the parameters.

    -

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Table 1 Configuring basic disk information

    Parameter

    -

    Description

    -

    * PVC Name

    -

    New PVC Name: name of the PVC to be created. A storage volume is automatically created when a PVC is created. One PVC corresponds to one storage volume. The storage volume name is automatically generated when the PVC is created.

    -

    Cluster Name

    -

    Cluster where the EVS disk is deployed.

    -

    Namespace

    -

    Namespace where the EVS disk is deployed. You can retain the default value or specify one.

    -

    Volume Capacity (GB)

    -

    Size of the storage to be created.

    -

    Access Mode

    -

    Access permissions of user applications on storage resources (PVs).

    -
    • ReadWriteOnce (RWO): The volume can be mounted as read-write by a single node, and data reading and writing are supported based on a non-shared EVS volume. EVS volumes in RWO mode are supported since v1.13.10-r1.
    -

    AZ

    -

    AZ to which the disk belongs.

    -

    Type

    -

    Type of the new EVS disk.

    -
    • Common I/O: uses Serial Advanced Technology Attachment (SATA) drives to store data.
    • High I/O: uses serial attached SCSI (SAS) drives to store data.
    • Ultra-high I/O: uses solid state disk (SSD) drives to store data.
    -

    Encryption

    -

    KMS Encryption is deselected by default.

    -

    After KMS Encryption is selected, Key Management Service (KMS), an easy-to-use and highly secure cloud service for your keys, will be used for EVS disks. If no agency has been created, click Create Agency and set the following parameters:

    -
    • Agency Name: Agencies can be used to assign permissions to trusted accounts or cloud services for a specific period of time. If no agency is created, click Create Agency. The agency name EVSAccessKMS indicates that EVS is granted the permission to access KMS. After EVS is authorized successfully, it can obtain KMS keys to encrypt and decrypt EVS disks.
    • Key Name: After a key is created, it can be loaded and used in containerized applications.
    • Key ID: generated by default.
    -

    This function is supported only for clusters of v1.13.10 and later in certain regions.

    -
    -
    -

  3. Review your order, click Submit, and wait until the creation is successful.

    The EVS disk is displayed in the list. When its status becomes Normal, the disk has been created successfully.

    -

  4. Click the volume name to view detailed information about the volume.
-
-

Adding an EVS Volume

  1. Create a workload or job by referring to Creating a Deployment, Creating a StatefulSet, or Creating a Job. During creation, expand Data Storage after adding a container. On the Cloud Volume tab page, click Add Cloud Volume.
  2. Set the storage volume type to EVS.

    -

    - - - - - - - - - - - - - - - - - - -
    Table 2 Parameters required for mounting an EVS volume

    Parameter

    -

    Description

    -

    Type

    -

    EVS: You can use EVS disks the same way you use traditional hard disks on servers. EVS disks deliver higher data reliability and I/O throughput and are easy to use. They can be used for file systems, databases, or other system software and applications that require block storage resources.

    -
    CAUTION:
    • To attach an EVS disk to a workload, you must set the number of pods to 1 when creating the workload. If multiple pods are created, you cannot attach EVS disks.
    • When you create a StatefulSet and add a cloud storage volume, existing EVS volumes cannot be used.
    • EVS disks cannot be attached across AZs and cannot be used by multiple workloads, multiple pods of the same workload, or multiple jobs.
    -
    -

    Allocation Mode

    -

    Manual

    -

    Select a created disk. If no disk is available, follow the prompts to create one.

    -

    For the same cluster and namespace, you can use an existing storage volume when creating a Deployment (with Allocation Mode set to Manual).

    -

    When creating a StatefulSet, you can only use a volume automatically allocated by the system (only Automatic is available for Allocation Mode).

    -

    Automatic

    -

    If you select Automatic, you need to configure the following items:

    -
    1. Access Mode: permissions of user applications on storage resources (PVs).
      • ReadWriteOnce (RWO): A non-shared EVS volume is mounted as read-write to a pod by a single node. EVS volumes in RWO mode are supported since v1.13.10-r1.
      -
    2. Availability Zone: AZ where the storage volume is located. Only the AZ where the worker node is located can be selected.
    3. Sub-Type: Select a storage subtype.
      • Common I/O: uses Serial Advanced Technology Attachment (SATA) drives to store data.
      • High I/O: uses serial attached SCSI (SAS) drives to store data.
      • Ultra-high I/O: uses solid state disk (SSD) drives to store data.
      -
    4. Storage Capacity: Enter the storage capacity in the unit of GB. Ensure that the storage capacity quota is not exceeded; otherwise, creation will fail.
    5. After you select KMS Encryption, Key Management Service (KMS), an easy-to-use and highly secure service, will be enabled for EVS disks. This function is supported only for clusters of v1.13.10 and later in certain regions. If no agency has been created, click Create Agency and set the following parameters:
      • Agency Name: Agencies can be used to assign permissions to trusted accounts or cloud services for a specific period of time. If no agency is created, click Create Agency. The agency name EVSAccessKMS indicates that EVS is granted the permission to access KMS. After EVS is authorized successfully, it can obtain KMS keys to encrypt and decrypt EVS disks.
      • Key Name: After a key is created, it can be loaded and used in containerized applications.
      • Key ID: generated by default.
      -
    -

    Add Container Path

    -
    1. Click Add Container Path.
    2. Container Path: Enter the container path to which the volume is mounted.
      NOTICE:
      • Do not mount the volume to a system directory such as / or /var/run; this action may cause container errors. You are advised to mount the volume to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
      • If the volume is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.
      -
      -
    3. Set permissions.
      • Read-only: You can only read the data in the mounted volumes.
      • Read/Write: You can modify the data in the mounted volumes. Newly written data is not migrated if the container is migrated, which may cause data loss.
      -
    -
    -
    -

  3. Click OK.
-
-

Importing an EVS Disk

CCE allows you to import existing EVS disks.

-

An EVS disk can be imported into only one namespace. If an EVS disk has already been imported into a namespace, it is invisible in other namespaces and cannot be imported again. If you want to import an EVS disk that has been formatted with an ext4 file system, ensure that no partition has been created on the disk. Otherwise, data may be lost.

-
-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. On the EVS tab page, click Import.
  2. Select one or more EVS disks that you want to import. Then, click OK.
-
-

Unbinding an EVS Disk

After an EVS volume is created or imported, it is automatically bound to the current cluster and cannot be used by other clusters. After the volume is unbound from the cluster, other clusters can use it.

-

If the EVS volume has been mounted to a workload, it cannot be unbound from the cluster.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. In the EVS disk list, click Unbind next to the target EVS disk.
  2. Confirm the unbinding, and click OK.
-
-

Related Operations

After an EVS volume is created, you can perform operations described in Table 3. -
- - - - - - - -
Table 3 Other operations

Operation

-

Description

-

Deleting an EVS volume

-
  1. Select the EVS volume to be deleted and click Delete in the Operation column.
  2. Follow the prompts to delete the EVS volume.
-
-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0312.html b/docs/cce/umn/cce_01_0312.html deleted file mode 100644 index 36d6b457..00000000 --- a/docs/cce/umn/cce_01_0312.html +++ /dev/null @@ -1,78 +0,0 @@ - - -

(kubectl) Automatically Creating an EVS Disk

-

Scenario

CCE supports creating EVS volumes through PersistentVolumeClaims (PVCs).

-
-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the pvc-evs-auto-example.yaml file, which is used to create a PVC.

    touch pvc-evs-auto-example.yaml

    -

    vi pvc-evs-auto-example.yaml

    -
    Example YAML file for clusters of v1.9, v1.11, and v1.13:
    apiVersion: v1
    -kind: PersistentVolumeClaim
    -metadata:
    -  name: pvc-evs-auto-example
    -  namespace: default
    -  annotations:
    -    volume.beta.kubernetes.io/storage-class: sas
    -  labels:
    -    failure-domain.beta.kubernetes.io/region: eu-de
    -    failure-domain.beta.kubernetes.io/zone: eu-de-01
    -spec:
    -  accessModes:
    -  - ReadWriteOnce
    -  resources:
    -    requests:
    -      storage: 10Gi
    - -
    - - - - - - - - - - - - - - - - - - - -
    Table 1 Key parameters

    Parameter

    -

    Description

    -

    volume.beta.kubernetes.io/storage-class

    -

    EVS disk type. The value is in lowercase.

    -

    Supported values: Common I/O (SATA), High I/O (SAS), and Ultra-high I/O (SSD)

    -

    failure-domain.beta.kubernetes.io/region

    -

    Region where the cluster is located.

    -

    For details about the value of region, see Regions and Endpoints.

    -

    failure-domain.beta.kubernetes.io/zone

    -

    AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

    -

    For details about the value of zone, see Regions and Endpoints.

    -

    storage

    -

    Storage capacity in the unit of Gi.

    -

    accessModes

    -

    Read/write mode of the volume.

    -

    You can set this parameter to ReadWriteMany (shared volume) or ReadWriteOnce (non-shared volume).

    -
    -
    -
    -

  3. Run the following command to create a PVC.

    kubectl create -f pvc-evs-auto-example.yaml

    -

    After the command is executed, an EVS disk is created in the partition where the cluster is located. Choose Storage > EVS to view the EVS disk. Alternatively, you can view the EVS disk based on the volume name on the EVS console.

    -

-
-
-
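You can also verify the result of the procedure above from the command line. The output shown here is illustrative; the volume name is generated automatically and will differ in your cluster.

kubectl get pvc pvc-evs-auto-example -n default
# NAME                   STATUS   VOLUME         CAPACITY   ACCESS MODES   STORAGECLASS   AGE
# pvc-evs-auto-example   Bound    pvc-xxxxxxxx   10Gi       RWO            sas            1m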
- -
- diff --git a/docs/cce/umn/cce_01_0313.html b/docs/cce/umn/cce_01_0313.html deleted file mode 100644 index 76e4d343..00000000 --- a/docs/cce/umn/cce_01_0313.html +++ /dev/null @@ -1,555 +0,0 @@ - - -

(kubectl) Creating a PV from an Existing EVS Disk

-

Scenario

CCE allows you to create a PersistentVolume (PV) using an existing EVS disk. After the PV is created, you can create a PersistentVolumeClaim (PVC) and bind it to the PV.

-
-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Log in to the EVS console, create an EVS disk, and record the volume ID, capacity, and disk type of the EVS disk.
  2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  3. Create two YAML files for creating the PersistentVolume (PV) and PersistentVolumeClaim (PVC). Assume that the file names are pv-evs-example.yaml and pvc-evs-example.yaml.

    touch pv-evs-example.yaml pvc-evs-example.yaml

    - -
    - - - - - - - - - - - - - - - - - -

    Kubernetes Version

    -

    Description

    -

    YAML Example

    -

    1.11.7 ≤ K8s version ≤ 1.13

    -

    Clusters from v1.11.7 to v1.13

    -

    Example YAML

    -

    1.11 ≤ K8s version < 1.11.7

    -

    Clusters from v1.11 to v1.11.7

    -

    Example YAML

    -

    K8s version = 1.9

    -

    Clusters of v1.9

    -

    Example YAML

    -
    -
    -

    Clusters from v1.11.7 to v1.13

    -
    • Example YAML file for the PV:
      apiVersion: v1 
      -kind: PersistentVolume 
      -metadata: 
      -  labels: 
      -    failure-domain.beta.kubernetes.io/region: eu-de
      -    failure-domain.beta.kubernetes.io/zone:  eu-de-01
      -  annotations:
      -    pv.kubernetes.io/provisioned-by: flexvolume-huawei.com/fuxivol
      -  name: pv-evs-example 
      -spec: 
      -  accessModes: 
      -  - ReadWriteOnce 
      -  capacity: 
      -    storage: 10Gi 
      -  claimRef:
      -    apiVersion: v1
      -    kind: PersistentVolumeClaim
      -    name: pvc-evs-example
      -    namespace: default
      -  flexVolume: 
      -    driver: huawei.com/fuxivol 
      -    fsType: ext4 
      -    options:
      -      disk-mode: SCSI
      -      fsType: ext4 
      -      volumeID: 0992dbda-6340-470e-a74e-4f0db288ed82 
      -  persistentVolumeReclaimPolicy: Delete 
      -  storageClassName: sas
      - -
      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      Table 1 Key parameters

      Parameter

      -

      Description

      -

      failure-domain.beta.kubernetes.io/region

      -

      Region where the cluster is located.

      -

      For details about the value of region, see Regions and Endpoints.

      -

      failure-domain.beta.kubernetes.io/zone

      -

      AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

      -

      For details about the value of zone, see Regions and Endpoints.

      -

      storage

      -

      EVS volume capacity in the unit of Gi.

      -

      storageClassName

      -

      EVS disk type. Supported values: Common I/O (SATA), High I/O (SAS), and Ultra-high I/O (SSD)

      -

      driver

      -

      Storage driver.

      -

      For EVS disks, set this parameter to huawei.com/fuxivol.

      -

      volumeID

      -

      Volume ID of the EVS disk.

      -

      To obtain the volume ID, log in to the CCE console, choose Resource Management > Storage, click the PVC name in the EVS tab page, and copy the PVC ID on the PVC details page.

      -

      disk-mode

      -

      Device type of the EVS disk. The value is VBD or SCSI.

      -

      For CCE clusters earlier than v1.11.7, you do not need to set this field. The value defaults to VBD.

      -

      This field is mandatory for CCE clusters from v1.11.7 to v1.13 that use Linux x86. As the EVS volumes dynamically provisioned by a PVC are created from SCSI EVS disks, you are advised to choose SCSI when manually creating volumes (static PVs). Volumes in the VBD mode can still be used after cluster upgrades.

      -

      spec.claimRef.apiVersion

      -

      The value is fixed at v1.

      -

      spec.claimRef.kind

      -

      The value is fixed at PersistentVolumeClaim.

      -

      spec.claimRef.name

      -

      PVC name. The value is the same as the name of the PVC created in the next step.

      -

      spec.claimRef.namespace

      -

      Namespace of the PVC. The value is the same as the namespace of the PVC created in the next step.

      -
      -
      -
    • Example YAML file for the PVC:
      apiVersion: v1  
      -kind: PersistentVolumeClaim  
      -metadata:  
      -  annotations:  
      -    volume.beta.kubernetes.io/storage-class: sas
      -    volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxivol 
      -  labels: 
      -    failure-domain.beta.kubernetes.io/region: eu-de
      -    failure-domain.beta.kubernetes.io/zone: eu-de-01     
      -  name: pvc-evs-example 
      -  namespace: default  
      -spec:  
      -  accessModes:  
      -  - ReadWriteOnce  
      -  resources:  
      -    requests:  
      -      storage: 10Gi
      -  volumeName: pv-evs-example
      - -
      - - - - - - - - - - - - - - - - - - - - - - -
      Table 2 Key parameters

      Parameter

      -

      Description

      -

      volume.beta.kubernetes.io/storage-class

      -

      Storage class, which must be the same as that of the existing PV.

      -

      volume.beta.kubernetes.io/storage-provisioner

      -

      The field must be set to flexvolume-huawei.com/fuxivol.

      -

      failure-domain.beta.kubernetes.io/region

      -

      Region where the cluster is located.

      -

      For details about the value of region, see Regions and Endpoints.

      -

      failure-domain.beta.kubernetes.io/zone

      -

      AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

      -

      For details about the value of zone, see Regions and Endpoints.

      -

      storage

      -

      Requested capacity in the PVC, in Gi.

      -

      The value must be the same as the storage size of the existing PV.

      -

      volumeName

      -

      Name of the PV.

      -
      -
      -
    -

    Clusters from v1.11 to v1.11.7

    -
    • Example YAML file for the PV:
      apiVersion: v1 
      -kind: PersistentVolume 
      -metadata: 
      -  labels: 
      -    failure-domain.beta.kubernetes.io/region: eu-de
      -    failure-domain.beta.kubernetes.io/zone:  
      -  name: pv-evs-example 
      -spec: 
      -  accessModes: 
      -  - ReadWriteOnce
      -  capacity: 
      -    storage: 10Gi 
      -  flexVolume: 
      -    driver: huawei.com/fuxivol 
      -    fsType: ext4 
      -    options:
      -      fsType: ext4 
      -      volumeID: 0992dbda-6340-470e-a74e-4f0db288ed82 
      -  persistentVolumeReclaimPolicy: Delete 
      -  storageClassName: sas
      - -
      - - - - - - - - - - - - - - - - - - - - - - - - - -
      Table 3 Key parameters

      Parameter

      -

      Description

      -

      failure-domain.beta.kubernetes.io/region

      -

      Region where the cluster is located.

      -

      For details about the value of region, see Regions and Endpoints.

      -

      failure-domain.beta.kubernetes.io/zone

      -

      AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

      -

      For details about the value of zone, see Regions and Endpoints.

      -

      storage

      -

      EVS volume capacity in the unit of Gi.

      -

      storageClassName

      -

      EVS disk type. Supported values: Common I/O (SATA), High I/O (SAS), and Ultra-high I/O (SSD)

      -

      driver

      -

      Storage driver.

      -

      For EVS disks, set this parameter to huawei.com/fuxivol.

      -

      volumeID

      -

      Volume ID of the EVS disk.

      -

      To obtain the volume ID, log in to the CCE console, choose Resource Management > Storage, click the PVC name in the EVS tab page, and copy the PVC ID on the PVC details page.

      -

      disk-mode

      -

      Device type of the EVS disk. The value is VBD or SCSI.

      -

      For CCE clusters earlier than v1.11.7, you do not need to set this field. The default value is VBD.

      -

      This field is mandatory for CCE clusters from v1.11.7 to v1.13 that use Linux x86. As the EVS volumes dynamically provisioned by a PVC are created from SCSI EVS disks, you are advised to choose SCSI when manually creating volumes (static PVs). Volumes in the VBD mode can still be used after cluster upgrades.

      -
      -
      -
    • Example YAML file for the PVC:
      apiVersion: v1  
      -kind: PersistentVolumeClaim  
      -metadata:  
      -  annotations:  
      -    volume.beta.kubernetes.io/storage-class: sas
      -    volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxivol 
      -  labels: 
      -    failure-domain.beta.kubernetes.io/region: eu-de
      -    failure-domain.beta.kubernetes.io/zone: eu-de-01     
      -  name: pvc-evs-example 
      -  namespace: default  
      -spec:  
      -  accessModes:  
      -  - ReadWriteOnce
      -  resources:  
      -    requests:  
      -      storage: 10Gi
      -  volumeName: pv-evs-example
      - -
      - - - - - - - - - - - - - - - - - - - - - - -
      Table 4 Key parameters

      Parameter

      -

      Description

      -

      volume.beta.kubernetes.io/storage-class

      -

      Storage class. The value can be sas or ssd. The value must be the same as that of the existing PV.

      -

      volume.beta.kubernetes.io/storage-provisioner

      -

      The field must be set to flexvolume-huawei.com/fuxivol.

      -

      failure-domain.beta.kubernetes.io/region

      -

      Region where the cluster is located.

      -

      For details about the value of region, see Regions and Endpoints.

      -

      failure-domain.beta.kubernetes.io/zone

      -

      AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

      -

      For details about the value of zone, see Regions and Endpoints.

      -

      storage

      -

      Requested capacity in the PVC, in Gi.

      -

      The value must be the same as the storage size of the existing PV.

      -

      volumeName

      -

      Name of the PV.

      -
      -
      -
    -

    Clusters of v1.9

    -
    • Example YAML file for the PV:
      apiVersion: v1 
      -kind: PersistentVolume 
      -metadata: 
      -  labels: 
      -    failure-domain.beta.kubernetes.io/region: eu-de
      -    failure-domain.beta.kubernetes.io/zone:  
      -  name: pv-evs-example 
      -  namespace: default 
      -spec: 
      -  accessModes: 
      -  - ReadWriteOnce
      -  capacity: 
      -    storage: 10Gi 
      -  flexVolume: 
      -    driver: huawei.com/fuxivol 
      -    fsType: ext4 
      -    options: 
      -      fsType: ext4 
      -      kubernetes.io/namespace: default 
      -      volumeID: 0992dbda-6340-470e-a74e-4f0db288ed82 
      -  persistentVolumeReclaimPolicy: Delete 
      -  storageClassName: sas
      - -
      - - - - - - - - - - - - - - - - - - - - - - - - - -
      Table 5 Key parameters

      Parameter

      -

      Description

      -

      failure-domain.beta.kubernetes.io/region

      -

      Region where the cluster is located.

      -

      For details about the value of region, see Regions and Endpoints.

      -

      failure-domain.beta.kubernetes.io/zone

      -

      AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

      -

      For details about the value of zone, see Regions and Endpoints.

      -

      storage

      -

      EVS volume capacity in the unit of Gi.

      -

      storageClassName

      -

      EVS disk type. Supported values: Common I/O (SATA), High I/O (SAS), and Ultra-high I/O (SSD)

      -

      driver

      -

      Storage driver.

      -

      For EVS disks, set this parameter to huawei.com/fuxivol.

      -

      volumeID

      -

      Volume ID of the EVS disk.

      -

      To obtain the volume ID, log in to the CCE console, choose Resource Management > Storage, click the PVC name in the EVS tab page, and copy the PVC ID on the PVC details page.

      -

      disk-mode

      -

      Device type of the EVS disk. The value is VBD or SCSI.

      -

      For CCE clusters earlier than v1.11.7, you do not need to set this field. The default value is VBD.

      -

      This field is mandatory for CCE clusters from v1.11.7 to v1.13 that use Linux x86. As the EVS volumes dynamically provisioned by a PVC are created from SCSI EVS disks, you are advised to choose SCSI when manually creating volumes (static PVs). Volumes in the VBD mode can still be used after cluster upgrades.

      -
      -
      -
    • Example YAML file for the PVC:
      apiVersion: v1  
      -kind: PersistentVolumeClaim  
      -metadata:  
      -  annotations:  
      -    volume.beta.kubernetes.io/storage-class: sas
      -    volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxivol 
      -  labels: 
      -    failure-domain.beta.kubernetes.io/region: eu-de
      -    failure-domain.beta.kubernetes.io/zone: 
      -  name: pvc-evs-example 
      -  namespace: default  
      -spec:  
      -  accessModes:  
      -  - ReadWriteOnce 
      -  resources:  
      -    requests:  
      -      storage: 10Gi
      -  volumeName: pv-evs-example
      -  volumeNamespace: default
      - -
      - - - - - - - - - - - - - - - - - - - - - - -
      Table 6 Key parameters

      Parameter

      -

      Description

      -

      volume.beta.kubernetes.io/storage-class

      -

      Storage class, which must be the same as that of the existing PV.

      -

      volume.beta.kubernetes.io/storage-provisioner

      -

      The field must be set to flexvolume-huawei.com/fuxivol.

      -

      failure-domain.beta.kubernetes.io/region

      -

      Region where the cluster is located.

      -

      For details about the value of region, see Regions and Endpoints.

      -

      failure-domain.beta.kubernetes.io/zone

      -

      AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

      -

      For details about the value of zone, see Regions and Endpoints.

      -

      storage

      -

      Requested capacity in the PVC, in Gi.

      -

      The value must be the same as the storage size of the existing PV.

      -

      volumeName

      -

      Name of the PV.

      -
      -
      -
    -

  4. Create the PV.

    kubectl create -f pv-evs-example.yaml

    -

  5. Create the PVC.

    kubectl create -f pvc-evs-example.yaml

    -

    After the operation is successful, choose Resource Management > Storage to view the created PVC. You can also view the EVS disk by name on the EVS console.

    -

  6. (Optional) Add the metadata associated with the cluster to ensure that the EVS disk associated with the mounted static PV is not deleted when the node or cluster is deleted.

    If you skip this step in this example or when creating a static PV or PVC, ensure that the EVS disk associated with the static PV has been unbound from the node before you delete the node.

    -
    -
    1. Obtain the tenant token. For details, see Obtaining a User Token.
    2. Obtain the EVS access address EVS_ENDPOINT. For details, see Regions and Endpoints.

      -
    3. Add the metadata associated with the cluster to the EVS disk backing the static PV.
      curl -X POST ${EVS_ENDPOINT}/v2/${project_id}/volumes/${volume_id}/metadata --insecure \
      -    -d '{"metadata":{"cluster_id": "${cluster_id}", "namespace": "${pvc_namespace}"}}' \
      -    -H 'Accept:application/json' -H 'Content-Type:application/json;charset=utf8' \
      -    -H 'X-Auth-Token:${TOKEN}'
      - -
      - - - - - - - - - - - - - - - - - - - - - - -
      Table 7 Key parameters

      Parameter

      -

      Description

      -

      EVS_ENDPOINT

      -

      EVS access address. Set this parameter to the value obtained in 6.b.

      -

      project_id

      -

      Project ID. You can click the login user in the upper right corner of the console page, select My Credentials from the drop-down list, and view the project ID on the Projects tab page.

      -

      volume_id

      -

      ID of the associated EVS disk. Set this parameter to volume_id of the static PV to be created. You can also log in to the EVS console, click the name of the EVS disk to be imported, and obtain the ID from Summary on the disk details page.

      -

      cluster_id

      -

      ID of the cluster where the EVS PV is to be created. On the CCE console, choose Resource Management > Clusters. Click the name of the cluster to be associated. On the cluster details page, obtain the cluster ID.

      -

      pvc_namespace

      -

      Namespace where the PVC is to be bound.

      -

      TOKEN

      -

      User token. Set this parameter to the value obtained in 6.a.

      -
      -
      -

      For example, run the following commands:

      -
      curl -X POST https://evs.eu-de.otc.t-systems.com:443/v2/060576866680d5762f52c0150e726aa7/volumes/69c9619d-174c-4c41-837e-31b892604e14/metadata --insecure \
      -    -d '{"metadata":{"cluster_id": "71e8277e-80c7-11ea-925c-0255ac100442", "namespace": "default"}}' \
      -    -H 'Accept:application/json' -H 'Content-Type:application/json;charset=utf8' \
      -    -H 'X-Auth-Token:MIIPe******IsIm1ldG
      -

      After the request is executed, run the following commands to check whether the EVS disk has been associated with the metadata of the cluster:

      -
      curl -X GET ${EVS_ENDPOINT}/v2/${project_id}/volumes/${volume_id}/metadata --insecure \
      -    -H 'X-Auth-Token:${TOKEN}'
      -

      For example, run the following commands:

      -
      curl -X GET https://evs.eu-de.otc.t-systems.com/v2/060576866680d5762f52c0150e726aa7/volumes/69c9619d-174c-4c41-837e-31b892604e14/metadata --insecure \
      -    -H 'X-Auth-Token:MIIPeAYJ***9t1c31ASaQ=='
      -

      The command output displays the current metadata of the EVS disk.

      -
      {
      -    "metadata": {
      -        "namespace": "default",
      -        "cluster_id": "71e8277e-80c7-11ea-925c-0255ac100442",
      -        "hw:passthrough": "true"
      -    }
      -}
      -
    -

-
-
-
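After completing the procedure above, you can confirm the PV-PVC binding from the command line before mounting the volume into a workload. The object names follow the examples in this section.

# The PVC should report Bound and reference the PV created above.
kubectl get pv pv-evs-example
kubectl get pvc pvc-evs-example -n default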
- -
- diff --git a/docs/cce/umn/cce_01_0314.html b/docs/cce/umn/cce_01_0314.html deleted file mode 100644 index ba971b3a..00000000 --- a/docs/cce/umn/cce_01_0314.html +++ /dev/null @@ -1,176 +0,0 @@ - - -

(kubectl) Creating a Pod Mounted with an EVS Volume

-

Scenario

After an EVS volume is created or imported to CCE, you can mount it to a workload.

-

EVS disks cannot be attached across AZs. Before mounting a volume, you can run the kubectl get pvc command to query the available PVCs in the AZ where the current cluster is located.

-
-
-
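For example, the following command lists the PVCs in the default namespace; only PVCs whose underlying EVS disk is in the same AZ as the target node can be mounted.

kubectl get pvc -n default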

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the evs-deployment-example.yaml file, which is used to create a Deployment.

    touch evs-deployment-example.yaml

    -

    vi evs-deployment-example.yaml

    -
    Example of mounting an EVS volume to a Deployment (PVC-based, shared volume):
    apiVersion: apps/v1 
    -kind: Deployment 
    -metadata: 
    -  name: evs-deployment-example 
    -  namespace: default 
    -spec: 
    -  replicas: 1 
    -  selector: 
    -    matchLabels: 
    -      app: evs-deployment-example 
    -  template: 
    -    metadata: 
    -      labels: 
    -        app: evs-deployment-example 
    -    spec: 
    -      containers: 
    -      - image: nginx
    -        name: container-0 
    -        volumeMounts: 
    -        - mountPath: /tmp 
    -          name: pvc-evs-example 
    -      imagePullSecrets:
    -        - name: default-secret
    -      restartPolicy: Always 
    -      volumes: 
    -      - name: pvc-evs-example 
    -        persistentVolumeClaim: 
    -          claimName: pvc-evs-auto-example
    -
    - -
    - - - - - - - - - - - - - - - - - - - - - -
    Table 1 Key parameters

    Parent Parameter

    -

    Parameter

    -

    Description

    -

    spec.template.spec.containers.volumeMounts

    -

    name

    -

    Name of the volume mounted to the container.

    -

    spec.template.spec.containers.volumeMounts

    -

    mountPath

    -

    Mount path of the container. In this example, the volume is mounted to the /tmp directory.

    -

    spec.template.spec.volumes

    -

    name

    -

    Name of the volume.

    -

    spec.template.spec.volumes.persistentVolumeClaim

    -

    claimName

    -

    Name of an existing PVC.

    -
    -
    -

    spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

    -
    -

    Mounting an EVS volume to a StatefulSet (PVC template-based, non-shared volume):

    -
    Example YAML:
    apiVersion: apps/v1
    -kind: StatefulSet
    -metadata:
    -  name: deploy-evs-sas-in
    -spec:
    -  replicas: 1
    -  selector:
    -    matchLabels:
    -      app: deploy-evs-sata-in
    -  template:
    -    metadata:
    -      labels:
    -        app: deploy-evs-sata-in
    -        failure-domain.beta.kubernetes.io/region: eu-de
    -        failure-domain.beta.kubernetes.io/zone: eu-de-01
    -    spec:
    -      containers:
    -        - name: container-0
    -          image: 'nginx:1.12-alpine-perl'
    -          volumeMounts:
    -            - name: bs-sas-mountoptionpvc
    -              mountPath: /tmp
    -      imagePullSecrets:
    -        - name: default-secret
    -  volumeClaimTemplates:
    -    - metadata:
    -        name: bs-sas-mountoptionpvc
    -        annotations:
    -          volume.beta.kubernetes.io/storage-class: sas
    -          volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxivol
    -      spec:
    -        accessModes:
    -          - ReadWriteOnce
    -        resources:
    -          requests:
    -            storage: 10Gi
    -  serviceName: wwww
    - -
    - - - - - - - - - - - - - - - - - - - - - -
    Table 2 Key parameters

    Parent Parameter

    -

    Parameter

    -

    Description

    -

    metadata

    -

    name

    -

    Name of the created workload.

    -

    spec.template.spec.containers

    -

    image

    -

    Image of the workload.

    -

    spec.template.spec.containers.volumeMount

    -

    mountPath

    -

    Mount path of the container. In this example, the volume is mounted to the /tmp directory.

    -

    spec

    -

    serviceName

    -

    Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.

    -
    -
    -

    spec.template.spec.containers.volumeMounts.name and spec.volumeClaimTemplates.metadata.name must be consistent because they have a mapping relationship.

    -
    -
    -

  3. Run the following command to create the pod:

    kubectl create -f evs-deployment-example.yaml

    -

    After the creation is complete, log in to the CCE console. In the navigation pane, choose Resource Management > Storage > EVS. Then, click the PVC name. On the PVC details page, you can view the binding relationship between the EVS volume and the PVC.

    -

-
-
-
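As an alternative to the console check described in the last step above, you can verify the result with kubectl. The label and PVC name follow the Deployment example in this section.

# Check that the pod is running and inspect the PVC it uses.
kubectl get pod -n default -l app=evs-deployment-example
kubectl describe pvc pvc-evs-auto-example -n default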
- -
- diff --git a/docs/cce/umn/cce_01_0316.html b/docs/cce/umn/cce_01_0316.html deleted file mode 100644 index d86bfefa..00000000 --- a/docs/cce/umn/cce_01_0316.html +++ /dev/null @@ -1,14 +0,0 @@ - - -

Overview

-

CCE allows you to mount a volume created from a Scalable File Service (SFS) file system to a container to store data persistently. SFS volumes are commonly used in ReadWriteMany scenarios, such as media processing, content management, big data analysis, and workload process analysis.

-
Figure 1 Mounting SFS volumes to CCE
-

Description

  • Standard file protocols: You can mount file systems as volumes to servers, the same as using local directories.
  • Data sharing: The same file system can be mounted to multiple servers, so that data can be shared.
  • Private network: Users can access data only over the private networks of data centers.
  • Capacity and performance: The capacity of a single file system is high (PB level) and the performance is excellent (ms-level read/write I/O latency).
  • Use cases: Deployments/StatefulSets in the ReadWriteMany mode and jobs created for high-performance computing (HPC), media processing, content management, web services, big data analysis, and workload process analysis
-
-
-
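For reference, a ReadWriteMany PVC backed by the csi-nas storage class (available when the everest add-on is used) might look like the following minimal sketch. The console and FlexVolume procedures in the next sections do not require it.

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-sfs-example
  namespace: default
spec:
  accessModes:
  - ReadWriteMany                # SFS volumes can be mounted read-write by multiple nodes.
  resources:
    requests:
      storage: 10Gi
  storageClassName: csi-nas      # Default storage class for SFS file systems.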
- -
- diff --git a/docs/cce/umn/cce_01_0317.html b/docs/cce/umn/cce_01_0317.html deleted file mode 100644 index 0340e06d..00000000 --- a/docs/cce/umn/cce_01_0317.html +++ /dev/null @@ -1,135 +0,0 @@ - - -

Using SFS Volumes

-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

  • SFS volumes are available only in certain regions.
  • Container storage in CCE clusters of Kubernetes 1.13 or later versions supports encryption. Currently, E2E encryption is supported only in certain regions.
  • Volumes cannot be created in specified enterprise projects. Only the default enterprise project is supported.
-
-

Creating an SFS Volume

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage.
  2. On the SFS tab, click Create SFS File System.
  3. Configure basic information, as shown in Table 1.

    -

    - - - - - - - - - - - - - - - - - - - - - - -
    Table 1 Parameters for Creating a File System Volume

    Parameter

    -

    Parameter Description

    -

    * PVC Name

    -

    Name of the new PVC, which is different from the volume name. The actual volume name is automatically generated when the PV is created by the PVC.

    -

    Cluster Name

    -

    Cluster to which the file system volume belongs.

    -

    Namespace

    -

    Namespace with which the file system volume is associated.

    -

    Total Capacity

    -

    The total capacity is the capacity of a single volume. Fees are charged by actual usage.

    -

    Access Mode

    -

    Access permissions of user applications on storage resources (PVs).

    -
    • ReadWriteMany (RWX): The SFS volume can be mounted as read-write by multiple nodes.
    -

    Encryption

    -

    KMS Encryption is deselected by default.

    -

    After KMS Encryption is selected, Key Management Service (KMS), an easy-to-use and highly secure key service, will be used for SFS file systems. If no agency has been created, click Create Agency and set the following parameters:

    -
    • Agency Name: Agencies can be used to assign permissions to trusted accounts or cloud services for a specific period of time. If no agency is created, click Create Agency. The agency name SFSAccessKMS indicates that SFS is granted the permission to access KMS. After SFS is authorized successfully, it can obtain KMS keys to encrypt and decrypt file systems.
    • Key Name: After a key is created, it can be loaded and used in containerized applications.
    • Key ID: generated by default.
    -

    This function is supported only for clusters of v1.13.10 and later in certain regions.

    -
    -
    -

  4. Click Create.

    The volume is displayed in the list. When its status becomes Bound, the volume has been created successfully.

    -

  5. Click the volume name to view detailed information about the volume.
-
-

Adding an SFS Volume

  1. Create a workload or job by referring to Creating a Deployment, Creating a StatefulSet, Creating a DaemonSet, or Creating a Job. During creation, expand Data Storage after adding a container. On the Cloud Volume tab page, click Add Cloud Volume.
  2. Set the storage class to SFS.

    -

    - - - - - - - - - - - - - - - - - - -
    Table 2 Parameters for mounting a file system

    Parameter

    -

    Parameter Description

    -

    Type

    -

    File Storage (NFS): This type applies to a wide range of scenarios, including media processing, content management, big data, and application analysis.

    -

    Allocation Mode

    -

    Manual

    -
    • Name: Select a created file system. You need to create a file system in advance. For details about how to create a file system, see Creating an SFS Volume.
    • Sub-Type: subtype of the created file storage.
    • Storage Capacity: This value is an attribute of the PVC. If the capacity has been expanded on the IaaS side, it is normal for this value to differ from the actual capacity of the storage entity. The PVC capacity matches the capacity of the storage entity only after end-to-end container storage capacity expansion is supported for CCE clusters of v1.13.
    -

    Automatic

    -

    An SFS volume is created automatically. You need to enter the storage capacity.

    -
    • Sub-Type: Select NFS.
    • Storage Capacity: Specify the total storage capacity, in GB. Ensure that the storage capacity quota is not exceeded; otherwise, creation will fail.
    • After you select KMS Encryption, Key Management Service (KMS), an easy-to-use and highly secure service, will be enabled for file systems. This function is supported only for clusters of v1.13.10 and later in certain regions. If no agency has been created, click Create Agency and set the following parameters:
      • Agency Name: Agencies can be used to assign permissions to trusted accounts or cloud services for a specific period of time. If no agency is created, click Create Agency. The agency name SFSAccessKMS indicates that SFS is granted the permission to access KMS. After SFS is authorized successfully, it can obtain KMS keys to encrypt and decrypt file systems.
      • Key Name: After a key is created, it can be loaded and used in containerized applications.
      • Key ID: generated by default.
      -
    -

    Add Container Path

    -

    Configure the following parameters:

    -
    1. subPath: Enter the subpath of the file storage, for example, /tmp.

      If this parameter is not specified, the root path of the data volume is used by default. Currently, only file storage is supported. The value must be a relative path and cannot start with a slash (/) or ../.

      -
    2. Container Path: Enter the path of the container, for example, /tmp.
      The container path must not be a system directory, such as / and /var/run. Otherwise, an exception occurs. You are advised to mount the volume to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
      NOTICE:

      If the volume is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.

      -
      -
      -
    3. Set permissions.
      • Read-only: You can only read the data volumes mounted to the path.
      • Read/Write: You can modify the data volumes mounted to the path. Newly written data is not migrated if the container is migrated, which may cause data loss.
      -
    -

    Click Add Container Path to add multiple settings. Then, click OK.

    -
    -
    -

  3. Click OK.
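The console fields above map directly onto standard Kubernetes pod spec fields. The following is a minimal sketch of how Container Path, subPath, and the permission setting appear in a container definition; the volume and path names are illustrative only:

    volumeMounts:
    - name: pvc-sfs-example        # must match the volume name defined under spec.volumes
      mountPath: /tmp              # Container Path
      subPath: data                # optional subpath; a relative path that cannot start with / or ../
      readOnly: false              # false corresponds to Read/Write, true to Read-only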
-
-

Importing an SFS Volume

CCE allows you to import existing SFS volumes.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. On the SFS tab page, click Import.
  2. Select one or more SFS volumes that you want to attach.
  3. Select the target cluster and namespace. Then, click OK.
-
-

Unbinding an SFS Volume

When an SFS volume is successfully created or imported, the volume is automatically bound to the current cluster. Other clusters can also use the volume. When the SFS volume is unbound from the cluster, other clusters can still import and use the volume.

-

If the SFS volume has been attached to a workload, the volume cannot be unbound from the cluster.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. In the SFS volume list, click Unbind next to the target volume.
  2. Confirm the unbinding, and click OK.
-
-

Related Operations

After an SFS volume is created, you can perform the operations described in Table 3. -
- - - - - - - - - - -
Table 3 Other operations

Operation

-

Description

-

Deleting an SFS volume

-
  1. Select the SFS volume to be deleted and click Delete in the Operation column.
  2. Follow the prompts to delete the SFS volume.
-

Importing an SFS volume

-

CCE allows you to import existing SFS volumes.

-
  1. On the SFS tab page, click Import.
  2. Select one or more SFS volumes that you want to attach.
  3. Select the target cluster and namespace.
  4. Click Yes.
-
-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0318.html b/docs/cce/umn/cce_01_0318.html deleted file mode 100644 index 8bcd3db1..00000000 --- a/docs/cce/umn/cce_01_0318.html +++ /dev/null @@ -1,66 +0,0 @@ - - -

(kubectl) Automatically Creating an SFS Volume

-

Scenario

CCE supports creating SFS volumes through PersistentVolumeClaims (PVCs).

-
-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the pvc-sfs-auto-example.yaml file, which is used to create a PVC.

    touch pvc-sfs-auto-example.yaml

    -

    vi pvc-sfs-auto-example.yaml

    -
    Example YAML file:
    apiVersion: v1 
    -kind: PersistentVolumeClaim 
    -metadata: 
    -  annotations: 
    -    volume.beta.kubernetes.io/storage-class: nfs-rw
    -  name: pvc-sfs-auto-example 
    -  namespace: default 
    -spec: 
    -  accessModes: 
    -  - ReadWriteMany 
    -  resources: 
    -    requests: 
    -      storage: 10Gi
    - -
    - - - - - - - - - - - - - - - - -
    Table 1 Key parameters

    Parameter

    -

    Description

    -

    volume.beta.kubernetes.io/storage-class

    -

    File storage class. Currently, the standard file protocol type (nfs-rw) is supported.

    -

    name

    -

    Name of the PVC to be created.

    -

    accessModes

    -

    Only ReadWriteMany is supported. ReadWriteOnce is not supported.

    -

    storage

    -

    Storage capacity in the unit of Gi.

    -
    -
    -
    -

  3. Run the following command to create the PVC.

    kubectl create -f pvc-sfs-auto-example.yaml

    -

    After the command is executed, a file system is created in the VPC to which the cluster belongs. Choose Storage > SFS on the CCE console or log in to the SFS console to view the file system.

    -
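If you prefer to verify the result from kubectl rather than the console, you can check that the PVC has been bound. A quick check based on the example names above; the output shown is illustrative:

    kubectl get pvc pvc-sfs-auto-example -n default
    # NAME                   STATUS   VOLUME          CAPACITY   ACCESS MODES   STORAGECLASS   AGE
    # pvc-sfs-auto-example   Bound    pvc-xxxxxxxx    10Gi       RWX            nfs-rw         1m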

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0319.html b/docs/cce/umn/cce_01_0319.html deleted file mode 100644 index 2c484bff..00000000 --- a/docs/cce/umn/cce_01_0319.html +++ /dev/null @@ -1,283 +0,0 @@ - - -

(kubectl) Creating a PV from an Existing SFS File System

-

Scenario

CCE allows you to use an existing file system to create a PersistentVolume (PV). After the creation is successful, create the corresponding PersistentVolumeClaim (PVC) and bind it to the PV.

-
-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Log in to the SFS console, create a file system, and record the file system ID, shared path, and capacity.
  2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  3. Create two YAML files for creating the PV and PVC. Assume that the file names are pv-sfs-example.yaml and pvc-sfs-example.yaml.

    touch pv-sfs-example.yaml pvc-sfs-example.yaml

    - -
    - - - - - - - - - - - - - -

    Kubernetes Version

    -

    Description

    -

    YAML Example

    -

    1.11 ≤ K8s version ≤ 1.13

    -

    Clusters from v1.11 to v1.13

    -

    Example YAML

    -

    K8s version = 1.9

    -

    Clusters of v1.9

    -

    Example YAML

    -
    -
    -

    Clusters from v1.11 to v1.13

    -
    • Example YAML file for the PV:
      apiVersion: v1 
      -kind: PersistentVolume 
      -metadata: 
      -  name: pv-sfs-example 
      -  annotations:
      -    pv.kubernetes.io/provisioned-by: flexvolume-huawei.com/fuxinfs
      -spec: 
      -  accessModes: 
      -  - ReadWriteMany 
      -  capacity: 
      -    storage: 10Gi 
      -  claimRef:
      -    apiVersion: v1
      -    kind: PersistentVolumeClaim
      -    name: pvc-sfs-example
      -    namespace: default
      -  flexVolume: 
      -    driver: huawei.com/fuxinfs 
      -    fsType: nfs 
      -    options: 
      -      deviceMountPath: <your_deviceMountPath>  # Shared storage path of your file.
      -      fsType: nfs 
      -      volumeID: f6976f9e-2493-419b-97ca-d7816008d91c 
      -  persistentVolumeReclaimPolicy: Delete 
      -  storageClassName: nfs-rw
      - -
      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      Table 1 Key parameters

      Parameter

      -

      Description

      -

      driver

      -

      Storage driver used to mount the volume. Set the driver to huawei.com/fuxinfs for the file system.

      -

      deviceMountPath

      -

      Shared path of the file system.

      -

      On the management console, choose Service List > Storage > Scalable File Service. You can obtain the shared path of the file system from the Mount Address column.

      -

      volumeID

      -

      File system ID.

      -

      To obtain the ID, log in to the CCE console, choose Resource Management > Storage, click the PVC name in the SFS tab page, and copy the PVC ID on the PVC details page.

      -

      storage

      -

      File system size.

      -

      storageClassName

      -

      Read/write mode supported by the file system. Currently, nfs-rw and nfs-ro are supported.

      -

      spec.claimRef.apiVersion

      -

      The value is fixed at v1.

      -

      spec.claimRef.kind

      -

      The value is fixed at PersistentVolumeClaim.

      -

      spec.claimRef.name

      -

      The value is the same as the name of the PVC created in the next step.

      -

      spec.claimRef.namespace

      -

      Namespace of the PVC. The value is the same as the namespace of the PVC created in the next step.

      -
      -
      -
    • Example YAML file for the PVC:
      apiVersion: v1
      -kind: PersistentVolumeClaim
      -metadata:
      -  annotations:
      -    volume.beta.kubernetes.io/storage-class: nfs-rw
      -    volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxinfs
      -  name: pvc-sfs-example
      -  namespace: default
      -spec:
      -  accessModes:
      -  - ReadWriteMany
      -  resources:
      -    requests:
      -      storage: 10Gi
      -  volumeName: pv-sfs-example
      - -
      - - - - - - - - - - - - - - - - -
      Table 2 Key parameters

      Parameter

      -

      Description

      -

      volume.beta.kubernetes.io/storage-class

      -

      Read/write mode supported by the file system. nfs-rw and nfs-ro are supported. The value must be the same as that of the existing PV.

      -

      volume.beta.kubernetes.io/storage-provisioner

      -

      Must be set to flexvolume-huawei.com/fuxinfs.

      -

      storage

      -

      Storage capacity, in the unit of Gi. The value must be the same as the storage size of the existing PV.

      -

      volumeName

      -

      Name of the PV.

      -
      -
      -
    -

    Clusters of v1.9

    -
    • Example YAML file for the PV:
      apiVersion: v1 
      -kind: PersistentVolume 
      -metadata: 
      -  name: pv-sfs-example 
      -  namespace: default 
      -spec: 
      -  accessModes: 
      -  - ReadWriteMany 
      -  capacity: 
      -    storage: 10Gi 
      -  flexVolume: 
      -    driver: huawei.com/fuxinfs 
      -    fsType: nfs 
      -    options: 
      -      deviceMountPath: <your_deviceMountPath>  # Shared storage path of your file.
      -      fsType: nfs 
      -      kubernetes.io/namespace: default 
      -      volumeID: f6976f9e-2493-419b-97ca-d7816008d91c 
      -  persistentVolumeReclaimPolicy: Delete 
      -  storageClassName: nfs-rw
      - -
      - - - - - - - - - - - - - - - - - - - -
      Table 3 Key parameters

      Parameter

      -

      Description

      -

      driver

      -

      Storage driver used to mount the volume. Set the driver to huawei.com/fuxinfs for the file system.

      -

      deviceMountPath

      -

      Shared path of the file system.

      -

      On the management console, choose Service List > Storage > Scalable File Service. You can obtain the shared path of the file system from the Mount Address column.

      -

      volumeID

      -

      File system ID.

      -

      To obtain the ID, log in to the CCE console, choose Resource Management > Storage, click the PVC name in the SFS tab page, and copy the PVC ID on the PVC details page.

      -

      storage

      -

      File system size.

      -

      storageClassName

      -

      Read/write mode supported by the file system. Currently, nfs-rw and nfs-ro are supported.

      -
      -
      -
    • Example YAML file for the PVC:
      apiVersion: v1
      -kind: PersistentVolumeClaim
      -metadata:
      -  annotations:
      -    volume.beta.kubernetes.io/storage-class: nfs-rw
      -    volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxinfs
      -  name: pvc-sfs-example
      -  namespace: default
      -spec:
      -  accessModes:
      -  - ReadWriteMany
      -  resources:
      -    requests:
      -      storage: 10Gi
      -  volumeName: pv-sfs-example
      -  volumeNamespace: default
      - -
      - - - - - - - - - - - - - - - - -
      Table 4 Key parameters

      Parameter

      -

      Description

      -

      volume.beta.kubernetes.io/storage-class

      -

      Read/write mode supported by the file system. nfs-rw and nfs-ro are supported. The value must be the same as that of the existing PV.

      -

      volume.beta.kubernetes.io/storage-provisioner

      -

      The field must be set to flexvolume-huawei.com/fuxinfs.

      -

      storage

      -

      Storage capacity, in the unit of Gi. The value must be the same as the storage size of the existing PV.

      -

      volumeName

      -

      Name of the PV.

      -
      -
      -
    -

    The VPC to which the file system belongs must be the same as the VPC of the ECS node on which the workload is planned to run.

    -
    -

  4. Create the PV.

    kubectl create -f pv-sfs-example.yaml

    -

  5. Create the PVC.

    kubectl create -f pvc-sfs-example.yaml

    -
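As a quick sanity check (a sketch using the example names above), verify that the PVC has bound to the pre-created PV; both objects should report the Bound status:

    kubectl get pv pv-sfs-example
    kubectl get pvc pvc-sfs-example -n default
    # If the PVC stays Pending, check that the storageClassName, capacity, and claimRef fields
    # in the PV exactly match the PVC.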

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0320.html b/docs/cce/umn/cce_01_0320.html deleted file mode 100644 index fb2affab..00000000 --- a/docs/cce/umn/cce_01_0320.html +++ /dev/null @@ -1,168 +0,0 @@ - - -

(kubectl) Creating a Deployment Mounted with an SFS Volume

-

Scenario

After an SFS volume is created or imported to CCE, you can mount the volume to a workload.

-
-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the sfs-deployment-example.yaml file, which is used to create a pod.

    touch sfs-deployment-example.yaml

    -

    vi sfs-deployment-example.yaml

    -
    Example of mounting an SFS volume to a Deployment (PVC-based, shared volume):
    apiVersion: apps/v1 
    -kind: Deployment 
    -metadata: 
    -  name: sfs-deployment-example                                # Workload name
    -  namespace: default 
    -spec: 
    -  replicas: 1 
    -  selector: 
    -    matchLabels: 
    -      app: sfs-deployment-example 
    -  template: 
    -    metadata: 
    -      labels: 
    -        app: sfs-deployment-example 
    -    spec: 
    -      containers: 
    -      - image: nginx 
    -        name: container-0 
    -        volumeMounts: 
    -        - mountPath: /tmp                                # Mount path 
    -          name: pvc-sfs-example 
    -      imagePullSecrets:
    -        - name: default-secret
    -      restartPolicy: Always 
    -      volumes: 
    -      - name: pvc-sfs-example 
    -        persistentVolumeClaim: 
    -          claimName: pvc-sfs-auto-example                # PVC name
    -
    - -
    - - - - - - - - - - - - - - - - - -
    Table 1 Key parameters

    Parent Parameter

    -

    Parameter

    -

    Description

    -

    metadata

    -

    name

    -

    Name of the pod to be created.

    -

    spec.template.spec.containers.volumeMounts

    -

    mountPath

    -

    Mount path in the container. In this example, the mount path is /tmp.

    -

    spec.template.spec.volumes.persistentVolumeClaim

    -

    claimName

    -

    Name of an existing PVC.

    -
    -
    -

    spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

    -
    -

    Example of mounting an SFS volume to a StatefulSet (PVC template-based, dedicated volume):

    -
    Example YAML:
    apiVersion: apps/v1
    -kind: StatefulSet
    -metadata:
    -  name: deploy-sfs-nfs-rw-in
    -  namespace: default
    -  labels:
    -    appgroup: ''
    -spec:
    -  replicas: 2
    -  selector:
    -    matchLabels:
    -      app: deploy-sfs-nfs-rw-in
    -  template:
    -    metadata:
    -      labels:
    -        app: deploy-sfs-nfs-rw-in
    -    spec:
    -      containers:
    -        - name: container-0
    -          image: 'nginx:1.12-alpine-perl'
    -          volumeMounts:
    -            - name: bs-nfs-rw-mountoptionpvc
    -              mountPath: /aaa
    -      imagePullSecrets:
    -        - name: default-secret
    -  volumeClaimTemplates:
    -    - metadata:
    -        name: bs-nfs-rw-mountoptionpvc
    -        annotations:
    -          volume.beta.kubernetes.io/storage-class: nfs-rw
    -          volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxinfs
    -      spec:
    -        accessModes:
    -          - ReadWriteMany
    -        resources:
    -          requests:
    -            storage: 1Gi
    -  serviceName: wwww
    -
    - -
    - - - - - - - - - - - - - - - - - - - - - -
    Table 2 Key parameters

    Parent Parameter

    -

    Parameter

    -

    Description

    -

    metadata

    -

    name

    -

    Name of the created workload.

    -

    spec.template.spec.containers

    -

    image

    -

    Image of the workload.

    -

    spec.template.spec.containers.volumeMount

    -

    mountPath

    -

    Mount path in the container. In this example, the mount path is /aaa.

    -

    spec

    -

    serviceName

    -

    Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.

    -
    -
    -

    spec.template.spec.containers.volumeMounts.name and spec.volumeClaimTemplates.metadata.name must be consistent because they have a mapping relationship.

    -
    -

  3. Run the following command to create the pod:

    kubectl create -f sfs-deployment-example.yaml

    -

    After the creation is complete, log in to the CCE console. In the navigation pane, choose Resource Management > Storage > SFS. Click the PVC name. On the PVC details page, you can view the binding relationship between SFS and PVC.

    -
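You can also confirm the mount from inside a running pod. The following is a minimal check, assuming the Deployment example above; replace <pod-name> with the name returned by the first command:

    kubectl get pod -n default -l app=sfs-deployment-example
    kubectl exec -n default <pod-name> -- df -h /tmp
    # The filesystem column should show the NFS shared path of the SFS volume.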

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0321.html b/docs/cce/umn/cce_01_0321.html deleted file mode 100644 index f2dfa354..00000000 --- a/docs/cce/umn/cce_01_0321.html +++ /dev/null @@ -1,110 +0,0 @@ - - -

(kubectl) Creating a StatefulSet Mounted with an SFS Volume

-

Scenario

CCE allows you to use an existing SFS volume to create a StatefulSet through a PersistentVolumeClaim (PVC).

-
-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Create an SFS volume by referring to Creating an SFS Volume and record the volume name.
  2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  3. Create a YAML file for creating the workload. Assume that the file name is sfs-statefulset-example.yaml.

    touch sfs-statefulset-example.yaml

    -

    vi sfs-statefulset-example.yaml

    -

    Example YAML:

    -
    apiVersion: apps/v1
    -kind: StatefulSet
    -metadata:
    -  name: sfs-statefulset-example
    -  namespace: default
    -spec:
    -  replicas: 2
    -  selector:
    -    matchLabels:
    -      app: sfs-statefulset-example
    -  serviceName: qwqq
    -  template:
    -    metadata:
    -      annotations:
    -        metrics.alpha.kubernetes.io/custom-endpoints: '[{"api":"","path":"","port":"","names":""}]'
    -        pod.alpha.kubernetes.io/initialized: "true"
    -      labels:
    -        app: sfs-statefulset-example
    -    spec:
    -      affinity: {}
    -      containers:
    -      - image: nginx:latest
    -        name: container-0
    -        volumeMounts:
    -        - mountPath: /tmp
    -          name: pvc-sfs-example
    -      imagePullSecrets:
    -      - name: default-secret
    -      volumes:
    -        - name: pvc-sfs-example
    -          persistentVolumeClaim:
    -            claimName: cce-sfs-demo
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Table 1 Key parameters

    Parent Parameter

    -

    Parameter

    -

    Description

    -

    spec

    -

    replicas

    -

    Number of pods.

    -

    metadata

    -

    name

    -

    Name of the created workload.

    -

    spec.template.spec.containers

    -

    image

    -

    Image used by the workload.

    -

    spec.template.spec.containers.volumeMounts

    -

    mountPath

    -

    Mount path in the container.

    -

    spec

    -

    serviceName

    -

    Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.

    -

    spec.template.spec.volumes.persistentVolumeClaim

    -

    claimName

    -

    Name of an existing PVC.

    -
    -
    -

    spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

    -
    -

  4. Create the StatefulSet.

    kubectl create -f sfs-statefulset-example.yaml

    -
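Because the volume is mounted in ReadWriteMany mode and both replicas reference the same PVC, data written by one pod is visible to the other. A small check, assuming the example above (StatefulSet pods are named <name>-0, <name>-1):

    kubectl exec -n default sfs-statefulset-example-0 -- touch /tmp/shared-test
    kubectl exec -n default sfs-statefulset-example-1 -- ls /tmp/shared-test
    # The second command should list /tmp/shared-test, confirming that the volume is shared.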

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0323.html b/docs/cce/umn/cce_01_0323.html deleted file mode 100644 index 43903118..00000000 --- a/docs/cce/umn/cce_01_0323.html +++ /dev/null @@ -1,19 +0,0 @@ - - -

Overview

-

CCE allows you to mount a volume created from an Object Storage Service (OBS) bucket to a container to store data persistently. Object storage is commonly used in cloud workloads, data analysis, content analysis, and hotspot objects.

-
Figure 1 Mounting OBS volumes to CCE
-

Storage Class

Object storage offers three storage classes, Standard, Infrequent Access, and Archive, to satisfy different requirements for storage performance and costs.

-
  • The Standard storage class features low access latency and high throughput. It is therefore suitable for storing a large number of hot files (accessed multiple times per month) or small files (less than 1 MB). Application scenarios include big data analytics, mobile apps, hot videos, and image processing on social media.
  • The Infrequent Access storage class is ideal for storing data that is infrequently accessed (fewer than 12 times a year) but still requires a quick response. Application scenarios include file synchronization or sharing and enterprise-level backup. It provides the same durability, access latency, and throughput as the Standard storage class but at a lower cost; however, its availability is lower than that of the Standard storage class.
  • The Archive storage class is suitable for archiving data that is rarely accessed (about once a year on average). Application scenarios include data archiving and long-term backup. The Archive storage class is secure and durable at a low cost and can replace tape libraries, but restoring data from it may take hours.
-
-

Description

  • Standard APIs: With HTTP RESTful APIs, OBS allows you to use client tools or third-party tools to access object storage.
  • Data sharing: Servers, embedded devices, and IoT devices can use the same path to access shared object data in OBS.
  • Public/Private networks: OBS allows data to be accessed from public networks to meet Internet application requirements.
  • Capacity and performance: No capacity limit; high performance (read/write I/O latency within 10 ms).
  • Use cases: Deployments/StatefulSets in the ReadOnlyMany mode and jobs created for big data analysis, static website hosting, online video on demand (VoD), gene sequencing, intelligent video surveillance, backup and archiving, and enterprise cloud boxes (web disks). You can create object storage by using the OBS console, tools, and SDKs.
-
-

Reference

CCE clusters can also be mounted with OBS buckets of third-party tenants, including OBS parallel file systems (preferred) and OBS object buckets.

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0324.html b/docs/cce/umn/cce_01_0324.html deleted file mode 100644 index 03ad5eec..00000000 --- a/docs/cce/umn/cce_01_0324.html +++ /dev/null @@ -1,144 +0,0 @@ - - -

Using OBS Volumes

-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

  • CCE clusters of v1.7.3-r8 and earlier do not support OBS volumes. You need to upgrade these clusters or create clusters of a later version that supports OBS.
  • Volumes cannot be created in specified enterprise projects. Only the default enterprise project is supported.
  • The following operations apply to clusters of Kubernetes 1.13 or earlier.
-
-

Preparations

To mount OBS buckets as volumes reliably, you must configure the AK/SK before you create OBS volumes.

-

The procedure for configuring the AK/SK is as follows:

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage.
  2. On the OBS tab page, click AK/SK in the notice.
    Figure 1 Configuring the AK/SK
    -
  3. Select a key file and click Upload to upload it.
  4. Select the corresponding workload and click Restart.
-

The AK/SK is required when an OBS volume is created. If the key file is not uploaded, the volume cannot be mounted; as a result, the pod fails to start or OBS data access is abnormal.

-
-
-

Creating an OBS Volume

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage.
  2. Click the OBS tab and click Create OBS Bucket.
  3. Configure basic information, as shown in Table 1.

    -

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Table 1 Parameters for creating an OBS volume

    Parameter

    -

    Description

    -

    * PVC Name

    -

    Name of the new PVC, which is different from the volume name. The actual volume name is automatically generated when the PV is created by the PVC.

    -

    The name contains 3 to 55 characters (excluding the prefix), can consist of only lowercase letters, digits, and hyphens (-), and cannot start or end with a hyphen (-).

    -

    Cluster Name

    -

    Cluster to which the OBS volume belongs.

    -

    Namespace

    -

    Namespace to which the volume belongs. The default value is default.

    -

    Instance Type

    -

    Type of the storage instance created on OBS.

    -
    • Parallel file system: supported when the cluster version is 1.15 or later and the everest add-on version is 1.0.2 or later.
    • Object bucket: A bucket is a container for storing objects in OBS. OBS provides flat storage in the form of buckets and objects. Unlike the conventional multi-layer directory structure of file systems, all objects in a bucket are stored at the same logical layer.
    -

    Storage Class

    -

    This parameter is displayed when you select Object bucket for Instance Type.

    -

    This parameter indicates the storage classes supported by OBS.

    -
    • Standard: applicable to scenarios where a large number of hotspot files or small-sized files need to be accessed frequently (multiple times per month on average) and require fast access response.
    • Infrequent access: applicable to scenarios where data is not frequently accessed (less than 12 times per year on average) but requires fast access response.
    -

    Storage Policy

    -

    Object storage has the following policies:

    -

    Private: Only the bucket owner has full control over the bucket. Unauthorized users do not have permissions to access the bucket.

    -

    Access Mode

    -

    Access permissions of user applications on storage resources (PVs).

    -
    • ReadWriteMany (RWX): The volume is mounted as read-write by multiple nodes.
    -
    -
    -

  4. Click Create.

    After the OBS volume is successfully created, it is displayed in the OBS volume list. Click the PVC name to view detailed information about the OBS volume.

    -

-
-

Adding an OBS Volume

  1. Create a workload or job by referring to Creating a Deployment, Creating a StatefulSet, Creating a DaemonSet, or Creating a Job. After you have added a container, choose Data Storage > Cloud Volume, and then click Add Cloud Volume.
  2. Set Type to OBS.

    -

    - - - - - - - - - - - - - - - - - - -
    Table 2 OBS volume parameters

    Parameter

    -

    Description

    -

    Type

    -

    Select OBS.

    -

    OBS: Standard and Infrequent Access OBS buckets are supported. OBS buckets are commonly used for big data analytics, cloud native applications, static website hosting, and backup/active archiving.

    -

    Allocation Mode

    -

    Manual

    -

    Name: Select a created OBS volume.

    -

    Sub-Type: class of the selected volume. The value can be Standard or Infrequent access, and you do not need to set this parameter.

    -

    Automatic

    -

    Type of the storage instance created on OBS.

    -
    • Parallel file system: supported when the cluster version is 1.15 or later and the everest add-on version is 1.0.2 or later.
    • Object bucket: A bucket is a container for storing objects in OBS.

      Sub-Type: Select Standard or Infrequent access.

      -
    -

    Add Container Path

    -

    Configure the following parameters:

    -
    1. Container Path: Enter the mount path in the container, for example, /tmp.
      The mount path must not be a system directory, such as / and /var/run. Otherwise, an exception occurs. You are advised to mount the volume to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
      NOTICE:

      If the volume is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.

      -
      -
      -
    2. Set permissions.
      • Read-only: You can only read the data in the mounted volumes.
      • Read/Write: You can modify the data in the mounted volumes. Newly written data is not migrated if the container is migrated, which may cause data loss.
      -
    -

    Click Add Container Path to add multiple settings. Then, click OK.

    -
    -
    -

  3. Click OK.
-
-

Importing an OBS Volume

CCE allows you to import existing OBS volumes.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. On the OBS tab page, click Import.
  2. Select one or more OBS volumes that you want to import.
  3. Select the target cluster and namespace.
  4. Click OK.
-
-

Unbinding an OBS Volume

When an OBS volume is successfully created, the OBS volume is automatically bound to the current cluster. Other clusters can also use the OBS volume. When the volume is unbound from the cluster, other clusters can still use the volume.

-

If the volume has been mounted to a workload, the volume cannot be unbound from the cluster.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. In the OBS volume list, click Unbind next to the target OBS volume.
  2. In the dialog box displayed, click Yes.
-
-

Related Operations

After an OBS volume is created, you can perform the operation described in Table 3. -
- - - - - - - -
Table 3 Other operations

Operation

-

Description

-

Deleting an OBS volume

-
  1. Select the OBS volume to be deleted and click Delete in the Operation column.
  2. Follow the prompts to delete the volume.
-
-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0325.html b/docs/cce/umn/cce_01_0325.html deleted file mode 100644 index d3b48fa4..00000000 --- a/docs/cce/umn/cce_01_0325.html +++ /dev/null @@ -1,66 +0,0 @@ - - -

(kubectl) Automatically Creating an OBS Volume

-

Scenario

During the use of OBS, expected OBS buckets can be automatically created and mounted as volumes. Currently, standard and infrequent access OBS buckets are supported, which correspond to obs-standard and obs-standard-ia, respectively.

-
-

Prerequisites

  • You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.
  • The AK/SK has been uploaded. For details, see Preparations.
-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the pvc-obs-auto-example.yaml file, which is used to create a PVC.

    touch pvc-obs-auto-example.yaml

    -

    vi pvc-obs-auto-example.yaml

    -

    Example YAML:

    -
    apiVersion: v1 
    -kind: PersistentVolumeClaim 
    -metadata: 
    -  annotations: 
    -    volume.beta.kubernetes.io/storage-class: obs-standard  # OBS bucket type. The value can be obs-standard (standard) or obs-standard-ia (infrequent access).
    -  name: pvc-obs-auto-example  # PVC name
    -  namespace: default 
    -spec: 
    -  accessModes: 
    -  - ReadWriteMany 
    -  resources: 
    -    requests: 
    -      storage: 1Gi   # Storage capacity in the unit of Gi. For OBS buckets, this parameter is used only for verification (fixed to 1, cannot be empty or 0). Any value you set does not take effect for OBS buckets.
    - -
    - - - - - - - - - - - - - - - - -
    Table 1 Key parameters

    Parameter

    -

    Description

    -

    volume.beta.kubernetes.io/storage-class

    -

    Bucket type. Currently, obs-standard and obs-standard-ia are supported.

    -

    name

    -

    Name of the PVC to be created.

    -

    accessModes

    -

    Only ReadWriteMany is supported. ReadWriteOnce is not supported.

    -

    storage

    -

    Storage capacity in the unit of Gi. For OBS buckets, this field is used only for verification (cannot be empty or 0). Its value is fixed at 1, and any value you set does not take effect for OBS buckets.

    -
    -
    -

  3. Run the following command to create the PVC.

    kubectl create -f pvc-obs-auto-example.yaml

    -

    After the command is executed, an OBS bucket is created under the account in the region to which the cluster belongs. You can click the bucket name in Storage > OBS to view the bucket or view it on the OBS console. (If you need an Infrequent Access bucket instead, see the sketch after this procedure.)

    -
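If you need an Infrequent Access bucket instead of a Standard one, only the storage class annotation in the PVC above changes. A minimal sketch of the variant; the PVC name is illustrative:

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      annotations:
        volume.beta.kubernetes.io/storage-class: obs-standard-ia   # infrequent access bucket
      name: pvc-obs-ia-example      # illustrative name
      namespace: default
    spec:
      accessModes:
      - ReadWriteMany
      resources:
        requests:
          storage: 1Gi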

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0326.html b/docs/cce/umn/cce_01_0326.html deleted file mode 100644 index 57f78493..00000000 --- a/docs/cce/umn/cce_01_0326.html +++ /dev/null @@ -1,291 +0,0 @@ - - -

(kubectl) Creating a PV from an Existing OBS Bucket

-

Scenario

CCE allows you to use an existing OBS bucket to create a PersistentVolume (PV). You can create a PersistentVolumeClaim (PVC) and bind it to the PV.

-
-

Prerequisites

  • You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.
  • The AK/SK has been uploaded. For details, see Preparations.
-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Log in to the OBS console, create an OBS bucket, and record the bucket name and storage class.
  2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  3. Create two YAML files for creating the PV and PVC. Assume that the file names are pv-obs-example.yaml and pvc-obs-example.yaml.

    touch pv-obs-example.yaml pvc-obs-example.yaml

    - -
    - - - - - - - - - - - - - -

    Kubernetes Version

    -

    Description

    -

    YAML Example

    -

    1.11 ≤ K8s version ≤ 1.13

    -

    Clusters from v1.11 to v1.13

    -

    Example YAML

    -

    K8s version = 1.9

    -

    Clusters of v1.9

    -

    Example YAML

    -
    -
    -

    Clusters from v1.11 to v1.13

    -
    • Example YAML file for the PV:
      apiVersion: v1 
      -kind: PersistentVolume 
      -metadata: 
      -  name: pv-obs-example 
      -  annotations:
      -    pv.kubernetes.io/provisioned-by: flexvolume-huawei.com/fuxiobs
      -spec: 
      -  accessModes: 
      -  - ReadWriteMany 
      -  capacity: 
      -    storage: 1Gi 
      -  claimRef:
      -    apiVersion: v1
      -    kind: PersistentVolumeClaim
      -    name: pvc-obs-example
      -    namespace: default
      -  flexVolume: 
      -    driver: huawei.com/fuxiobs 
      -    fsType: obs 
      -    options: 
      -      fsType: obs 
      -      region: eu-de
      -      storage_class: STANDARD 
      -      volumeID: test-obs 
      -  persistentVolumeReclaimPolicy: Delete 
      -  storageClassName: obs-standard
      - -
      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      Table 1 Key parameters

      Parameter

      -

      Description

      -

      driver

      -

      Storage driver used to mount the volume. Set the driver to huawei.com/fuxiobs for the OBS volume.

      -

      storage_class

      -

      Storage class, including STANDARD (standard bucket) and STANDARD_IA (infrequent access bucket).

      -

      region

      -

      For details about the value of region, see Regions and Endpoints.

      -

      volumeID

      -

      OBS bucket name.

      -

      To obtain the name, log in to the CCE console, choose Resource Management > Storage, click the PVC name in the OBS tab page, and copy the PV name on the PV Details tab page.

      -

      storage

      -

      Storage capacity in the unit of Gi. The value is fixed at 1Gi.

      -

      storageClassName

      -

      Storage class supported by OBS, including obs-standard (standard bucket) and obs-standard-ia (infrequent access bucket).

      -

      spec.claimRef.apiVersion

      -

      The value is fixed at v1.

      -

      spec.claimRef.kind

      -

      The value is fixed at PersistentVolumeClaim.

      -

      spec.claimRef.name

      -

      The value is the same as the name of the PVC created in the next step.

      -

      spec.claimRef.namespace

      -

      The value is the same as the namespace of the PVC created in the next step.

      -
      -
      -
    • Example YAML file for the PVC:
      apiVersion: v1
      -kind: PersistentVolumeClaim
      -metadata:
      -  annotations:
      -    volume.beta.kubernetes.io/storage-class: obs-standard
      -    volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxiobs
      -  name: pvc-obs-example
      -  namespace: default
      -spec:
      -  accessModes:
      -  - ReadWriteMany
      -  resources:
      -    requests:
      -      storage: 1Gi
      -  volumeName: pv-obs-example
      - -
      - - - - - - - - - - - - - - - - -
      Table 2 Key parameters

      Parameter

      -

      Description

      -

      volume.beta.kubernetes.io/storage-class

      -

      Storage class supported by OBS, including obs-standard and obs-standard-ia.

      -

      volume.beta.kubernetes.io/storage-provisioner

      -

      Must be set to flexvolume-huawei.com/fuxiobs.

      -

      volumeName

      -

      Name of the PV.

      -

      storage

      -

      Storage capacity in the unit of Gi. The value is fixed at 1Gi.

      -
      -
      -
    -

    Clusters of v1.9

    -
    • Example YAML file for the PV:
      apiVersion: v1 
      -kind: PersistentVolume 
      -metadata: 
      -  name: pv-obs-example 
      -  namespace: default  
      -spec: 
      -  accessModes: 
      -  - ReadWriteMany 
      -  capacity: 
      -    storage: 1Gi 
      -  flexVolume: 
      -    driver: huawei.com/fuxiobs 
      -    fsType: obs 
      -    options: 
      -      fsType: obs 
      -      kubernetes.io/namespace: default 
      -      region: eu-de
      -      storage_class: STANDARD 
      -      volumeID: test-obs 
      -  persistentVolumeReclaimPolicy: Delete 
      -  storageClassName: obs-standard
      - -
      - - - - - - - - - - - - - - - - - - - - - - -
      Table 3 Key parameters

      Parameter

      -

      Description

      -

      driver

      -

      Storage driver used to mount the volume. Set the driver to huawei.com/fuxiobs for the OBS volume.

      -

      storage_class

      -

      Storage class, including STANDARD (standard bucket) and STANDARD_IA (infrequent access bucket).

      -

      region

      -

      For details about the value of region, see Regions and Endpoints.

      -

      volumeID

      -

      OBS bucket name.

      -

      To obtain the name, log in to the CCE console, choose Resource Management > Storage, click the PVC name in the OBS tab page, and copy the PV name on the PV Details tab page.

      -

      storage

      -

      Storage capacity in the unit of Gi. The value is fixed at 1Gi.

      -

      storageClassName

      -

      Storage class supported by OBS, including obs-standard (standard bucket) and obs-standard-ia (infrequent access bucket).

      -
      -
      -
    • Example YAML file for the PVC:
      apiVersion: v1
      -kind: PersistentVolumeClaim
      -metadata:
      -  annotations:
      -    volume.beta.kubernetes.io/storage-class: obs-standard
      -    volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxiobs
      -  name: pvc-obs-example
      -  namespace: default
      -spec:
      -  accessModes:
      -  - ReadWriteMany
      -  resources:
      -    requests:
      -      storage: 1Gi
      -  volumeName: pv-obs-example
      -  volumeNamespace: default
      - -
      - - - - - - - - - - - - - - - - -
      Table 4 Key parameters

      Parameter

      -

      Description

      -

      volume.beta.kubernetes.io/storage-class

      -

      Storage class supported by OBS, including obs-standard and obs-standard-ia.

      -

      volume.beta.kubernetes.io/storage-provisioner

      -

      Must be set to flexvolume-huawei.com/fuxiobs.

      -

      volumeName

      -

      Name of the PV.

      -

      storage

      -

      Storage capacity in the unit of Gi. The value is fixed at 1Gi.

      -
      -
      -
    -

  4. Create the PV.

    kubectl create -f pv-obs-example.yaml

    -

  5. Create the PVC.

    kubectl create -f pvc-obs-example.yaml

    -
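To confirm that the static PV and the PVC have bound to each other (using the example names above):

    kubectl get pv pv-obs-example
    kubectl describe pvc pvc-obs-example -n default
    # The PVC should be Bound to pv-obs-example; the Events section of the describe output
    # helps troubleshoot binding failures, for example a storage class mismatch.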

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0327.html b/docs/cce/umn/cce_01_0327.html deleted file mode 100644 index 9a4136ac..00000000 --- a/docs/cce/umn/cce_01_0327.html +++ /dev/null @@ -1,175 +0,0 @@ - - -

(kubectl) Creating a Deployment Mounted with an OBS Volume

-

Scenario

After an OBS volume is created or imported to CCE, you can mount the volume to a workload.

-
-

Prerequisites

  • You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.
  • The AK/SK has been uploaded. For details, see Preparations.
-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the obs-deployment-example.yaml file, which is used to create a pod.

    touch obs-deployment-example.yaml

    -

    vi obs-deployment-example.yaml

    -
    Example of mounting an OBS volume to a Deployment (PVC-based, shared volume):
    apiVersion: apps/v1 
    -kind: Deployment 
    -metadata: 
    -  name: obs-deployment-example                       # Workload name
    -  namespace: default 
    -spec: 
    -  replicas: 1 
    -  selector: 
    -    matchLabels: 
    -      app: obs-deployment-example 
    -  template: 
    -    metadata: 
    -      labels: 
    -        app: obs-deployment-example 
    -    spec: 
    -      containers: 
    -      - image: nginx 
    -        name: container-0 
    -        volumeMounts: 
    -        - mountPath: /tmp                       # Mount path
    -          name: pvc-obs-example 
    -      restartPolicy: Always
    -      imagePullSecrets:
    -        - name: default-secret
    -      volumes: 
    -      - name: pvc-obs-example  
    -        persistentVolumeClaim: 
    -          claimName: pvc-obs-auto-example       # PVC name
    -
    - -
    - - - - - - - - - - - - - -
    Table 1 Key parameters

    Parameter

    -

    Description

    -

    name

    -

    Name of the pod to be created.

    -

    app

    -

    Name of the application running in the pod.

    -

    mountPath

    -

    Mount path in the container.

    -
    -
    -

    spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

    -
    -

    Example of mounting an OBS volume to a StatefulSet (PVC template-based, dedicated volume):

    -
    Example YAML:
    apiVersion: apps/v1
    -kind: StatefulSet
    -metadata:
    -  name: deploy-obs-standard-in
    -  namespace: default
    -  generation: 1
    -  labels:
    -    appgroup: ''
    -spec:
    -  replicas: 1
    -  selector:
    -    matchLabels:
    -      app: deploy-obs-standard-in
    -  template:
    -    metadata:
    -      labels:
    -        app: deploy-obs-standard-in
    -      annotations:
    -        metrics.alpha.kubernetes.io/custom-endpoints: '[{"api":"","path":"","port":"","names":""}]'
    -        pod.alpha.kubernetes.io/initialized: 'true'
    -    spec:
    -      containers:
    -        - name: container-0
    -          image: 'nginx:1.12-alpine-perl'
    -          env:
    -            - name: PAAS_APP_NAME
    -              value: deploy-obs-standard-in
    -            - name: PAAS_NAMESPACE
    -              value: default
    -            - name: PAAS_PROJECT_ID
    -              value: a2cd8e998dca42e98a41f596c636dbda
    -          resources: {}
    -          volumeMounts:
    -            - name: obs-bs-standard-mountoptionpvc
    -              mountPath: /tmp
    -          terminationMessagePath: /dev/termination-log
    -          terminationMessagePolicy: File
    -          imagePullPolicy: IfNotPresent
    -      restartPolicy: Always
    -      terminationGracePeriodSeconds: 30
    -      dnsPolicy: ClusterFirst
    -      securityContext: {}
    -      imagePullSecrets:
    -        - name: default-secret
    -      affinity: {}
    -      schedulerName: default-scheduler
    -  volumeClaimTemplates:
    -    - metadata:
    -        name: obs-bs-standard-mountoptionpvc
    -        annotations:
    -          volume.beta.kubernetes.io/storage-class: obs-standard
    -          volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxiobs
    -      spec:
    -        accessModes:
    -          - ReadWriteMany
    -        resources:
    -          requests:
    -            storage: 1Gi
    -  serviceName: wwww
    -  podManagementPolicy: OrderedReady
    -  updateStrategy:
    -    type: RollingUpdate
    -  revisionHistoryLimit: 10
    -
    - -
    - - - - - - - - - - - - - - - - -
    Table 2 Key parameters

    Parameter

    -

    Description

    -

    name

    -

    Name of the created workload.

    -

    image

    -

    Image of the workload.

    -

    mountPath

    -

    Mount path in the container. In this example, the volume is mounted to the /tmp directory.

    -

    serviceName

    -

    Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.

    -
    -
    -

    spec.template.spec.containers.volumeMounts.name and spec.volumeClaimTemplates.metadata.name must be consistent because they have a mapping relationship.

    -
    -

  3. Run the following command to create the pod:

    kubectl create -f obs-deployment-example.yaml

    -

    After the creation is complete, choose Storage > OBS on the CCE console and click the PVC name. On the PVC details page, you can view the binding relationship between the OBS service and the PVC.

    -
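To check that the bucket is actually mounted in the container, you can list the mount path from inside a pod. A sketch based on the Deployment example above; replace <pod-name> with an actual pod name:

    kubectl get pod -n default -l app=obs-deployment-example
    kubectl exec -n default <pod-name> -- ls /tmp
    # Objects already stored in the bucket appear as files under the mount path.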

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0328.html b/docs/cce/umn/cce_01_0328.html deleted file mode 100644 index bba3314d..00000000 --- a/docs/cce/umn/cce_01_0328.html +++ /dev/null @@ -1,96 +0,0 @@ - - -

(kubectl) Creating a StatefulSet Mounted with an OBS Volume

-

Scenario

CCE allows you to use an existing OBS volume to create a StatefulSet through a PersistentVolumeClaim (PVC).

-
-

Prerequisites

  • You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.
  • The AK/SK has been uploaded. For details, see Preparations.
-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Create an OBS volume by referring to Creating an OBS Volume and obtain the PVC name.
  2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  3. Create a YAML file for creating the workload. Assume that the file name is obs-statefulset-example.yaml.

    touch obs-statefulset-example.yaml

    -

    vi obs-statefulset-example.yaml

    -

    Example YAML:

    -
    apiVersion: apps/v1
    -kind: StatefulSet
    -metadata:
    -  name: obs-statefulset-example
    -  namespace: default
    -spec:
    -  replicas: 1
    -  selector:
    -    matchLabels:
    -      app: obs-statefulset-example
    -  serviceName: qwqq
    -  template:
    -    metadata:
    -      annotations:
    -        metrics.alpha.kubernetes.io/custom-endpoints: '[{"api":"","path":"","port":"","names":""}]'
    -        pod.alpha.kubernetes.io/initialized: "true"
    -      creationTimestamp: null
    -      labels:
    -        app: obs-statefulset-example
    -    spec:
    -      affinity: {}
    -      containers:
    -      - image: nginx:latest
    -        imagePullPolicy: Always
    -        name: container-0
    -        volumeMounts:
    -        - mountPath: /tmp
    -          name: pvc-obs-example
    -      imagePullSecrets:
    -      - name: default-secret
    -      volumes:
    -        - name: pvc-obs-example
    -          persistentVolumeClaim:
    -            claimName: cce-obs-demo
    - -
    - - - - - - - - - - - - - - - - - - - - - - -
    Table 1 Key parameters

    Parameter

    -

    Description

    -

    replicas

    -

    Number of pods.

    -

    name

    -

    Name of the created workload.

    -

    image

    -

    Image used by the workload.

    -

    mountPath

    -

    Mount path in the container.

    -

    serviceName

    -

    Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.

    -

    claimName

    -

    Name of an existing PVC.

    -
    -
    -

  4. Create the StatefulSet.

    kubectl create -f obs-statefulset-example.yaml

    -
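You can verify end to end that data written by the workload lands in object storage. A small check, assuming the StatefulSet example above (the first pod is named obs-statefulset-example-0):

    kubectl exec -n default obs-statefulset-example-0 -- sh -c 'echo hello > /tmp/hello.txt'
    # The object hello.txt should then be visible in the bucket backing the cce-obs-demo PVC,
    # for example on the OBS console.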

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0330.html b/docs/cce/umn/cce_01_0330.html deleted file mode 100644 index aa21cdb6..00000000 --- a/docs/cce/umn/cce_01_0330.html +++ /dev/null @@ -1,14 +0,0 @@ - - -

Overview

-

CCE allows you to mount a volume created from an SFS Turbo file system to a container to store data persistently. Provisioned on demand and fast, SFS Turbo is suitable for DevOps, container microservices, and enterprise OA scenarios.

-
Figure 1 Mounting SFS Turbo volumes to CCE
-

Description

  • Standard file protocols: You can mount file systems as volumes to servers, the same as using local directories.
  • Data sharing: The same file system can be mounted to multiple servers, so that data can be shared.
  • Private network: Users can access data only over private networks in data centers.
  • Data isolation: The on-cloud storage service provides exclusive cloud file storage, which delivers data isolation and ensures IOPS performance.
  • Use cases: Deployments/StatefulSets in the ReadWriteMany mode, DaemonSets, and jobs created for high-traffic websites, log storage, DevOps, and enterprise OA applications
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0331.html b/docs/cce/umn/cce_01_0331.html deleted file mode 100644 index b74d2295..00000000 --- a/docs/cce/umn/cce_01_0331.html +++ /dev/null @@ -1,58 +0,0 @@ - - -

Using SFS Turbo Volumes

-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

  • SFS Turbo volumes are available only in certain regions.
  • Currently, SFS Turbo file systems cannot be directly created on CCE.
  • The following operations apply to clusters of Kubernetes 1.13 or earlier.
-
-

Importing an SFS Turbo Volume

CCE allows you to import existing SFS Turbo volumes.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. On the SFS Turbo tab page, click Import.
  2. Select one or more SFS Turbo volumes that you want to import.
  3. Select the cluster and namespace to which you want to import the volumes.
  4. Click OK. The volumes are displayed in the list. When PVC Status becomes Bound, the volumes are imported successfully.
-
-

Adding an SFS Turbo Volume

  1. Create a workload or job by referring to Creating a Deployment, Creating a StatefulSet, Creating a DaemonSet, or Creating a Job. After you have added a container, choose Data Storage > Cloud Volume, and then click Add Cloud Volume.
  2. Set the storage volume type to SFS Turbo.

    -

    - - - - - - - - - - - - - - - -
    Table 1 Parameters for configuring an SFS Turbo volume

    Parameter

    -

    Description

    -

    Type

    -

    SFS Turbo: applicable to DevOps, containerized microservices, and enterprise OA applications.

    -

    Allocation Mode

    -

    Manual

    -

    Select an existing SFS Turbo volume. You need to import SFS Turbo volumes in advance. For details, see Importing an SFS Turbo Volume.

    -

    Add Container Path

    -

    Configure the following parameters:

    -
    1. subPath: Enter the subpath of the file storage, for example, /tmp.

      This parameter specifies a subpath inside the referenced volume instead of its root. If this parameter is not specified, the root path is used. Currently, only file storage is supported. The value must be a relative path and cannot start with a slash (/) or ../.

      -
    2. Container Path: Enter the mount path in the container, for example, /tmp.
      The mount path must not be a system directory, such as / and /var/run. Otherwise, an exception occurs. You are advised to mount the volume to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
      NOTICE:

      If the volume is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.

      -
      -
      -
    3. Set permissions.
      • Read-only: You can only read the data in the mounted volumes.
      • Read/Write: You can modify the data in the mounted volumes. Newly written data is not migrated if the container is migrated, which causes data loss.
      -
    -

    Click Add Container Path to add multiple settings. Then, click OK.

    -
    -
    -

  3. Click OK.
-
-

Unbinding an SFS Turbo Volume

When an SFS Turbo volume is successfully imported to a cluster, the volume is bound to the cluster. The volume can also be imported to other clusters. When the volume is unbound from the cluster, other clusters can still import and use the volume.

-

If the SFS Turbo volume has been mounted to a workload, the volume cannot be unbound from the cluster.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. In the SFS Turbo volume list, click Unbind next to the target volume.
  2. In the dialog box displayed, click OK.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0332.html b/docs/cce/umn/cce_01_0332.html deleted file mode 100644 index 4fa2cec8..00000000 --- a/docs/cce/umn/cce_01_0332.html +++ /dev/null @@ -1,150 +0,0 @@ - - -

(kubectl) Creating a PV from an Existing SFS Turbo File System

-

Scenario

CCE allows you to use an existing SFS Turbo file system to create a PersistentVolume (PV). After the creation is successful, you can create a PersistentVolumeClaim (PVC) and bind it to the PV.

-
-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Log in to the SFS console, create a file system, and record the file system ID, shared path, and capacity.
  2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  3. Create two YAML files for creating the PV and PVC. Assume that the file names are pv-efs-example.yaml and pvc-efs-example.yaml.

    touch pv-efs-example.yaml pvc-efs-example.yaml

    -
    • Example YAML file for the PV:
      apiVersion: v1 
      -kind: PersistentVolume 
      -metadata: 
      -  name: pv-efs-example 
      -  annotations:
      -    pv.kubernetes.io/provisioned-by: flexvolume-huawei.com/fuxiefs
      -spec: 
      -  accessModes: 
      -  - ReadWriteMany 
      -  capacity: 
      -    storage: 100Gi 
      -  claimRef:
      -    apiVersion: v1
      -    kind: PersistentVolumeClaim
      -    name: pvc-efs-example
      -    namespace: default
      -  flexVolume: 
      -    driver: huawei.com/fuxiefs 
      -    fsType: efs 
      -    options: 
      -      deviceMountPath: <your_deviceMountPath>  # Shared storage path of your SFS Turbo file.
      -      fsType: efs 
      -      volumeID: 8962a2a2-a583-4b7f-bb74-fe76712d8414 
      -  persistentVolumeReclaimPolicy: Delete 
      -  storageClassName: efs-standard
      - -
      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      Table 1 Key parameters

      Parameter

      -

      Description

      -

      driver

      -

      Storage driver used to mount the volume. Set it to huawei.com/fuxiefs.

      -

      deviceMountPath

      -

      Shared path of the SFS Turbo volume.

      -

      volumeID

      -

      SFS Turbo volume ID.

      -

      To obtain the ID, log in to the CCE console, choose Resource Management > Storage, click the PVC name in the SFS Turbo tab page, and copy the PVC ID on the PVC details page.

      -

      storage

      -

      File system size.

      -

      storageClassName

      -

      Volume type supported by SFS Turbo. The value can be efs-standard or efs-performance. Currently, SFS Turbo does not support dynamic creation; therefore, this parameter is not used for now.

      -

      spec.claimRef.apiVersion

      -

      The value is fixed at v1.

      -

      spec.claimRef.kind

      -

      The value is fixed at PersistentVolumeClaim.

      -

      spec.claimRef.name

      -

      The value is the same as the name of the PVC created in the next step.

      -

      spec.claimRef.namespace

      -

      The value is the same as the namespace of the PVC created in the next step.

      -
      -
      -
    • Example YAML file for the PVC:
      apiVersion: v1 
      -kind: PersistentVolumeClaim 
      -metadata: 
      -  annotations: 
      -    volume.beta.kubernetes.io/storage-class: efs-standard 
      -    volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxiefs 
      -  name: pvc-efs-example 
      -  namespace: default 
      -spec: 
      -  accessModes: 
      -  - ReadWriteMany 
      -  resources: 
      -    requests: 
      -      storage: 100Gi 
      -  volumeName: pv-efs-example
      - -
      - - - - - - - - - - - - - - - - -
      Table 2 Key parameters

      Parameter

      -

      Description

      -

      volume.beta.kubernetes.io/storage-class

      -

      Read/write mode supported by SFS Turbo. The value can be efs-standard or efs-performance. The value must be the same as that of the existing PV.

      -

      volume.beta.kubernetes.io/storage-provisioner

      -

      The field must be set to flexvolume-huawei.com/fuxiefs.

      -

      storage

      -

      Storage capacity, in the unit of Gi. The value must be the same as the storage size of the existing PV.

      -

      volumeName

      -

      Name of the PV.

      -
      -
      -
    -

    The VPC to which the SFS Turbo file system belongs must be the same as the VPC of the ECS VM planned for the workload. Ports 111, 445, 2049, 2051, and 20048 must be enabled in the security groups.

    -
    -

  4. Create the PV.

    kubectl create -f pv-efs-example.yaml

    -

  5. Create the PVC.

    kubectl create -f pvc-efs-example.yaml

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0333.html b/docs/cce/umn/cce_01_0333.html deleted file mode 100644 index b398c4cf..00000000 --- a/docs/cce/umn/cce_01_0333.html +++ /dev/null @@ -1,78 +0,0 @@ - - -

(kubectl) Creating a Deployment Mounted with an SFS Turbo Volume

-

Scenario

After an SFS Turbo volume is created or imported to CCE, you can mount the volume to a workload.

-
-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the efs-deployment-example.yaml file, which is used to create a Deployment:

    touch efs-deployment-example.yaml

    -

    vi efs-deployment-example.yaml

    -

    Example of mounting an SFS Turbo volume to a Deployment (PVC-based, shared volume):

    -
    apiVersion: apps/v1  
    -kind: Deployment  
    -metadata:  
    -  name: efs-deployment-example                                # Workload name
    -  namespace: default  
    -spec:  
    -  replicas: 1  
    -  selector:  
    -    matchLabels:  
    -      app: efs-deployment-example  
    -  template:  
    -    metadata:  
    -      labels:  
    -        app: efs-deployment-example  
    -    spec:  
    -      containers:  
    -      - image: nginx  
    -        name: container-0  
    -        volumeMounts:  
    -        - mountPath: /tmp                                # Mount path
    -          name: pvc-efs-example  
    -      restartPolicy: Always
    -      imagePullSecrets:
    -        - name: default-secret
    -      volumes:  
    -      - name: pvc-efs-example  
    -        persistentVolumeClaim:  
    -          claimName: pvc-sfs-auto-example                # PVC name
    - -
    - - - - - - - - - - - - - -
    Table 1 Key parameters

    Parameter

    -

    Description

    -

    name

    -

    Name of the created Deployment.

    -

    app

    -

    Name of the application running in the Deployment.

    -

    mountPath

    -

    Mount path in the container. In this example, the mount path is /tmp.

    -
    -
    -

    spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

    -
    -

  3. Run the following command to create the pod:

    kubectl create -f efs-deployment-example.yaml

    -

    After the creation is complete, choose Storage > SFS Turbo on the CCE console and click the PVC name. On the PVC details page, you can view the binding relationship between SFS Turbo and PVC.

    -
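    If you prefer to check the binding with kubectl instead of the console, a minimal query (assuming the PVC name pvc-sfs-auto-example and the default namespace used in the example above) is as follows:

    kubectl get pvc pvc-sfs-auto-example -n default

    The SFS Turbo volume is correctly bound when the STATUS column of the PVC shows Bound.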

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0336.html b/docs/cce/umn/cce_01_0336.html deleted file mode 100644 index 6cf954f5..00000000 --- a/docs/cce/umn/cce_01_0336.html +++ /dev/null @@ -1,242 +0,0 @@ - - -

Using a Custom AK/SK to Mount an OBS Volume

-

Scenario

By default, all IAM users under an account mount OBS volumes with the access key uploaded on the console and therefore share the same permissions on the buckets. With everest 1.2.8 or later, you can configure custom access keys for different IAM users so that their permissions on OBS volumes can be differentiated.

-
-

Prerequisites

  • The everest add-on version must be 1.2.8 or later.
  • The cluster version must be 1.15.11 or later.
-
-

Notes and Constraints

Custom access keys cannot be configured for secure containers.

-
-

Disabling Auto Key Mounting

The key you uploaded is used by default when mounting an OBS volume. That is, all IAM users under your account will use the same key to mount OBS buckets, and they have the same permissions on buckets. This setting does not allow you to configure differentiated permissions for different IAM users.

-

If you have uploaded an AK/SK, you are advised to disable the automatic mounting of access keys by enabling the disable-auto-mount-secret parameter in the everest add-on to prevent IAM users from performing unauthorized operations. In this way, the access keys uploaded on the console will not be used when OBS volumes are created.

-
  • When enabling disable-auto-mount-secret, ensure that no OBS volume exists in the cluster. A workload mounted with an OBS volume, when scaled or restarted, will fail to remount the OBS volume because it needs to specify the access key but is prohibited by disable-auto-mount-secret.
  • If disable-auto-mount-secret is set to true, an access key must be specified when a PV or PVC is created. Otherwise, the OBS volume fails to be mounted.
-
-

kubectl edit ds everest-csi-driver -nkube-system

-

Search for disable-auto-mount-secret and set it to true.

-
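The following is a hypothetical excerpt of the everest-csi-driver container definition after the change. It assumes the option is passed as a container argument; the exact field layout in your add-on version may differ, so use it only as orientation when editing the DaemonSet:

# Hypothetical excerpt of the everest-csi-driver DaemonSet (field layout may differ by add-on version)
containers:
- name: everest-csi-driver
  args:
  - --disable-auto-mount-secret=true    # Access keys uploaded on the console are no longer used for mounting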

-

Run :wq to save the settings and exit. Wait until the pod is restarted.

-
-

Creating a Secret Using an Access Key

  1. Obtain an access key.

    For details, see Creating Access Keys (AK and SK).

    -

  2. Encode the keys using Base64. (Assume that the AK is xxx and the SK is yyy.)

    echo -n xxx|base64

    -

    echo -n yyy|base64

    -

    Record the encoded AK and SK.

    -

  3. Create a YAML file for the secret, for example, test-user.yaml.

    apiVersion: v1
    -data:
    -  access.key: WE5WWVhVNU*****
    -  secret.key: Nnk4emJyZ0*****
    -kind: Secret
    -metadata:
    -  name: test-user
    -  namespace: default
    -  labels:
    -    secret.kubernetes.io/used-by: csi
    -type: cfe/secure-opaque
    -

    Specifically:

    - -
    - - - - - - - - - - - - - - - - - - - - - - -

    Parameter

    -

    Description

    -

    access.key

    -

    Base64-encoded AK.

    -

    secret.key

    -

    Base64-encoded SK.

    -

    name

    -

    Secret name.

    -

    namespace

    -

    Namespace of the secret.

    -

    secret.kubernetes.io/used-by: csi

    -

    You need to add this label in the YAML file if you want to make it available on the CCE console when you create an OBS PV/PVC.

    -

    type

    -

    Secret type. The value must be cfe/secure-opaque.

    -

    When this type is used, the data entered by users is automatically encrypted.

    -
    -
    -

  4. Create the secret.

    kubectl create -f test-user.yaml

    -

-
-

Mounting a Secret When Statically Creating an OBS Volume

After a secret is created using the AK/SK, you can associate the secret with the PV to be created and then use the AK/SK in the secret to mount an OBS volume.

-
  1. Log in to the OBS console, create an OBS bucket, and record the bucket name and storage class. The parallel file system is used as an example.
  2. Create a YAML file for the PV, for example, pv-example.yaml.

    -
    apiVersion: v1
    -kind: PersistentVolume
    -metadata:
    -  name: pv-obs-example
    -  annotations:
    -    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
    -spec:
    -  accessModes:
    -  - ReadWriteMany
    -  capacity:
    -    storage: 1Gi
    -  csi:
    -    nodePublishSecretRef:
    -      name: test-user
    -      namespace: default
    -    driver: obs.csi.everest.io
    -    fsType: obsfs
    -    volumeAttributes:
    -      everest.io/obs-volume-type: STANDARD
    -      everest.io/region: eu-de
    -      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
    -    volumeHandle: obs-normal-static-pv
    -  persistentVolumeReclaimPolicy: Delete
    -  storageClassName: csi-obs
    - -
    - - - - - - - - - - - - - -

    Parameter

    -

    Description

    -

    nodePublishSecretRef

    -

    Secret specified during the mounting.

    -
    • name: name of the secret
    • namespace: namespace of the secret
    -

    fsType

    -

    File type. The value can be obsfs or s3fs. If the value is s3fs, an OBS bucket is created and mounted using s3fs. If the value is obsfs, an OBS parallel file system is created and mounted using obsfs. You are advised to set this field to obsfs.

    -

    volumeHandle

    -

    OBS bucket name.

    -
    -
    -

  3. Create the PV.

    kubectl create -f pv-example.yaml

    -

    After a PV is created, you can create a PVC and associate it with the PV.

    -

  4. Create a YAML file for the PVC, for example, pvc-example.yaml.

    Example YAML file for the PVC:

    -
    apiVersion: v1
    -kind: PersistentVolumeClaim
    -metadata:
    -  annotations:
    -    csi.storage.k8s.io/node-publish-secret-name: test-user
    -    csi.storage.k8s.io/node-publish-secret-namespace: default
    -    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
    -    everest.io/obs-volume-type: STANDARD
    -    csi.storage.k8s.io/fstype: obsfs
    -  name: obs-secret
    -  namespace: default
    -spec:
    -  accessModes:
    -  - ReadWriteMany
    -  resources:
    -    requests:
    -      storage: 1Gi
    -  storageClassName: csi-obs
    -  volumeName: pv-obs-example
    - -
    - - - - - - - - - - -

    Parameter

    -

    Description

    -

    csi.storage.k8s.io/node-publish-secret-name

    -

    Name of the secret

    -

    csi.storage.k8s.io/node-publish-secret-namespace

    -

    Namespace of the secret

    -
    -
    -

  5. Create the PVC.

    kubectl create -f pvc-example.yaml

    -

    After the PVC is created, you can create a workload and associate it with the PVC to mount the volume. A sample Deployment is sketched after this procedure.

    -

-
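A minimal Deployment sketch that mounts the PVC created above (obs-secret in the default namespace). The workload name and mount path match the ones assumed in the Verification section below; the image is a placeholder:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: obs-secret                        # Example workload name used in the Verification section
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: obs-secret
  template:
    metadata:
      labels:
        app: obs-secret
    spec:
      containers:
      - name: container-0
        image: nginx                      # Placeholder image
        volumeMounts:
        - mountPath: /temp                # Mount path used in the Verification section
          name: obs-volume
      imagePullSecrets:
      - name: default-secret
      volumes:
      - name: obs-volume
        persistentVolumeClaim:
          claimName: obs-secret           # PVC created in the previous step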
-

Mounting a Secret When Dynamically Creating an OBS Volume

When dynamically creating an OBS volume, you can use the following method to specify a secret:

-
  1. Create a YAML file for the PVC, for example, pvc-example.yaml.

    apiVersion: v1
    -kind: PersistentVolumeClaim
    -metadata:
    -  annotations:
    -    csi.storage.k8s.io/node-publish-secret-name: test-user
    -    csi.storage.k8s.io/node-publish-secret-namespace: default
    -    everest.io/obs-volume-type: STANDARD
    -    csi.storage.k8s.io/fstype: obsfs
    -  name: obs-secret
    -  namespace: default
    -spec:
    -  accessModes:
    -  - ReadWriteMany
    -  resources:
    -    requests:
    -      storage: 1Gi
    -  storageClassName: csi-obs
    - -
    - - - - - - - - - - -

    Parameter

    -

    Description

    -

    csi.storage.k8s.io/node-publish-secret-name

    -

    Name of the secret

    -

    csi.storage.k8s.io/node-publish-secret-namespace

    -

    Namespace of the secret

    -
    -
    -

  2. Create the PVC.

    kubectl create -f pvc-example.yaml

    -

    After the PVC is created, you can create a workload and associate it with the PVC to create volumes.

    -

-
-

Verification

You can use a secret of an IAM user to mount an OBS volume. Assume that a workload named obs-secret is created, the mount path in the container is /temp, and the IAM user has the CCE ReadOnlyAccess and Tenant Guest permissions.
  1. Query the name of the workload pod.

    kubectl get po | grep obs-secret

    -

    Expected outputs:

    -
    obs-secret-5cd558f76f-vxslv          1/1     Running   0          3m22s
    -
  2. Query the objects in the mount path. In this example, the query is successful.

    kubectl exec obs-secret-5cd558f76f-vxslv -- ls -l /temp/

    -
  3. Write data into the mount path. In this example, the write operation fails.

    kubectl exec obs-secret-5cd558f76f-vxslv -- touch /temp/test

    -

    Expected outputs:

    -
    touch: setting times of '/temp/test': No such file or directory
    -command terminated with exit code 1
    -
  4. Set the read/write permissions for the IAM user who mounted the OBS volume by referring to the bucket policy configuration.

    -

    -
  5. Write data into the mount path again. In this example, the write operation succeeds.

    kubectl exec obs-secret-5cd558f76f-vxslv -- touch /temp/test

    -
  6. Check the mount path in the container to see whether the data is successfully written.

    kubectl exec obs-secret-5cd558f76f-vxslv -- ls -l /temp/

    -

    Expected outputs:

    -
    -rwxrwxrwx 1 root root 0 Jun  7 01:52 test
    -
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0337.html b/docs/cce/umn/cce_01_0337.html deleted file mode 100644 index b6bb6888..00000000 --- a/docs/cce/umn/cce_01_0337.html +++ /dev/null @@ -1,184 +0,0 @@ - - -

Setting Mount Options

-

Scenario

You can mount cloud storage volumes to your containers and use these volumes as local directories.

-

This section describes how to set mount options when mounting SFS and OBS volumes. You can set mount options in a PV and bind the PV to a PVC. Alternatively, set mount options in a StorageClass and use the StorageClass to create a PVC. In this way, PVs can be dynamically created and inherit mount options configured in the StorageClass by default.

-
-

SFS Volume Mount Options

The everest add-on in CCE presets the options described in Table 1 for mounting SFS volumes. You can set other mount options if needed. For details, see Mounting an NFS File System to ECSs (Linux).

- -
- - - - - - - - - - - - - - - - -
Table 1 Preset mount options for SFS volumes

Option

-

Description

-

vers=3

-

File system version. Currently, only NFSv3 is supported. Value: 3

-

nolock

-

Whether to lock files on the server using the NLM protocol. If nolock is selected, the lock is valid for applications on one host. For applications on another host, the lock is invalid.

-

timeo=600

-

Waiting time before the NFS client retransmits a request. The unit is 0.1 seconds. Recommended value: 600

-

hard/soft

-

Mounting mode.

-
  • hard: If the NFS request times out, the client keeps resending the request until the request is successful.
  • soft: If the NFS request times out, the client returns an error to the invoking program.
-

The default value is hard.

-
-
-
-

OBS Volume Mount Options

When mounting an OBS volume, the everest add-on presets the options described in Table 2 and Table 3 by default. The options in Table 2 are mandatory.

- -
- - - - - - - - - - - - - - - - - - - - - - - - - -
Table 2 Mandatory mount options configured by default

Option

-

Description

-

use_ino

-

If enabled, obsfs allocates the inode number. Enabled by default in read/write mode.

-

big_writes

-

If configured, the maximum size of the cache can be modified.

-

nonempty

-

Allows non-empty mount paths.

-

allow_other

-

Allows other users to access the parallel file system.

-

no_check_certificate

-

Disables server certificate verification.

-

enable_noobj_cache

-

Enables cache entries for objects that do not exist, which can improve performance. Enabled by default in object bucket read/write mode.

-

This option is no longer set by default since everest 1.2.40.

-

sigv2

-

Specifies the signature version. Used by default in object buckets.

-
-
- -
- - - - - - - - - - - - - - - - -
Table 3 Optional mount options configured by default

Option

-

Description

-

max_write=131072

-

Maximum size of a single write request, in bytes. This option takes effect together with big_writes; the preset value 131072 corresponds to 128 KB.

-

ssl_verify_hostname=0

-

Disables verifying the SSL certificate based on the host name.

-

max_background=100

-

Allows setting the maximum number of waiting requests in the background. Used by default in parallel file systems.

-

public_bucket=1

-

If set to 1, public buckets are mounted anonymously. Enabled by default in object bucket read/write mode.

-
-
-

You can log in to the node to which the pod is scheduled and view all mount options used for mounting the OBS volume in the process details.

-
  • Object bucket: ps -ef | grep s3fs
    root     22142     1  0 Jun03 ?        00:00:00 /usr/bin/s3fs pvc-82fe2cbe-3838-43a2-8afb-f994e402fb9d /mnt/paas/kubernetes/kubelet/pods/0b13ff68-4c8e-4a1c-b15c-724fd4d64389/volumes/kubernetes.io~csi/pvc-82fe2cbe-3838-43a2-8afb-f994e402fb9d/mount -o url=https://{{endpoint}}:443 -o endpoint=xxxxxx -o passwd_file=/opt/everest-host-connector/1622707954357702943_obstmpcred/pvc-82fe2cbe-3838-43a2-8afb-f994e402fb9d -o nonempty -o big_writes -o enable_noobj_cache -o sigv2 -o allow_other -o no_check_certificate -o ssl_verify_hostname=0 -o max_write=131072 -o multipart_size=20 -o umask=0
    -
  • Parallel file system: ps -ef | grep obsfs
    root      1355     1  0 Jun03 ?        00:03:16 /usr/bin/obsfs pvc-86720bb9-5aa8-4cde-9231-5253994f8468 /mnt/paas/kubernetes/kubelet/pods/c959a91d-eced-4b41-91c6-96cbd65324f9/volumes/kubernetes.io~csi/pvc-86720bb9-5aa8-4cde-9231-5253994f8468/mount -o url=https://{{endpoint}}:443 -o endpoint=xxxxxx -o passwd_file=/opt/everest-host-connector/1622714415305160399_obstmpcred/pvc-86720bb9-5aa8-4cde-9231-5253994f8468 -o allow_other -o nonempty -o big_writes -o use_ino -o no_check_certificate -o ssl_verify_hostname=0 -o umask=0027 -o max_write=131072 -o max_background=100 -o uid=10000 -o gid=10000
    -
-
-

Prerequisites

  • The everest add-on version must be 1.2.8 or later.
  • The add-on identifies the mount options and transfers them to the underlying storage resources, which determine whether the specified options are valid.
-
-

Notes and Constraints

Mount options cannot be configured for secure containers.

-
-

Setting Mount Options in a PV

You can use the mountOptions field to set mount options in a PV. The options you can configure in mountOptions are listed in SFS Volume Mount Options and OBS Volume Mount Options.

-
apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: pv-obs-example
-  annotations:
-    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
-spec:
-  mountOptions:
-  - umask=0027
-  - uid=10000
-  - gid=10000
-  accessModes:
-  - ReadWriteMany
-  capacity:
-    storage: 1Gi
-  claimRef:
-    apiVersion: v1
-    kind: PersistentVolumeClaim
-    name: pvc-obs-example
-    namespace: default
-  csi:
-    driver: obs.csi.everest.io
-    fsType: obsfs
-    volumeAttributes:
-      everest.io/obs-volume-type: STANDARD
-      everest.io/region: eu-de
-      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
-    volumeHandle: obs-normal-static-pv
-  persistentVolumeReclaimPolicy: Delete
-  storageClassName: csi-obs
-

After a PV is created, you can create a PVC and bind it to the PV, and then mount the PV to the container in the workload.

-
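For reference, a minimal PVC sketch that binds to the PV above (the PVC name and namespace follow the claimRef in the PV):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-obs-example
  namespace: default
  annotations:
    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-obs
  volumeName: pv-obs-example              # Bind to the PV that carries the mount options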
-

Setting Mount Options in a StorageClass

You can use the mountOptions field to set mount options in a StorageClass. The options you can configure in mountOptions are listed in SFS Volume Mount Options and OBS Volume Mount Options.

-
apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
-  name: csi-obs-mount-option
-mountOptions:
-- umask=0027
-- uid=10000
-- gid=10000
-parameters:
-  csi.storage.k8s.io/csi-driver-name: obs.csi.everest.io
-  csi.storage.k8s.io/fstype: s3fs
-  everest.io/obs-volume-type: STANDARD
-provisioner: everest-csi-provisioner
-reclaimPolicy: Delete
-volumeBindingMode: Immediate
-

After the StorageClass is configured, you can use it to create a PVC. By default, the dynamically created PVs inherit the mount options set in the StorageClass.

-
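For reference, a minimal PVC sketch that uses the StorageClass above; the PV dynamically created for it inherits the mount options (the PVC name is an example):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-obs-mount-option              # Example PVC name
  namespace: default
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-obs-mount-option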
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0338.html b/docs/cce/umn/cce_01_0338.html deleted file mode 100644 index 47f62342..00000000 --- a/docs/cce/umn/cce_01_0338.html +++ /dev/null @@ -1,45 +0,0 @@ - - -

Removing a Node

-

Scenario

Removing a node from a cluster in CCE will re-install the node OS and clear CCE components on the node.

-

Removing a node will not delete the server (ECS) corresponding to the node. You are advised to remove nodes at off-peak hours to avoid impacts on your services.

-

After a node is removed from the cluster, the node is still running and incurs fees.

-
-

Notes and Constraints

  • Nodes can be removed only when the cluster is in the Available or Unavailable state.
  • A CCE node can be removed only when it is in the Active, Abnormal, or Error state.
  • A CCE node in the Active state can have its OS re-installed and CCE components cleared after it is removed.
  • If the OS fails to be re-installed after the node is removed, manually re-install the OS. After the re-installation, log in to the node and run the clearance script to clear CCE components. For details, see Handling Failed OS Reinstallation.
-
-

Precautions

  • Removing a node will lead to pod migration, which may affect services. Perform this operation during off-peak hours.
  • Unexpected risks may occur during the operation. Back up data in advance.
  • While the node is being deleted, the backend will set the node to the unschedulable state.
  • After you remove the node and re-install the OS, the original LVM partitions will be cleared and the data managed by LVM will be cleared. Therefore, back up data in advance.
-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Nodes. In the same row as the target node, choose More > Remove.
  2. In the dialog box displayed, enter REMOVE, configure the login information required for re-installing the OS, and click Yes. Wait until the node is removed.

    After the node is removed, workload pods on the node are automatically migrated to other available nodes.

    -

-
-

Handling Failed OS Reinstallation

You can perform the following steps to re-install the OS and clear the CCE components on the node if previous attempts fail:

-
  1. Log in to the management console of the server and re-install the OS.
  2. Log in to the server and run the following commands to clear the CCE components and LVM data:

    Write the following scripts to the clean.sh file:

    -
    lsblk
    -vgs --noheadings | awk '{print $1}' | xargs vgremove -f
    -pvs --noheadings | awk '{print $1}' | xargs pvremove -f
    -lvs --noheadings | awk '{print $1}' | xargs -i lvremove -f --select {}
    -function init_data_disk() {
    -    all_devices=$(lsblk -o KNAME,TYPE | grep disk | grep -v nvme | awk '{print $1}' | awk '{ print "/dev/"$1}')
    -    for device in ${all_devices[@]}; do
    -        isRootDisk=$(lsblk -o KNAME,MOUNTPOINT $device 2>/dev/null| grep -E '[[:space:]]/$' | wc -l )
    -        if [[ ${isRootDisk} != 0 ]]; then
    -            continue
    -        fi
    -        dd if=/dev/urandom of=${device} bs=512 count=64
    -        return
    -    done
    -    exit 1
    -}
    -init_data_disk
    -lsblk
    -

    Run the following command:

    -

    bash clean.sh

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0341.html b/docs/cce/umn/cce_01_0341.html deleted file mode 100644 index 062c0931..00000000 --- a/docs/cce/umn/cce_01_0341.html +++ /dev/null @@ -1,45 +0,0 @@ - - -

Data Disk Space Allocation

-

When creating a node, you need to configure data disks for the node.

-

-

The data disk is divided into Kubernetes space and user space. The user space defines the space that is not allocated to Kubernetes in the local disk. The Kubernetes space consists of the following two parts:

-
  • Docker space (90% by default): stores Docker working directories, Docker image data, and image metadata.
  • kubelet space (10% by default): stores pod configuration files, secrets, and mounted storage such as emptyDir volumes.
-

The Docker space size affects image download and container startup and running. This section describes how the Docker space is used so that you can configure the Docker space accordingly.

-

Docker Space Description

By default, a data disk, 100 GB for example, is divided as follows (depending on the container storage Rootfs):

-
  • Rootfs (Device Mapper)
    • The /var/lib/docker directory is used as the Docker working directory and occupies 20% of the Docker space by default. (Space size of the /var/lib/docker directory = Data disk space x 90% x 20%)
    • The thin pool is used to store Docker image data, image metadata, and container data, and occupies 80% of the Docker space by default. (Thin pool space = Data disk space x 90% x 80%)

      The thin pool is dynamically mounted. You can view it by running the lsblk command on a node, but not the df -h command.

      -
    -

    -
-
  • Rootfs (OverlayFS): No separate thin pool. The entire Docker space is in the /var/lib/docker directory.

    -
-
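As a worked example of the default split, a 100 GB data disk on a node that uses Device Mapper is divided as follows:

Docker space      = 100 GB x 90% = 90 GB
  /var/lib/docker = 90 GB x 20%  = 18 GB
  Thin pool       = 90 GB x 80%  = 72 GB
kubelet space     = 100 GB x 10% = 10 GB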

Using rootfs for container storage in CCE

-
  • CCE cluster: EulerOS 2.9 nodes use OverlayFS, and EulerOS 2.5 nodes use Device Mapper. CentOS 7.6 nodes use Device Mapper in clusters earlier than v1.21 and OverlayFS in clusters of v1.21 and later.
-

You can log in to the node and run the docker info command to view the storage engine type.

-
# docker info
-Containers: 20
- Running: 17
- Paused: 0
- Stopped: 3
-Images: 16
-Server Version: 18.09.0
-Storage Driver: devicemapper
-
-

Docker Space and Containers

The number of pods and the space configured for each container determine whether the Docker space of a node is sufficient.

-

-

The Docker space should be greater than the total disk space used by containers. Formula: Docker space > Number of containers x Available data space for a single container (basesize)

-

When device mapper is used, although you can limit the size of the /home directory of a single container (to 10 GB by default), all containers on the node still share the thin pool of the node for storage. They are not completely isolated. When the sum of the thin pool space used by certain containers reaches the upper limit, other containers cannot run properly.

-

In addition, after a file is deleted in the /home directory of a container, the thin pool space occupied by the file is not released immediately. Therefore, even if basesize is set to 10 GB, the thin pool space occupied by files keeps increasing up to 10 GB as files are created in the container. The space released after file deletion is reused, but only after a delay. If the number of containers on the node multiplied by basesize is greater than the thin pool size of the node, the thin pool may be exhausted.

-
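Applying the formula to the worked example above (a 100 GB data disk, thin pool of about 72 GB, default basesize of 10 GB):

Thin pool (72 GB) / basesize (10 GB) = 7.2, so plan for at most about 7 containers per node

Beyond that, the thin pool may be exhausted and containers may fail to run properly.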
-

Garbage Collection Policies for Container Images

When the Docker space is insufficient, image garbage collection is triggered.

-

The policy for garbage collecting images takes two factors into consideration: HighThresholdPercent and LowThresholdPercent. Disk usage above the high threshold (default: 85%) will trigger garbage collection. The garbage collection will delete least recently used images until the low threshold (default: 80%) has been met.

-
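These thresholds correspond to the kubelet image garbage collection parameters. The following KubeletConfiguration excerpt is shown for reference only; whether these values can be tuned on CCE nodes depends on the cluster version, so treat it as an illustrative sketch rather than a supported CCE setting:

# Illustrative kubelet configuration excerpt (not necessarily adjustable on CCE nodes)
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
imageGCHighThresholdPercent: 85   # Disk usage above this value triggers image garbage collection
imageGCLowThresholdPercent: 80    # Garbage collection deletes images until usage drops below this value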
-

Docker Space Configuration Suggestions

  • The Docker space should be greater than the total disk space used by containers. Formula: Docker space > Number of containers x Available data space for a single container (basesize)
  • You are advised to create and delete files of containerized services in local storage volumes (such as emptyDir and hostPath volumes) or cloud storage directories mounted to the containers. In this way, the thin pool space is not occupied. emptyDir volumes occupy the kubelet space. Therefore, properly plan the size of the kubelet space.
  • Docker on Ubuntu 18.04 nodes in CCE clusters uses the OverlayFS storage mode by default. You can deploy services on these nodes to avoid the problem that disk space occupied by files created or deleted in containers is not released immediately.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0342.html b/docs/cce/umn/cce_01_0342.html deleted file mode 100644 index 5f86ea80..00000000 --- a/docs/cce/umn/cce_01_0342.html +++ /dev/null @@ -1,79 +0,0 @@ - - -

CCE Turbo Clusters and CCE Clusters

-

Comparison Between CCE Turbo Clusters and CCE Clusters

The following table lists the differences between CCE Turbo clusters and CCE clusters:

- -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Table 1 Cluster types

Dimensions

-

Sub-dimension

-

CCE Turbo Cluster

-

CCE Cluster

-

Cluster

-

Positioning

-

Next-generation container cluster for Cloud Native 2.0 with accelerated computing, networking, and scheduling

-

Standard cluster for common commercial use

-

Node type

-

Hybrid deployment of VMs and bare-metal servers

-

Hybrid deployment of VMs

-

Network

-

Network model

-

Cloud Native Network 2.0: applies to large-scale and high-performance scenarios.

-

Networking scale: 2000 nodes

-

Cloud-native network 1.0 for scenarios that do not require high performance or involve large-scale deployment.

-
  • Tunnel network model
  • VPC network model
-

Network performance

-

The VPC network and container network are flattened into one, achieving zero performance loss.

-

The VPC network is overlaid with the container network, causing certain performance loss.

-

Container network isolation

-

Pods can be directly associated with security groups to configure isolation policies for resources inside and outside a cluster.

-
  • Tunnel network model: Network isolation policies are supported for intra-cluster communication (by configuring network policies).
  • VPC network model: Isolation is not supported.
-

Security

-

Isolation

-
  • Bare-metal server: You can select secure containers for VM-level isolation.
  • VM: Common containers are deployed.
-

Common containers are deployed and isolated by Cgroups.

-
-
-
-

QingTian Architecture

-

The QingTian architecture consists of data plane (software-hardware synergy) and management plane (Alkaid Smart Cloud Brain). The data plane innovates in five dimensions: simplified data center, diversified computing power, QingTian cards, ultra-fast engines, and simplified virtualization, to fully offload and accelerate compute, storage, networking, and security components. VMs, bare metal servers, and containers can run together. As a distributed operating system, the Alkaid Smart Cloud Brain focuses on the cloud, AI, and 5G, and provides all-domain scheduling to achieve cloud-edge-device collaboration and governance.

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0343.html b/docs/cce/umn/cce_01_0343.html deleted file mode 100644 index c2ebe74b..00000000 --- a/docs/cce/umn/cce_01_0343.html +++ /dev/null @@ -1,645 +0,0 @@ - - -

How Do I Change the Storage Class Used by a Cluster of v1.15 from FlexVolume to CSI Everest?

-

In clusters later than v1.15.11-r1, CSI (the everest add-on) has taken over all functions of fuxi FlexVolume (the storage-driver add-on) for managing container storage. You are advised to use CSI Everest.

-

To migrate your storage volumes, create a static PV to associate with the original underlying storage, and then create a PVC to associate with this static PV. When you upgrade your application, mount the new PVC to the original mounting path to migrate the storage volumes.

-

Services will be interrupted during the migration. Therefore, properly plan the migration and back up data.

-
-

Procedure

  1. (Optional) Back up data to prevent data loss in case of exceptions.
  2. Configure a YAML file of the PV in the CSI format according to the PV in the FlexVolume format and associate the PV with the existing storage.

    To be specific, run the following commands to configure the pv-example.yaml file, which is used to create a PV.

    -

    touch pv-example.yaml

    -

    vi pv-example.yaml

    -
    Configuration example of a PV for an EVS volume:
    apiVersion: v1
    -kind: PersistentVolume
    -metadata:
    -  labels:
    -    failure-domain.beta.kubernetes.io/region: eu-de
    -    failure-domain.beta.kubernetes.io/zone: <zone name>
    -  annotations:
    -    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
    -  name: pv-evs-example
    -spec:
    -  accessModes:
    -  - ReadWriteOnce
    -  capacity:
    -    storage: 10Gi
    -  csi:
    -    driver: disk.csi.everest.io
    -    fsType: ext4
    -    volumeAttributes:
    -      everest.io/disk-mode: SCSI
    -      everest.io/disk-volume-type: SAS
    -      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
    -    volumeHandle: 0992dbda-6340-470e-a74e-4f0db288ed82
    -  persistentVolumeReclaimPolicy: Delete
    -  storageClassName: csi-disk
    -
    -

    -

    Pay attention to the fields in bold and red. The parameters are described as follows:

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Table 1 EVS volume configuration parameters

    Parameter

    -

    Description

    -

    failure-domain.beta.kubernetes.io/region

    -

    Region where the EVS disk is located. Use the same value as that of the FlexVolume PV.

    -

    failure-domain.beta.kubernetes.io/zone

    -

    AZ where the EVS disk is located. Use the same value as that of the FlexVolume PV.

    -

    name

    -

    Name of the PV, which must be unique in the cluster.

    -

    storage

    -

    EVS volume capacity in the unit of Gi. Use the value of spec.capacity.storage of the FlexVolume PV.

    -

    driver

    -

    Storage driver used to attach the volume. Set the driver to disk.csi.everest.io for the EVS volume.

    -

    volumeHandle

    -

    Volume ID of the EVS disk. Use the value of spec.flexVolume.options.volumeID of the FlexVolume PV.

    -

    everest.io/disk-mode

    -

    EVS disk mode. Use the value of spec.flexVolume.options.disk-mode of the FlexVolume PV.

    -

    everest.io/disk-volume-type

    -

    EVS disk type. Use the value of kubernetes.io/volumetype in the storage class corresponding to spec.storageClassName of the FlexVolume PV.

    -

    storageClassName

    -

    Name of the Kubernetes storage class associated with the storage volume. Set this field to csi-disk for EVS disks.

    -
    -
    -

    Configuration example of a PV for an SFS volume:

    -
    apiVersion: v1
    -kind: PersistentVolume
    -metadata:
    -  name: pv-sfs-example
    -  annotations:
    -    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
    -spec:
    -  accessModes:
    -  - ReadWriteMany
    -  capacity:
    -    storage: 10Gi
    -  csi:
    -    driver: nas.csi.everest.io
    -    fsType: nfs
    -    volumeAttributes:
    -      everest.io/share-export-location:  # Shared path of the file storage
    -      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
    -    volumeHandle: 682f00bb-ace0-41d8-9b3e-913c9aa6b695
    -  persistentVolumeReclaimPolicy: Delete
    -  storageClassName: csi-nas
    -

    -

    Pay attention to the fields in bold and red. The parameters are described as follows:

    - -
    - - - - - - - - - - - - - - - - - - - - - - -
    Table 2 SFS volume configuration parameters

    Parameter

    -

    Description

    -

    name

    -

    Name of the PV, which must be unique in the cluster.

    -

    storage

    -

    File storage size in the unit of Gi. Use the value of spec.capacity.storage of the FlexVolume PV.

    -

    driver

    -

    Storage driver used to attach the volume. Set the driver to nas.csi.everest.io for the file system.

    -

    everest.io/share-export-location

    -

    Shared path of the file system. Use the value of spec.flexVolume.options.deviceMountPath of the FlexVolume PV.

    -

    volumeHandle

    -

    File system ID. Use the value of spec.flexVolume.options.volumeID of the FlexVolume PV.

    -

    storageClassName

    -

    Name of the Kubernetes storage class. Set this field to csi-nas.

    -
    -
    -

    Configuration example of a PV for an OBS volume:

    -
    apiVersion: v1
    -kind: PersistentVolume
    -metadata:
    -  name: pv-obs-example
    -  annotations:
    -    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
    -spec:
    -  accessModes:
    -  - ReadWriteMany
    -  capacity:
    -    storage: 1Gi
    -  csi:
    -    driver: obs.csi.everest.io
    -    fsType: s3fs
    -    volumeAttributes:
    -      everest.io/obs-volume-type: STANDARD
    -      everest.io/region: eu-de
    -      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
    -    volumeHandle: obs-normal-static-pv
    -  persistentVolumeReclaimPolicy: Delete
    -  storageClassName: csi-obs
    -

    -

    Pay attention to the fields in bold and red. The parameters are described as follows:

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Table 3 OBS volume configuration parameters

    Parameter

    -

    Description

    -

    name

    -

    Name of the PV, which must be unique in the cluster.

    -

    storage

    -

    Storage capacity, in the unit of Gi. Set this parameter to the fixed value 1Gi.

    -

    driver

    -

    Storage driver used to attach the volume. Set the driver to obs.csi.everest.io for the OBS volume.

    -

    fsType

    -

    File type. Value options are obsfs or s3fs. If the value is s3fs, an OBS bucket is created and mounted using s3fs. If the value is obsfs, an OBS parallel file system is created and mounted using obsfs. Set this parameter according to the value of spec.flexVolume.options.posix of the FlexVolume PV. If the value of spec.flexVolume.options.posix is true, set this parameter to obsfs. If the value is false, set this parameter to s3fs.

    -

    everest.io/obs-volume-type

    -

    Storage class, including STANDARD (standard bucket) and WARM (infrequent access bucket). Set this parameter according to the value of spec.flexVolume.options.storage_class of the FlexVolume PV. If the value of spec.flexVolume.options.storage_class is standard, set this parameter to STANDARD. If the value is standard_ia, set this parameter to WARM.

    -

    everest.io/region

    -

    Region where the OBS bucket is located. Use the value of spec.flexVolume.options.region of the FlexVolume PV.

    -

    volumeHandle

    -

    OBS bucket name. Use the value of spec.flexVolume.options.volumeID of the FlexVolume PV.

    -

    storageClassName

    -

    Name of the Kubernetes storage class. Set this field to csi-obs.

    -
    -
    -

    Configuration example of a PV for an SFS Turbo volume:

    -
    apiVersion: v1
    -kind: PersistentVolume
    -metadata:
    -  name: pv-efs-example
    -  annotations:
    -    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
    -spec:
    -  accessModes:
    -  - ReadWriteMany
    -  capacity:
    -    storage: 10Gi
    -  csi:
    -    driver: sfsturbo.csi.everest.io
    -    fsType: nfs
    -    volumeAttributes:
    -      everest.io/share-export-location: 192.168.0.169:/
    -      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
    -    volumeHandle: 8962a2a2-a583-4b7f-bb74-fe76712d8414
    -  persistentVolumeReclaimPolicy: Delete
    -  storageClassName: csi-sfsturbo
    -

    -

    Pay attention to the fields in bold and red. The parameters are described as follows:

    - -
    - - - - - - - - - - - - - - - - - - - - - - -
    Table 4 SFS Turbo volume configuration parameters

    Parameter

    -

    Description

    -

    name

    -

    Name of the PV, which must be unique in the cluster.

    -

    storage

    -

    File system size. Use the value of spec.capacity.storage of the FlexVolume PV.

    -

    driver

    -

    Storage driver used to attach the volume. Set it to sfsturbo.csi.everest.io.

    -

    everest.io/share-export-location

    -

    Shared path of the SFS Turbo volume. Use the value of spec.flexVolume.options.deviceMountPath of the FlexVolume PV.

    -

    volumeHandle

    -

    SFS Turbo volume ID. Use the value of spec.flexVolume.options.volumeID of the FlexVolume PV.

    -

    storageClassName

    -

    Name of the Kubernetes storage class. Set this field to csi-sfsturbo for SFS Turbo volumes.

    -
    -
    -

  3. Configure a YAML file of the PVC in the CSI format according to the PVC in the FlexVolume format and associate the PVC with the PV created in 2.

    To be specific, run the following commands to configure the pvc-example.yaml file, which is used to create a PVC.

    -

    touch pvc-example.yaml

    -

    vi pvc-example.yaml

    -

    Configuration example of a PVC for an EVS volume:

    -
    apiVersion: v1  
    -kind: PersistentVolumeClaim
    -metadata:
    -  labels:
    -    failure-domain.beta.kubernetes.io/region: eu-de
    -    failure-domain.beta.kubernetes.io/zone: <zone name>
    -  annotations:
    -    everest.io/disk-volume-type: SAS
    -    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
    -  name: pvc-evs-example
    -  namespace: default
    -spec:
    -  accessModes:
    -  - ReadWriteOnce
    -  resources:
    -    requests:
    -      storage: 10Gi
    -  volumeName:  pv-evs-example
    -  storageClassName: csi-disk
    -

    -

    Pay attention to the fields in bold and red. The parameters are described as follows:

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Table 5 PVC configuration parameters for an EVS volume

    Parameter

    -

    Description

    -

    failure-domain.beta.kubernetes.io/region

    -

    Region where the cluster is located. Use the same value as that of the FlexVolume PVC.

    -

    failure-domain.beta.kubernetes.io/zone

    -

    AZ where the EVS disk is deployed. Use the same value as that of the FlexVolume PVC.

    -

    everest.io/disk-volume-type

    -

    Storage class of the EVS disk. The value can be SAS or SSD. Set this parameter to the same value as that of the PV created in 2.

    -

    name

    -

     PVC name, which must be unique in the namespace. (If the PVC is dynamically created by a stateful application, the value of this parameter must be the same as the name of the FlexVolume PVC.)

    -

    namespace

    -

    Namespace to which the PVC belongs. Use the same value as that of the FlexVolume PVC.

    -

    storage

    -

    Requested capacity of the PVC, which must be the same as the storage size of the existing PV.

    -

    volumeName

    -

    Name of the PV. Set this parameter to the name of the static PV in 2.

    -

    storageClassName

    -

    Name of the Kubernetes storage class. Set this field to csi-disk for EVS disks.

    -
    -
    -

    Configuration example of a PVC for an SFS volume:

    -
    apiVersion: v1
    -kind: PersistentVolumeClaim
    -metadata:
    -  annotations:
    -    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
    -  name: pvc-sfs-example
    -  namespace: default
    -spec:
    -  accessModes:
    -  - ReadWriteMany
    -  resources:
    -    requests:
    -      storage: 10Gi
    -  storageClassName: csi-nas
    -  volumeName: pv-sfs-example
    -

    -

    Pay attention to the fields in bold and red. The parameters are described as follows:

    - -
    - - - - - - - - - - - - - - - - - - - -
    Table 6 PVC configuration parameters for an SFS volume

    Parameter

    -

    Description

    -

    name

    -

     PVC name, which must be unique in the namespace. (If the PVC is dynamically created by a stateful application, the value of this parameter must be the same as the name of the FlexVolume PVC.)

    -

    namespace

    -

    Namespace to which the PVC belongs. Use the same value as that of the FlexVolume PVC.

    -

    storage

    -

    Storage capacity, in the unit of Gi. The value must be the same as the storage size of the existing PV.

    -

    storageClassName

    -

    Set this field to csi-nas.

    -

    volumeName

    -

    Name of the PV. Set this parameter to the name of the static PV in 2.

    -
    -
    -

    Configuration example of a PVC for an OBS volume:

    -
    apiVersion: v1
    -kind: PersistentVolumeClaim
    -metadata:
    -  annotations:
    -    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
    -    everest.io/obs-volume-type: STANDARD
    -    csi.storage.k8s.io/fstype: s3fs
    -  name: pvc-obs-example
    -  namespace: default
    -spec:
    -  accessModes:
    -  - ReadWriteMany
    -  resources:
    -    requests:
    -      storage: 1Gi
    -  storageClassName: csi-obs
    -  volumeName: pv-obs-example
    -

    -

    Pay attention to the fields in bold and red. The parameters are described as follows:

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Table 7 PVC configuration parameters for an OBS volume

    Parameter

    -

    Description

    -

    everest.io/obs-volume-type

    -

    OBS volume type, which can be STANDARD (standard bucket) and WARM (infrequent access bucket). Set this parameter to the same value as that of the PV created in 2.

    -

    csi.storage.k8s.io/fstype

    -

    File type, which can be obsfs or s3fs. The value must be the same as that of fsType of the static OBS volume PV.

    -

    name

    -

     PVC name, which must be unique in the namespace. (If the PVC is dynamically created by a stateful application, the value of this parameter must be the same as the name of the FlexVolume PVC.)

    -

    namespace

    -

    Namespace to which the PVC belongs. Use the same value as that of the FlexVolume PVC.

    -

    storage

    -

    Storage capacity, in the unit of Gi. Set this parameter to the fixed value 1Gi.

    -

    storageClassName

    -

    Name of the Kubernetes storage class. Set this field to csi-obs.

    -

    volumeName

    -

    Name of the PV. Set this parameter to the name of the static PV created in 2.

    -
    -
    -

    Configuration example of a PVC for an SFS Turbo volume:

    -
    apiVersion: v1
    -kind: PersistentVolumeClaim
    -metadata:
    -  annotations:
    -    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
    -  name: pvc-efs-example
    -  namespace: default
    -spec:
    -  accessModes:
    -  - ReadWriteMany
    -  resources:
    -    requests:
    -      storage: 10Gi
    -  storageClassName: csi-sfsturbo
    -  volumeName: pv-efs-example
    -

    -

    Pay attention to the fields in bold and red. The parameters are described as follows:

    - -
    - - - - - - - - - - - - - - - - - - - -
    Table 8 PVC configuration parameters for an SFS Turbo volume

    Parameter

    -

    Description

    -

    name

    -

     PVC name, which must be unique in the namespace. (If the PVC is dynamically created by a stateful application, the value of this parameter must be the same as the name of the FlexVolume PVC.)

    -

    namespace

    -

    Namespace to which the PVC belongs. Use the same value as that of the FlexVolume PVC.

    -

    storageClassName

    -

    Name of the Kubernetes storage class. Set this field to csi-sfsturbo.

    -

    storage

    -

    Storage capacity, in the unit of Gi. The value must be the same as the storage size of the existing PV.

    -

    volumeName

    -

    Name of the PV. Set this parameter to the name of the static PV created in 2.

    -
    -
    -

  4. Upgrade the workload to use a new PVC.

    For Deployments
    1. Run the kubectl create -f commands to create a PV and PVC.

      kubectl create -f pv-example.yaml

      -

      kubectl create -f pvc-example.yaml

      -

      Replace the example file name pvc-example.yaml in the preceding commands with the names of the YAML files configured in 2 and 3.

      -
      -
    2. Go to the CCE console. On the workload upgrade page, click Upgrade > Advanced Settings > Data Storage > Cloud Storage.

      -
    3. Uninstall the old storage and add the PVC in the CSI format. Retain the original mounting path in the container.
    4. Click Submit.
    5. Wait until the pods are running.
    -
    -

    For StatefulSets that use existing storage

    -
    1. Run the kubectl create -f commands to create a PV and PVC.

      kubectl create -f pv-example.yaml

      -

      kubectl create -f pvc-example.yaml

      -

      Replace the example file name pvc-example.yaml in the preceding commands with the names of the YAML files configured in 2 and 3.

      -
      -
    2. Run the kubectl edit command to edit the StatefulSet and use the newly created PVC.

      kubectl edit sts sts-example -n xxx

      -

      -

      Replace sts-example in the preceding command with the actual name of the StatefulSet to upgrade. xxx indicates the namespace to which the StatefulSet belongs.

      -
      -
    3. Wait until the pods are running.
    -

    The current console does not support the operation of adding new cloud storage for StatefulSets. Use the kubectl commands to replace the storage with the newly created PVC.

    -
    -

    For StatefulSets that use dynamically allocated storage

    -
    1. Back up the PV and PVC in the flexVolume format used by the StatefulSet.

      kubectl get pvc xxx -n {namespaces} -oyaml > pvc-backup.yaml

      -

      kubectl get pv xxx -n {namespaces} -oyaml > pv-backup.yaml

      -
    2. Change the number of pods to 0 (an example kubectl scale command is shown after this list).
    3. On the storage page, disassociate the flexVolume PVC used by the StatefulSet.
    4. Run the kubectl create -f commands to create a PV and PVC.

      kubectl create -f pv-example.yaml

      -

      kubectl create -f pvc-example.yaml

      -

      Replace the example file name pvc-example.yaml in the preceding commands with the names of the YAML files configured in 2 and 3.

      -
      -
    5. Change the number of pods back to the original value and wait until the pods are running.
    -
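    Steps 2 and 5 can be performed with kubectl scale. A minimal sketch, reusing the placeholders used elsewhere in this section (xxx for the StatefulSet name, {namespaces} for its namespace):

    # Scale the StatefulSet in to 0 pods before disassociating the FlexVolume PVC
    kubectl scale statefulset xxx --replicas=0 -n {namespaces}

    # Scale it back out after the CSI PVC with the same name has been created
    kubectl scale statefulset xxx --replicas=<original count> -n {namespaces}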

    The dynamic allocation of storage for StatefulSets is achieved by using volumeClaimTemplates. This field cannot be modified by Kubernetes. Therefore, data cannot be migrated by using a new PVC.

    -

    The PVC naming rule of the volumeClaimTemplates is fixed. When a PVC that meets the naming rule exists, this PVC is used.

    -

    Therefore, disassociate the original PVC first, and then create a PVC with the same name in the CSI format.

    -
    -

    6. (Optional) Recreate the stateful application to ensure that a CSI PVC is used when the application is scaled out. Otherwise, FlexVolume PVCs are used in scaling out.

    -
    • Run the following command to obtain the YAML file of the StatefulSet:
    -

    kubectl get sts xxx -n {namespaces} -oyaml > sts.yaml

    -
    • Run the following command to back up the YAML file of the StatefulSet:
    -

    cp sts.yaml sts-backup.yaml

    -
    • Modify the definition of volumeClaimTemplates in the YAML file of the StatefulSet.
    -

    vi sts.yaml

    -

    Configuration example of volumeClaimTemplates for an EVS volume:

    -
      volumeClaimTemplates:
    -    - metadata:
    -        name: pvc-161070049798261342
    -        namespace: default
    -        creationTimestamp: null
    -        annotations:
    -          everest.io/disk-volume-type: SAS
    -      spec:
    -        accessModes:
    -          - ReadWriteOnce
    -        resources:
    -          requests:
    -            storage: 10Gi
    -        storageClassName: csi-disk
    -

    The parameter value must be the same as the PVC of the EVS volume created in 3.

    -

    Configuration example of volumeClaimTemplates for an SFS volume:

    -
      volumeClaimTemplates:
    -    - metadata:
    -        name: pvc-161063441560279697
    -        namespace: default
    -        creationTimestamp: null
    -      spec:
    -        accessModes:
    -          - ReadWriteMany
    -        resources:
    -          requests:
    -            storage: 10Gi
    -        storageClassName: csi-nas
    -

    The parameter value must be the same as the PVC of the SFS volume created in 3.

    -

    Configuration example of volumeClaimTemplates for an OBS volume:

    -
      volumeClaimTemplates:
    -    - metadata:
    -        name: pvc-161070100417416148
    -        namespace: default
    -        creationTimestamp: null
    -        annotations:
    -          csi.storage.k8s.io/fstype: s3fs
    -          everest.io/obs-volume-type: STANDARD
    -      spec:
    -        accessModes:
    -          - ReadWriteMany
    -        resources:
    -          requests:
    -            storage: 1Gi
    -        storageClassName: csi-obs
    -

    The parameter value must be the same as the PVC of the OBS volume created in 3.

    -
    • Delete the StatefulSet.
    -

    kubectl delete sts xxx -n {namespaces}

    -
    • Create the StatefulSet.
    -

    kubectl create -f sts.yaml

    -

  5. Check service functions.

    1. Check whether the application is running properly.
    2. Check whether the data storage is normal (see the example commands below).
    -
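    The following commands are a minimal sketch of this check; {namespace}, {pod_name}, and the mount path are placeholders for your own values:

    kubectl get pod -n {namespace}                            # All pods should be in the Running state.
    kubectl exec -it {pod_name} -n {namespace} -- df -h       # Confirm that the volume is mounted at the expected path.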

    If a rollback is required, perform 4 again, select the PVC in the FlexVolume format, and upgrade the application.

    -
    -

  6. Uninstall the PVC in the FlexVolume format.

    If the application functions normally, unbind the PVC in the FlexVolume format on the storage management page.

    -

    You can also run kubectl commands to delete the PVC and PV in the FlexVolume format.

    -

    Before deleting a PV, change the persistentVolumeReclaimPolicy of the PV to Retain. Otherwise, the underlying storage will be reclaimed after the PV is deleted.

    -
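    A minimal sketch of these commands; {pv_name}, {pvc_name}, and {namespace} are placeholders for your own resources:

    kubectl patch pv {pv_name} -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'   # Keep the underlying storage when the PV is deleted.
    kubectl delete pvc {pvc_name} -n {namespace}
    kubectl delete pv {pv_name}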

    If the cluster has been upgraded before the storage migration, PVs may fail to be deleted. You can remove the PV protection field finalizers to delete PVs.

    -

    kubectl patch pv {pv_name} -p '{"metadata":{"finalizers":null}}'

    -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0344.html b/docs/cce/umn/cce_01_0344.html deleted file mode 100644 index 2866f65c..00000000 --- a/docs/cce/umn/cce_01_0344.html +++ /dev/null @@ -1,88 +0,0 @@ - - -

Adding a Second Data Disk to a Node in a CCE Cluster

-

You can use the pre-installation script feature to configure CCE cluster nodes (ECSs).

-
  • When creating a node in a cluster of v1.13.10 or later, if you want a data disk that is not managed by LVM, follow the instructions in this section to format the data disk when adding it. Otherwise, the data disk will still be managed by LVM.
  • When creating a node in a cluster earlier than v1.13.10, you must format the data disks that you do not want to be managed by LVM. Otherwise, either these data disks or the first data disk will be managed by LVM.
-
-

Before using this feature, write a script that can format data disks and save it to your OBS bucket. This script must be executed by user root.

-

Input Parameters

-
  1. Set the script name to formatdisk.sh, save the script to your OBS bucket, and obtain the address of the script in OBS.
  2. You need to specify the size of the Docker data disk (the data disk managed by LVM is called the Docker data disk). The size of the Docker disk must be different from that of the second disk. For example, the Docker data disk is 100 GB and the new disk is 110 GB.
  3. Set the mount path of the second data disk, for example, /data/code.
-

Run the following command in the pre-installation script to format the disk:

-
cd /tmp;curl -k -X GET OBS bucket address /formatdisk.sh -1 -O;fdisk -l;sleep 30;bash -x formatdisk.sh 100 /data/code;fdisk -l
-

Example script (formatdisk.sh):

-
dockerdisksize=$1
-mountdir=$2
-systemdisksize=40
-i=0
-while [ 20 -gt $i ]; do 
-    echo $i; 
-    if [ $(lsblk -o KNAME,TYPE | grep disk | grep -v nvme | awk '{print $1}' | awk '{ print "/dev/"$1}' |wc -l) -ge 3 ]; then 
-        break 
-    else 
-        sleep 5 
-    fi; 
-    i=$[i+1] 
-done 
-all_devices=$(lsblk -o KNAME,TYPE | grep disk | grep -v nvme | awk '{print $1}' | awk '{ print "/dev/"$1}')
-for device in ${all_devices[@]}; do
-    isRawDisk=$(lsblk -n $device 2>/dev/null | grep disk | wc -l)
-    if [[ ${isRawDisk} > 0 ]]; then
-        # is it partitioned ?
-        match=$(lsblk -n $device 2>/dev/null | grep -v disk | wc -l)
-        if [[ ${match} > 0 ]]; then
-            # already partitioned
-            [[ -n "${DOCKER_BLOCK_DEVICES}" ]] && echo "Raw disk ${device} has been partitioned, will skip this device"
-            continue
-        fi
-    else
-        isPart=$(lsblk -n $device 2>/dev/null | grep part | wc -l)
-        if [[ ${isPart} -ne 1 ]]; then
-            # not partitioned
-            [[ -n "${DOCKER_BLOCK_DEVICES}" ]] && echo "Disk ${device} has not been partitioned, will skip this device"
-            continue
-        fi
-        # is used ?
-        match=$(lsblk -n $device 2>/dev/null | grep -v part | wc -l)
-        if [[ ${match} > 0 ]]; then
-            # already used
-            [[ -n "${DOCKER_BLOCK_DEVICES}" ]] && echo "Disk ${device} has been used, will skip this device"
-            continue
-        fi
-        isMount=$(lsblk -n -o MOUNTPOINT $device 2>/dev/null)
-        if [[ -n ${isMount} ]]; then
-            # already used
-            [[ -n "${DOCKER_BLOCK_DEVICES}" ]] && echo "Disk ${device} has been used, will skip this device"
-            continue
-        fi
-        isLvm=$(sfdisk -lqL 2>>/dev/null | grep $device | grep "8e.*Linux LVM")
-        if [[ ! -n ${isLvm} ]]; then
-            # part system type is not Linux LVM
-            [[ -n "${DOCKER_BLOCK_DEVICES}" ]] && echo "Disk ${device} system type is not Linux LVM, will skip this device"
-            continue
-        fi
-    fi
-    block_devices_size=$(lsblk -n -o SIZE $device 2>/dev/null | awk '{ print $1}')
-    if [[ ${block_devices_size}"x" != "${dockerdisksize}Gx" ]] && [[ ${block_devices_size}"x" != "${systemdisksize}Gx" ]]; then
-echo "n
-p
-1
-
-
-w
-" | fdisk $device
-        mkfs -t ext4 ${device}1
-        mkdir -p $mountdir
-	uuid=$(blkid ${device}1 |awk '{print $2}')
-	echo "${uuid}  $mountdir ext4  noatime  0 0" | tee -a /etc/fstab >/dev/null
-        mount $mountdir
-    fi
-done
-

If the preceding example cannot be executed, use the dos2unix tool to convert the format.
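For example, assuming the script is saved as formatdisk.sh:

dos2unix formatdisk.sh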

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0347.html b/docs/cce/umn/cce_01_0347.html deleted file mode 100644 index 8e37b697..00000000 --- a/docs/cce/umn/cce_01_0347.html +++ /dev/null @@ -1,19 +0,0 @@ - - - -

Cluster Parameters

- -

-
- -
- - - -
- diff --git a/docs/cce/umn/cce_01_0348.html b/docs/cce/umn/cce_01_0348.html deleted file mode 100644 index f8f25cb3..00000000 --- a/docs/cce/umn/cce_01_0348.html +++ /dev/null @@ -1,27 +0,0 @@ - - -

Maximum Number of Pods That Can Be Created on a Node

-

The maximum number of pods that can be created on a node is determined by the following parameters:

-
  • Number of container IP addresses that can be allocated on a node (alpha.cce/fixPoolMask): Set this parameter when creating a CCE cluster. This parameter is available only when Network Model is VPC network.
  • Maximum number of pods of a node (maxPods): Set this parameter when creating a node. It is a configuration item of kubelet.
  • Number of ENIs of a CCE Turbo cluster node: In a CCE Turbo cluster, ECS nodes use sub-ENIs and BMS nodes use ENIs. The maximum number of pods that can be created on a node depends on the number of ENIs that can be used by the node.
-

The maximum number of pods that can be created on a node depends on the minimum value of these parameters.
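To check the value that is actually in effect on a node, you can query the node's allocatable pod count, for example ({node_name} is a placeholder):

kubectl get node {node_name} -o jsonpath='{.status.allocatable.pods}'   # Maximum number of pods the node can run.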

- -

Container Network vs. Host Network

When creating a pod, you can select the container network or host network for the pod.

-
  • Container network (default): Each pod is assigned an IP address by the cluster networking add-ons, which occupies the IP addresses of the container network.
  • Host network: The pod uses the host network (hostNetwork: true must be configured for the pod; see the sketch after this list) and occupies host ports. The pod IP address is the host IP address, and the pod does not occupy IP addresses of the container network. Before using the host network, confirm that the container ports do not conflict with the host ports. Do not use the host network unless you know exactly which host port is used by which container.
-
-
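A minimal sketch of a pod that uses the host network; the pod name and image are placeholders for illustration:

apiVersion: v1
kind: Pod
metadata:
  name: hostnetwork-demo        # Hypothetical name for illustration.
spec:
  hostNetwork: true             # The pod shares the node's network namespace and IP address.
  containers:
  - name: app
    image: nginx
    ports:
    - containerPort: 80         # This port is occupied on the host; ensure it is free.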

Number of Container IP Addresses That Can Be Allocated on a Node

If you select VPC network for Network Model when creating a CCE cluster, you also need to set the number of container IP addresses that can be allocated to each node, as shown in the following figure.

-

This parameter affects the maximum number of pods that can be created on a node. Each pod occupies an IP address (when the container network is used). If the number of available IP addresses is insufficient, pods cannot be created.

-

-

By default, a node occupies three container IP addresses (network address, gateway address, and broadcast address). Therefore, the number of container IP addresses that can be allocated to a node equals the number of selected container IP addresses minus 3. For example, in the preceding figure, the number of container IP addresses that can be allocated to a node is 125 (128 – 3).

-
-

Maximum Number of Pods on a Node

When creating a node, you can configure the maximum number of pods that can be created on the node. This parameter is a configuration item of kubelet and determines the maximum number of pods that can be created by kubelet.

-

-
-

Number of NICs on a CCE Turbo Cluster Node

For details about the number of NICs on a CCE Turbo cluster node, see Cloud Native Network 2.0.

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0352.html b/docs/cce/umn/cce_01_0352.html deleted file mode 100644 index 87d1c14f..00000000 --- a/docs/cce/umn/cce_01_0352.html +++ /dev/null @@ -1,82 +0,0 @@ - - -

Configuring Node Scheduling (Tainting)

-

Taints enable a node to repel specific pods to prevent these pods from being scheduled to the node.

-

Taints

A taint is a key-value pair associated with an effect. The following effects are available:

-
  • NoSchedule: No pod will be able to schedule onto the node unless it has a matching toleration. Existing pods will not be evicted from the node.
  • PreferNoSchedule: Kubernetes tries to avoid scheduling pods that cannot tolerate this taint onto the node, but scheduling onto the node is not strictly forbidden.
  • NoExecute: Pods that do not tolerate this taint are evicted if they are already running on the node, and are not scheduled onto the node otherwise.
-

To add a taint to a node, run the kubectl taint node nodename command as follows:

-
$ kubectl get node
-NAME             STATUS   ROLES    AGE    VERSION
-192.168.10.170   Ready    <none>   73d    v1.19.8-r1-CCE21.4.1.B003
-192.168.10.240   Ready    <none>   4h8m   v1.19.8-r1-CCE21.6.1.2.B001
-$ kubectl taint node 192.168.10.240 key1=value1:NoSchedule
-node/192.168.10.240 tainted
-

To view the taint configuration, run the describe and get commands as follows:

-
$ kubectl describe node 192.168.10.240
-Name:               192.168.10.240
-...
-Taints:             key1=value1:NoSchedule
-...
-$ kubectl get node 192.168.10.240 -oyaml
-apiVersion: v1
-...
-spec:
-  providerID: 06a5ea3a-0482-11ec-8e1a-0255ac101dc2
-  taints:
-  - effect: NoSchedule
-    key: key1
-    value: value1
-...
-

To remove a taint, run the following command with a hyphen (-) added after NoSchedule:

-
$ kubectl taint node 192.168.10.240 key1=value1:NoSchedule-
-node/192.168.10.240 untainted
-$ kubectl describe node 192.168.10.240
-Name:               192.168.10.240
-...
-Taints:             <none>
-...
-

To configure scheduling settings, log in to the CCE console, choose Resource Management > Nodes in the navigation pane, and choose More > Scheduling settings in the Operation column of a node in the node list.

-

-

In the dialog box that is displayed, click OK to set the node to be unschedulable.

-

-

This operation will add a taint to the node. You can use kubectl to view the content of the taint.

-
$ kubectl describe node 192.168.10.240
-...
-Taints:             node.kubernetes.io/unschedulable:NoSchedule
-...
-

On the CCE console, perform the same operations again to remove the taint and set the node to be schedulable.

-

-
-

Tolerations

Tolerations are applied to pods, and allow (but do not require) the pods to schedule onto nodes with matching taints.

-

Taints and tolerations work together to ensure that pods are not scheduled onto inappropriate nodes. One or more taints are applied to a node. This marks that the node should not accept any pods that do not tolerate the taints.

-

Here's an example of a pod that uses tolerations:

-
apiVersion: v1
-kind: Pod
-metadata:
-  name: nginx
-  labels:
-    env: test
-spec:
-  containers:
-  - name: nginx
-    image: nginx
-    imagePullPolicy: IfNotPresent
-  tolerations:
-  - key: "key1"
-    operator: "Equal"
-    value: "value1"
-    effect: "NoSchedule"  
-

In the preceding example, the pod's toleration matches the taint key1=value1 with the NoSchedule effect. Therefore, the pod can be scheduled onto the tainted node.

-

You can also configure a toleration similar to the following, which allows the pod to be scheduled onto a node as long as the node has a taint with the key key1, regardless of the taint's value:

-
tolerations:
-- key: "key1"
-  operator: "Exists"
-  effect: "NoSchedule"
-
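As a related sketch (standard Kubernetes behavior, not specific to CCE), a toleration for a NoExecute taint can also specify tolerationSeconds, which keeps the pod on the node for the given time after the taint is added before evicting it:

tolerations:
- key: "key1"
  operator: "Equal"
  value: "value1"
  effect: "NoExecute"
  tolerationSeconds: 3600       # The pod is evicted 3600 seconds after the taint is added.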
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0363.html b/docs/cce/umn/cce_01_0363.html deleted file mode 100644 index 5ceb035a..00000000 --- a/docs/cce/umn/cce_01_0363.html +++ /dev/null @@ -1,169 +0,0 @@ - - -

Creating a Node in a CCE Turbo Cluster

-

Prerequisites

  • At least one CCE Turbo cluster is available. For details on how to create a cluster, see Creating a CCE Turbo Cluster.
  • A key pair has been created for identity authentication upon remote node login.
-
-

Notes and Constraints

  • During the node creation, software packages are downloaded from OBS using the domain name. You need to use a private DNS server to resolve the OBS domain name, and configure the subnet where the node resides with a private DNS server address. When you create a subnet, the private DNS server is used by default. If you change the subnet DNS, ensure that the DNS server in use can resolve the OBS domain name.
  • Nodes in a CCE Turbo cluster must be the models developed on the QingTian architecture that features software-hardware synergy.
  • CCE Turbo clusters are available only in certain regions.
-
-

Procedure for Creating a Node

After a CCE Turbo cluster is created, you can create nodes for the cluster.

-
  1. Click Create Node in the card view of the created CCE Turbo cluster. In the Node Configuration step, set node parameters by referring to the following tables.

    Computing configurations:

    -
    You can configure the specifications and OS of a cloud server, on which your containerized applications run. -
    - - - - - - - - - - - - - - - - - - - - - - -
    Table 1 Configuration parameters

    Parameter

    -

    Description

    -

    AZ

    -

    AZ where the node is located. Nodes in a cluster can be created in different AZs for higher reliability. The value cannot be changed after creation.

    -

    You are advised to select Random to deploy your node in a random AZ based on the selected node flavor.

    -

    An AZ is a physical region where resources use independent power supply and networks. AZs are physically isolated but interconnected through an internal network. To enhance workload availability, create nodes in different AZs.

    -

    Container runtime

    -

    Container runtime used on the node. Different container runtimes support different node specifications and cannot be changed after the node is created.

    -
    • runc: The runC runtime is used. By default, Docker is selected as the container engine when you create a container on the console.
    • kata: The Kata runtime is used. If you select this type for both nodes and workloads, the workloads run only on the nodes that use the Kata runtime. containerd is used by default.
    -

    For details about common containers and secure containers, see Secure Containers and Common Containers.

    -

    Specifications

    -

    Select node specifications that best fit your business needs.

    -

    Nodes in a CCE Turbo cluster must be the models developed on the QingTian architecture that features software-hardware synergy.

    -

    OS

    -

    Public image: Select an OS for the node.

    -

    Node Name

    -

    Name of the node, which must be unique. When nodes (ECSs) are created in batches, the value of this parameter is used as the name prefix for each ECS.

    -

    The system generates a default name for you, which can be modified.

    -

    A node name must start with a lowercase letter and cannot end with a hyphen (-). Only digits, lowercase letters, and hyphens (-) are allowed.

    -

    Login Mode

    -
    • Key pair: Select the key pair used to log in to the node. You can select a shared key.

      A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create a key pair.

      -
    -
    -
    -
    -

    Storage configuration

    -
    Configure storage resources on a node for the containers running on it. Set the disk size according to site requirements. -
    - - - - - - - - - - -
    Table 2 Configuration parameters

    Parameter

    -

    Description

    -

    System Disk

    -

    System disk used by the node OS. The value ranges from 40 GB to 1,024 GB. The default value is 50 GB.

    -

    Data Disk

    -

    Data disk used by the container runtime and kubelet on the node. The value ranges from 100 GB to 32,768 GB. The default value is 100 GB. The EVS disk types provided for the data disk are the same as those for the system disk.

    -
    CAUTION:

    If the data disk is detached or damaged, the Docker service becomes abnormal and the node becomes unavailable. You are advised not to delete the data disk.

    -
    -

    Click Expand to set the following parameters:

    -
    • Custom space allocation: Select this option to define the disk space occupied by the container runtime to store the working directories, container image data, and image metadata.
    • Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption function.
      • Encryption is not selected by default.
      • After you select Encryption, you can select an existing key in the displayed Encryption Setting dialog box. If no key is available, click the link next to the drop-down box to create a key. After the key is created, click the refresh icon.
      -
    -
    -
    -
    -

    Networking configuration

    -
    Configure networking resources to allow node and containerized application access. -
    - - - - - - - -
    Table 3 Configuration parameters

    Parameter

    -

    Description

    -

    Node Subnet

    -

    The node subnet selected during cluster creation is used by default. You can choose another subnet instead. The value cannot be changed after creation.

    -
    -
    -
    -

    Advanced Settings

    -
    Configure advanced node capabilities such as labels, taints, and startup command. -
    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Table 4 Advanced configuration parameters

    Parameter

    -

    Description

    -

    Kubernetes Label

    -

    Click Add Label to set the key-value pair attached to the Kubernetes objects (such as pods). A maximum of 10 labels can be added.

    -

    Labels can be used to distinguish nodes. With workload affinity settings, container pods can be scheduled to a specified node. For more information, see Labels and Selectors.

    -

    Resource Tags

    -

    You can add resource tags to classify resources.

    -

    You can create predefined tags in Tag Management Service (TMS). Predefined tags are visible to all service resources that support the tagging function. You can use these tags to improve tagging and resource migration efficiency.

    -

    CCE will automatically create the "CCE-Dynamic-Provisioning-Node=node id" tag.

    -

    Taints

    -
    This parameter is left blank by default. You can add taints to set anti-affinity for the node. A maximum of 10 taints are allowed for each node. Each taint contains the following parameters:
    • Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
    • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
    • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
    -
    NOTICE:
    • If taints are used, you must configure tolerations in the YAML files of pods. Otherwise, scale-out may fail or pods cannot be scheduled onto the added nodes.
    • After a node pool is created, you can click Edit to modify its configuration. The modification will be synchronized to all nodes in the node pool.
    -
    -
    -

    Max Pods

    -

    Maximum number of pods that can run on the node, including the default system pods.

    -

    This limit prevents the node from being overloaded with pods. For details, see Maximum Number of Pods That Can Be Created on a Node.

    -

    Pre-installation Script

    -

    Enter commands. A maximum of 1,000 characters are allowed.

    -

    The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed. The commands are run to format data disks.

    -

    Post-installation Script

    -

    Enter commands. A maximum of 1,000 characters are allowed.

    -

    The script will be executed after Kubernetes software is installed and will not affect the installation. The commands are run to modify Docker parameters.

    -

    Agency

    -

    An agency is created by the account administrator on the IAM console. By creating an agency, you can share your cloud server resources with another account, or entrust a more professional person or team to manage your resources.

    -

    If no agency is available, click Create Agency on the right to create one.

    -
    -
    -
    -

  2. Click Next: Confirm to review the configurations.
  3. Click Submit.

    The node list page is displayed. If the node status is Available, the node is created successfully. It takes about 6 to 10 minutes to create a node.

    -

  4. Click Back to Node List. The node is created successfully if it changes to the Available state.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0378.html b/docs/cce/umn/cce_01_0378.html deleted file mode 100644 index 883ed839..00000000 --- a/docs/cce/umn/cce_01_0378.html +++ /dev/null @@ -1,284 +0,0 @@ - - -

PersistentVolumeClaims (PVCs)

-

A PVC describes a workload's request for storage resources. This request consumes existing PVs in the cluster. If there is no PV available, underlying storage and PVs are dynamically created. When creating a PVC, you need to describe the attributes of the requested persistent storage, such as the size of the volume and the read/write permissions.

-

Notes and Constraints

When a PVC is created, the system checks whether there is an available PV with the same configuration in the cluster. If yes, the PVC is bound to that PV. If no PV meets the matching conditions, the system dynamically creates a storage volume.

- -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Description

-

PVC Field

-

PV Field

-

Matching Logic

-

region

-

pvc.metadata.labels (failure-domain.beta.kubernetes.io/region or topology.kubernetes.io/region)

-

pv.metadata.labels (failure-domain.beta.kubernetes.io/region or topology.kubernetes.io/region)

-

Defined or not defined at the same time. If defined, the settings must be consistent.

-

zone

-

pvc.metadata.labels (failure-domain.beta.kubernetes.io/zone or topology.kubernetes.io/zone)

-

pv.metadata.labels (failure-domain.beta.kubernetes.io/zone or topology.kubernetes.io/zone)

-

Defined or not defined at the same time. If defined, the settings must be consistent.

-

EVS disk type

-

pvc.metadata.annotations (everest.io/disk-volume-type)

-

pv.spec.csi.volumeAttributes (everest.io/disk-volume-type)

-

Defined or not defined at the same time. If defined, the settings must be consistent.

-

Key ID

-

pvc.metadata.annotations (everest.io/crypt-key-id)

-

pv.spec.csi.volumeAttributes (everest.io/crypt-key-id)

-

Defined or not defined at the same time. If defined, the settings must be consistent.

-

accessMode

-

accessMode

-

accessMode

-

The settings must be consistent.

-

Storage class

-

storageclass

-

storageclass

-

The settings must be consistent.

-
-
-
-

Volume Access Modes

PVs can be mounted to the host system only in the mode supported by underlying storage resources. For example, a file storage system can be read and written by multiple nodes, but an EVS disk can be read and written by only one node.

-
  • ReadWriteOnce: A volume can be mounted as read-write by a single node. This access mode is supported by EVS.
  • ReadWriteMany: A volume can be mounted as read-write by multiple nodes. This access mode is supported by SFS, SFS Turbo, and OBS.
- -
- - - - - - - - - - - - - - - - - - - - - -
Table 1 Supported access modes

Storage Type

-

ReadWriteOnce

-

ReadWriteMany

-

EVS

-

√

-

×

-

SFS

-

×

-

√

-

OBS

-

×

-

√

-

SFS Turbo

-

×

-

√

-
-
-
-

Using a Storage Class to Create a PVC

StorageClass describes the storage class used in the cluster. You need to specify StorageClass to dynamically create PVs and underlying storage resources when creating a PVC.

-

Using the CCE Console

-
  1. Log in to the CCE console.
  2. Go to the cluster details page, choose Storage from the navigation pane, and click the PersistentVolumeClaims (PVCs) tab.
  3. Click Create PVC in the upper right corner. In the dialog box displayed, set the PVC parameters.

    • Creation Method: Select Storage class.
    • PVC Name: Enter a PVC name.
    • Storage Class: Select the required storage class. The following storage resources can be dynamically provisioned:
      • csi-disk: EVS disk.
      • csi-obs: OBS bucket.
      -
    • AZ (supported only by EVS): Select the AZ where the EVS disk is located.
    • Disk Type: Select an EVS disk type. EVS disk types vary in different regions.
      • Common I/O
      • High I/O
      • Ultra-high I/O
      -
    • Access Mode: ReadWriteOnce and ReadWriteMany are supported. For details, see Volume Access Modes.
    • Capacity (GiB) (supported only by EVS and SFS): storage capacity. This parameter is not available for OBS.
    • Encryption (supported only for EVS and SFS): Select Encryption. After selecting this option, you need to select a key.
    • Secret (supported only for OBS): Select an access key for OBS. For details, see Using a Custom AK/SK to Mount an OBS Volume.
    -

  4. Click Create.
-

Using YAML

-

Example YAML for EVS

-
  • failure-domain.beta.kubernetes.io/region: region where the cluster is located.

    For details about the value of region, see Regions and Endpoints.

    -
  • failure-domain.beta.kubernetes.io/zone: AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

    For details about the value of zone, see Regions and Endpoints.

    -
-
apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: pvc-evs-auto-example
-  namespace: default
-  annotations:
-    everest.io/disk-volume-type: SSD    # EVS disk type.
-    everest.io/crypt-key-id: 0992dbda-6340-470e-a74e-4f0db288ed82  # (Optional) Key ID. The key is used to encrypt EVS disks.
-    
-  labels:
-    failure-domain.beta.kubernetes.io/region: eu-de
-    failure-domain.beta.kubernetes.io/zone: eu-de-01
-spec:
-  accessModes:
-  - ReadWriteOnce               # The value must be ReadWriteOnce for EVS.
-  resources:
-    requests:
-      storage: 10Gi             # EVS disk capacity, ranging from 1 to 32768.
-  storageClassName: csi-disk    # The storage class type is EVS.
-

Example YAML for OBS:

-
apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: obs-warm-provision-pvc
-  namespace: default
-  annotations:
-    everest.io/obs-volume-type: STANDARD      # OBS bucket type. Currently, standard (STANDARD) and infrequent access (WARM) are supported.
-    csi.storage.k8s.io/fstype: obsfs          # File type. obsfs indicates to create a parallel file system (recommended), and s3fs indicates to create an OBS bucket.
-    
-spec:
-  accessModes:
-  - ReadWriteMany             # The value must be ReadWriteMany for OBS.
-  resources:
-    requests:
-      storage: 1Gi                 # This field is valid only for verification (fixed to 1, cannot be empty or 0). The value setting does not take effect for OBS buckets.
-  storageClassName: csi-obs        # The storage class type is OBS.
-
-
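To use such a PVC in a workload, reference it in the pod spec. The following is a minimal sketch; the pod name, image, and mount path are placeholders, and claimName must match the name of the PVC you created (here, the EVS PVC from the preceding example):

apiVersion: v1
kind: Pod
metadata:
  name: pvc-demo-pod                      # Hypothetical name for illustration.
spec:
  containers:
  - name: app
    image: nginx
    volumeMounts:
    - name: data
      mountPath: /data                    # Path inside the container where the volume is mounted.
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: pvc-evs-auto-example     # Name of the PVC created above.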

Using a PV to Create a PVC

If a PV has been created, you can create a PVC to apply for PV resources.

-

Using the CCE Console

-
  1. Log in to the CCE console.
  2. Go to the cluster details page, choose Storage from the navigation pane, and click the PersistentVolumeClaims (PVCs) tab.
  3. Click Create PVC in the upper right corner. In the dialog box displayed, set the PVC parameters.

    • Creation Method: Select Existing volume.
    • PVC Name: Enter a PVC name.
    • Volume Type: Select your required volume type.
      • EVS
      • SFS
      • OBS
      • SFS Turbo
      -
    • Associate Volume: Select the volume to be associated, that is, the PV.
    -

  4. Click Create.
-

Using YAML

-

Example YAML for EVS

-
  • failure-domain.beta.kubernetes.io/region: region where the cluster is located.

    For details about the value of region, see Regions and Endpoints.

    -
  • failure-domain.beta.kubernetes.io/zone: AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

    For details about the value of zone, see Regions and Endpoints.

    -
-
apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: pvc-test
-  namespace: default
-  annotations:
-    everest.io/disk-volume-type: SAS                                # EVS disk type.
-    everest.io/crypt-key-id: fe0757de-104c-4b32-99c5-ee832b3bcaa3   # (Optional) Key ID. The key is used to encrypt EVS disks.
-    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
-    
-  labels:
-    failure-domain.beta.kubernetes.io/region: eu-de
-    failure-domain.beta.kubernetes.io/zone: eu-de-01
-spec:
-  accessModes:
-  - ReadWriteOnce               # The value must be ReadWriteOnce for EVS.
-  resources:
-    requests:
-      storage: 10Gi              
-  storageClassName: csi-disk     # Storage class name. The value is csi-disk for EVS.
-  volumeName: cce-evs-test       # PV name.
-

Example YAML for SFS:

-
apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: pvc-sfs-test
-  namespace: default
-  annotations:
-    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
-spec:
-  accessModes:
-  - ReadWriteMany              # The value must be ReadWriteMany for SFS.
-  resources:
-    requests:
-      storage: 100Gi           # Requested PVC capacity.
-  storageClassName: csi-nas    # Storage class name. The value is csi-nas for SFS.
-  volumeName: cce-sfs-test     # PV name.
-

Example YAML for OBS:

-
apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: pvc-obs-test
-  namespace: default
-  annotations:
-    everest.io/obs-volume-type: STANDARD                         # OBS bucket type. Currently, standard (STANDARD) and infrequent access (WARM) are supported.
-    csi.storage.k8s.io/fstype: s3fs                              # File type. obsfs indicates to create a parallel file system (recommended), and s3fs indicates to create an OBS bucket.
-    csi.storage.k8s.io/node-publish-secret-name: test-user
-    csi.storage.k8s.io/node-publish-secret-namespace: default
-    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
-    
-spec:
-  accessModes:
-  - ReadWriteMany             # The value must be ReadWriteMany for OBS.
-  resources:
-    requests:
-      storage: 1Gi            # Requested PVC capacity. This field is valid only for verification (fixed to 1, cannot be empty or 0). The value setting does not take effect for OBS buckets.
-  storageClassName: csi-obs   # Storage class name. The value is csi-obs for OBS.
-  volumeName: cce-obs-test    # PV name.
-

Example YAML for SFS Turbo:

-
apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: pvc-test
-  namespace: default
-  annotations:
-    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
-spec:
-  accessModes:
-    - ReadWriteMany               # The value must be ReadWriteMany for SFS Turbo.
-  resources:
-    requests:
-      storage: 100Gi              # Requested PVC capacity.
-  storageClassName: csi-sfsturbo  # Storage class name. The value is csi-sfsturbo for SFS Turbo.
-  volumeName: pv-sfsturbo-test         # PV name.
-
-

Using a Snapshot to Create a PVC

The disk type, encryption setting, and disk mode of the created EVS PVC are consistent with those of the snapshot's source EVS disk.

-

Using the CCE Console

-
  1. Log in to the CCE console.
  2. Go to the cluster details page, choose Storage from the navigation pane, and click the PersistentVolumeClaims (PVCs) tab.
  3. Click Create PVC in the upper right corner. In the dialog box displayed, set the PVC parameters.

    • Creation Mode: Select Snapshot.
    • PVC Name: name of a PVC.
    • Snapshot: Select the snapshot to be used.
    -

  4. Click Create.
-

Using YAML

-
apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: pvc-test
-  namespace: default
-  annotations:
-    everest.io/disk-volume-type: SSD     # EVS disk type, which must be the same as that of the source EVS disk of the snapshot.
-  labels:
-    failure-domain.beta.kubernetes.io/region: eu-de
-    failure-domain.beta.kubernetes.io/zone: 
-spec:
-  accessModes:
-  - ReadWriteOnce
-  resources:
-    requests:
-      storage: '10'
-  storageClassName: csi-disk
-  dataSource:
-    name: cce-disksnap-test             # Snapshot name
-    kind: VolumeSnapshot
-    apiGroup: snapshot.storage.k8s.io
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0379.html b/docs/cce/umn/cce_01_0379.html deleted file mode 100644 index 5725bca6..00000000 --- a/docs/cce/umn/cce_01_0379.html +++ /dev/null @@ -1,395 +0,0 @@ - - -

PersistentVolumes (PVs)

-

A PV is a persistent storage volume in a cluster. Same as a node, a PV is a cluster-level resource.

-

Notes and Constraints

  • On the new CCE console (the cluster must be upgraded to v1.19.10 or later and the everest add-on to v1.2.10 or later), you can manage PVs directly. On the old CCE console, PVs can only be imported or dynamically created, and you cannot manage the PV lifecycle on the console.
  • Multiple PVs can use the same SFS or SFS Turbo file system with the following restrictions:
    • An error may occur if multiple PVCs/PVs that use the same underlying SFS or SFS Turbo file system are mounted to the same pod.
    • The persistentVolumeReclaimPolicy parameter in the PVs must be set to Retain. Otherwise, when a PV is deleted, the associated underlying volume may be deleted. In this case, other PVs associated with the underlying volume may be abnormal.
    • When the underlying volume is repeatedly used, it is recommended that ReadWriteMany be implemented at the application layer to prevent data overwriting and loss.
    -
-
-

Volume Access Modes

PVs can be mounted to the host system only in the mode supported by underlying storage resources. For example, a file storage system can be read and written by multiple nodes, but an EVS disk can be read and written by only one node.

-
  • ReadWriteOnce: A volume can be mounted as read-write by a single node. This access mode is supported by EVS.
  • ReadWriteMany: A volume can be mounted as read-write by multiple nodes. This access mode is supported by SFS, SFS Turbo, and OBS.
- -
- - - - - - - - - - - - - - - - - - - - - -
Table 1 Access modes supported by cloud storage

Storage Type

-

ReadWriteOnce

-

ReadWriteMany

-

EVS

-

√

-

×

-

SFS

-

×

-

√

-

OBS

-

×

-

√

-

SFS Turbo

-

×

-

√

-
-
-
-

PV Reclaim Policy

A PV reclaim policy is used to delete or reclaim underlying volumes when a PVC is deleted. The value can be Delete or Retain.

-
  • Delete: When a PVC is deleted, the PV and underlying storage resources are deleted.
  • Retain: When a PVC is deleted, the PV and underlying storage resources are not deleted. Instead, you must manually delete these resources. After a PVC is deleted, the PV resource is in the Released state and cannot be bound to the PVC again.
-

Everest also allows you to delete a PVC without deleting the underlying storage resources. This can be achieved only by using a YAML file: set the PV reclaim policy to Delete and add the annotation "everest.io/reclaim-policy: retain-volume-only". In this way, when the PVC is deleted, the PV resource is deleted, but the underlying storage resources are retained.

-
-

Creating an EVS Volume

The requirements for creating an EVS volume are as follows:

-
  • System disks, DSS disks, and shared disks cannot be used.
  • The EVS disk is one of the supported types (common I/O, high I/O, and ultra-high I/O), and the EVS disk device type is SCSI.
  • The EVS disk is not frozen or used, and the status is available.
  • If the EVS disk is encrypted, the key must be available.
-
-

Using the CCE Console

-
  1. Log in to the CCE console.
  2. Access the cluster details page, choose Storage from the navigation pane, and click the Volumes tab.
  3. Click Create Volume in the upper right corner. In the dialog box displayed, set the volume parameters.

    • Volume Type: Select EVS.
    • EVS:
    • PV Name: Enter a PV name.
    • Access Mode: ReadWriteOnce
    • Reclaim Policy: Select Delete or Retain as required. For details, see PV Reclaim Policy.
    -

  4. Click Create.
-

Using YAML

-
apiVersion: v1
-kind: PersistentVolume
-metadata:
-  annotations:
-    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
-    everest.io/reclaim-policy: retain-volume-only         # (Optional) The PV is deleted while the underlying volume is retained.
-  name: cce-evs-test
-  labels:
-    failure-domain.beta.kubernetes.io/region: eu-de
-    failure-domain.beta.kubernetes.io/zone: eu-de-01
-spec:
-  accessModes:
-    - ReadWriteOnce     # Access mode. The value is fixed to ReadWriteOnce for EVS.
-  capacity:
-    storage: 10Gi       #  EVS disk capacity, in the unit of Gi. The value ranges from 1 to 32768.
-  csi:
-    driver: disk.csi.everest.io     # Dependent storage driver for the mounting.
-    fsType: ext4
-    volumeHandle: 459581af-e78c-4356-9e78-eaf9cd8525eb   # Volume ID of the EVS disk.
-    volumeAttributes:
-      everest.io/disk-mode: SCSI           # Device type of the EVS disk. Only SCSI is supported.
-      everest.io/disk-volume-type: SAS     # EVS disk type.
-      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
-      everest.io/crypt-key-id: 0992dbda-6340-470e-a74e-4f0db288ed82    # (Optional) Encryption key ID. Mandatory for an encrypted disk.
-  persistentVolumeReclaimPolicy: Delete    # Reclaim policy.
-  storageClassName: csi-disk               # Storage class name. The value must be csi-disk.
- -
- - - - - - - - - - - - - - - - - - - - - - - - - -
Table 2 Key parameters

Parameter

-

Description

-

everest.io/reclaim-policy: retain-volume-only

-

This field is optional.

-

Currently, only retain-volume-only is supported.

-

This field is valid only when the everest version is 1.2.9 or later and the reclaim policy is Delete. If the reclaim policy is Delete and the current value is retain-volume-only, the associated PV is deleted while the underlying storage volume is retained, when a PVC is deleted.

-

failure-domain.beta.kubernetes.io/region

-

Region where the cluster is located.

-

For details about the value of region, see Regions and Endpoints.

-

failure-domain.beta.kubernetes.io/zone

-

AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

-

For details about the value of zone, see Regions and Endpoints.

-

volumeHandle

-

Volume ID of the EVS disk.

-

To obtain the volume ID, log in to the Cloud Server Console. In the navigation pane, choose Elastic Volume Service > Disks. Click the name of the target EVS disk to go to its details page. On the Summary tab page, click the copy button after ID.

-

everest.io/disk-volume-type

-

EVS disk type. All letters are in uppercase.

-
  • SATA: common I/O
  • SAS: high I/O
  • SSD: ultra-high I/O
-

everest.io/crypt-key-id

-

Encryption key ID. This field is mandatory when the volume is an encrypted volume.

-

persistentVolumeReclaimPolicy

-

A reclaim policy is supported when the cluster version is equal to or later than 1.19.10 and the everest version is equal to or later than 1.2.9.

-

The Delete and Retain policies are supported.

-

Delete:

-
  • If everest.io/reclaim-policy is not specified, both the PV and EVS disk are deleted when a PVC is deleted.
  • If everest.io/reclaim-policy is set to retain-volume-only, when a PVC is deleted, the PV is deleted but the EVS resources are retained.
-

Retain: When a PVC is deleted, the PV and underlying storage resources are not deleted. Instead, you must manually delete these resources. After that, the PV resource is in the Released state and cannot be bound to the PVC again.

-

If high data security is required, you are advised to select Retain to prevent data from being deleted by mistake.

-
-
-
-

Creating an SFS Volume

  • The SFS file system and the cluster must be in the same VPC.
-
-

Using YAML

-
apiVersion: v1
-kind: PersistentVolume
-metadata:
-  annotations:
-    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
-    everest.io/reclaim-policy: retain-volume-only      # (Optional) The PV is deleted while the underlying volume is retained.
-  name: cce-sfs-test
-spec:
-  accessModes:
-  - ReadWriteMany      # Access mode. The value must be ReadWriteMany for SFS.
-  capacity:
-    storage: 1Gi       # File storage capacity.
-  csi:
-    driver: disk.csi.everest.io   # Mount the dependent storage driver.
-    fsType: nfs
-    volumeHandle: 30b3d92a-0bc7-4610-b484-534660db81be   # SFS file system ID.
-    volumeAttributes:
-      everest.io/share-export-location:   # Shared path of the file storage
-      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
-  persistentVolumeReclaimPolicy: Retain    # Reclaim policy.
-  storageClassName: csi-nas                # Storage class name. The value must be csi-nas for SFS.
-  mountOptions: []                         # Mount options
- -
- - - - - - - - - - - - - - - - - - - - - - -
Table 3 Key parameters

Parameter

-

Description

-

everest.io/reclaim-policy: retain-volume-only

-

This field is optional.

-

Currently, only retain-volume-only is supported.

-

This field is valid only when the everest version is 1.2.9 or later and the reclaim policy is Delete. If the reclaim policy is Delete and the current value is retain-volume-only, the associated PV is deleted while the underlying storage volume is retained, when a PVC is deleted.

-

volumeHandle

-

File system ID.

-

On the management console, choose Service List > Storage > Scalable File Service. In the SFS file system list, click the name of the target file system and copy the content following ID on the page displayed.

-

everest.io/share-export-location

-

Shared path of the file system.

-

On the management console, choose Service List > Storage > Scalable File Service. You can obtain the shared path of the file system from the Mount Address column.

-

mountOptions

-

Mount options.

-

If not specified, the following configurations are used by default. For details, see SFS Volume Mount Options.

-
mountOptions:
-- vers=3
-- timeo=600
-- nolock
-- hard
-

everest.io/crypt-key-id

-

Encryption key ID. This field is mandatory when the volume is an encrypted volume.

-

persistentVolumeReclaimPolicy

-

A reclaim policy is supported when the cluster version is equal to or later than 1.19.10 and the everest version is equal to or later than 1.2.9.

-

The options are as follows:

-

Delete:

-
  • If everest.io/reclaim-policy is not specified, both the PV and SFS volume are deleted when a PVC is deleted.
  • If everest.io/reclaim-policy is set to retain-volume-only, when a PVC is deleted, the PV is deleted but the file storage resources are retained.
-

Retain: When a PVC is deleted, the PV and underlying storage resources are not deleted. Instead, you must manually delete these resources. After that, the PV resource is in the Released state and cannot be bound to the PVC again.

-

If high data security is required, you are advised to select Retain to prevent data from being deleted by mistake.

-
-
-
-

Creating an OBS Volume

Secure containers do not support OBS volumes.

-

A single user can create a maximum of 100 OBS buckets on the console. If you have a large number of CCE workloads and you want to mount an OBS bucket to every workload, you may easily run out of buckets. In this scenario, you are advised to access OBS through the OBS API or SDK instead of mounting OBS buckets to workloads on the console.

-
-

Using the CCE Console

-
  1. Log in to the CCE console.
  2. Access the cluster details page, choose Storage from the navigation pane, and click the Volumes tab.
  3. Click Create Volume in the upper right corner. In the dialog box displayed, set the volume parameters.

    • Volume Type: Select OBS.
    • Select OBS resources.
    • PV Name: Enter a PV name.
    • Access Mode: ReadWriteMany
    • Reclaim Policy: Select Delete or Retain as required. For details, see PV Reclaim Policy.
    • Key: You can customize the access key (AK/SK) for mounting an OBS volume. You can use the AK/SK to create a secret and mount the secret to the PV. For details, see Using a Custom AK/SK to Mount an OBS Volume.
    • Mount Options: mount options. For details about the options, see Setting Mount Options.
    -

  4. Click Create.
-

Using YAML

-
apiVersion: v1
-kind: PersistentVolume
-metadata:
-  annotations:
-    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
-    everest.io/reclaim-policy: retain-volume-only         # (Optional) The PV is deleted while the underlying volume is retained.
-  name: cce-obs-test
-spec:
-  accessModes:
-  - ReadWriteMany                      # Access mode. The value must be ReadWriteMany for OBS.
-  capacity:
-    storage: 1Gi      # Storage capacity. This parameter is set only to meet the PV format requirements. It can be set to any value. The actual OBS space size is not limited by this value.
-  csi:
-    driver: obs.csi.everest.io        # Dependent storage driver for the mounting.
-    fsType: obsfs                      # OBS file type.
-    volumeHandle: cce-obs-bucket       # OBS bucket name.
-    volumeAttributes:
-      everest.io/obs-volume-type: STANDARD
-      everest.io/region: eu-de
-      
-      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
-    nodePublishSecretRef:
-      name: test-user
-      namespace: default
-  persistentVolumeReclaimPolicy: Retain       # Reclaim policy.
-  storageClassName: csi-obs                   # Storage class name. The value must be csi-obs for OBS.
-  mountOptions: []                            # Mount options.
- -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Table 4 Key parameters

Parameter

-

Description

-

everest.io/reclaim-policy: retain-volume-only

-

This field is optional.

-

Currently, only retain-volume-only is supported.

-

This field is valid only when the everest version is 1.2.9 or later and the reclaim policy is Delete. If the reclaim policy is Delete and the current value is retain-volume-only, the associated PV is deleted while the underlying storage volume is retained, when a PVC is deleted.

-

fsType

-

File type. The value can be obsfs or s3fs. If the value is s3fs, an OBS bucket is created and mounted using s3fs. If the value is obsfs, an OBS parallel file system is created and mounted using obsfs. You are advised to set this field to obsfs.

-

volumeHandle

-

OBS bucket name.

-

everest.io/obs-volume-type

-

Storage class, including STANDARD (standard bucket) and WARM (infrequent access bucket).

-

everest.io/region

-

Region where the OBS bucket is deployed.

-

For details about the value of region, see Regions and Endpoints.

-

nodePublishSecretRef

-

Access key (AK/SK) used for mounting the object storage volume. You can use the AK/SK to create a secret and mount it to the PV. For details, see Using a Custom AK/SK to Mount an OBS Volume.

-

mountOptions

-

Mount options. For details, see OBS Volume Mount Options.

-

persistentVolumeReclaimPolicy

-

A reclaim policy is supported when the cluster version is equal to or later than 1.19.10 and the everest version is equal to or later than 1.2.9.

-

The Delete and Retain policies are supported.

-

Delete:

-
  • If everest.io/reclaim-policy is not specified, both the PV and OBS volume are deleted when a PVC is deleted.
  • If everest.io/reclaim-policy is set to retain-volume-only, when a PVC is deleted, the PV is deleted but the object storage resources are retained.
-

Retain: When a PVC is deleted, the PV and underlying storage resources are not deleted. Instead, you must manually delete these resources. After that, the PV resource is in the Released state and cannot be bound to the PVC again.

-

If high data security is required, you are advised to select Retain to prevent data from being deleted by mistake.

-
-
-
-

Creating an SFS Turbo Volume

SFS Turbo and the cluster must be in the same VPC.

-
-

Using the CCE Console

-
  1. Log in to the CCE console.
  2. Access the cluster details page, choose Storage from the navigation pane, and click the Volumes tab.
  3. Click Create Volume in the upper right corner. In the dialog box displayed, set the volume parameters.

    • Volume Type: Select SFS Turbo.
    • SFS Turbo: Select SFS Turbo resources.
    • PV Name: Enter a PV name.
    • Access Mode: ReadWriteMany
    • Reclaim Policy: Select Retain. For details, see PV Reclaim Policy.
    • Mount Options: mount options. For details about the options, see Setting Mount Options.
    -

  4. Click Create.
-

Using YAML

-
apiVersion: v1
-kind: PersistentVolume
-metadata:
-  annotations:
-    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
-  name: cce-sfsturbo-test
-spec:
-  accessModes:
-    - ReadWriteMany       # Access mode. The value must be ReadWriteMany for SFS Turbo.
-  capacity:
-    storage: 100.00Gi     # SFS Turbo volume capacity.
-  csi:
-    driver: sfsturbo.csi.everest.io    # Dependent storage driver for the mounting.
-    fsType: nfs
-    volumeHandle: 6674bd0a-d760-49de-bb9e-805c7883f047      # SFS Turbo volume ID.
-    volumeAttributes:
-      everest.io/share-export-location: 192.168.0.85:/      # Shared path of the SFS Turbo volume.
-      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
-  persistentVolumeReclaimPolicy: Retain     # Reclaim policy.
-  storageClassName: csi-sfsturbo            # Storage class name. The value must be csi-sfsturbo for SFS Turbo.
-  mountOptions: []                          # Mount options.
- -
- - - - - - - - - - - - - - - - -
Table 5 Key parameters

Parameter

-

Description

-

volumeHandle

-

SFS Turbo volume ID.

-

You can obtain the ID on the SFS Turbo storage instance details page on the SFS console.

-

everest.io/share-export-location

-

Shared path of the SFS Turbo volume.

-

mountOptions

-

Mount options.

-

If not specified, the following configurations are used by default. For details, see SFS Volume Mount Options.

-
mountOptions:
-- vers=3
-- timeo=600
-- nolock
-- hard
-

persistentVolumeReclaimPolicy

-

A reclaim policy is supported when the cluster version is equal to or later than 1.19.10 and the everest version is equal to or later than 1.2.9.

-

The Delete and Retain policies are supported.

-

Delete:

-
  • If everest.io/reclaim-policy is not specified, both the PV and SFS Turbo volume are deleted when a PVC is deleted.
  • If everest.io/reclaim-policy is set to retain-volume-only, when a PVC is deleted, the PV is deleted but the SFS Turbo resources are retained.
-

Retain: When a PVC is deleted, the PV and underlying storage resources are not deleted. Instead, you must manually delete these resources. After that, the PV resource is in the Released state and cannot be bound to the PVC again.

-

If high data security is required, you are advised to select Retain to prevent data from being deleted by mistake.

-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0380.html b/docs/cce/umn/cce_01_0380.html deleted file mode 100644 index 50546fc5..00000000 --- a/docs/cce/umn/cce_01_0380.html +++ /dev/null @@ -1,209 +0,0 @@ - - -

StorageClass

-

StorageClass describes the storage class used in the cluster. You need to specify StorageClass when creating a PVC or PV. As of now, CCE provides storage classes such as csi-disk, csi-nas, and csi-obs by default. When defining a PVC, you can use a StorageClassName to automatically create a PV of the corresponding type and automatically create underlying storage resources.

-

You can run the following command to query the storage classes that CCE supports. You can use the CSI plug-in provided by CCE to customize a storage class, which functions similarly to the default storage classes in CCE.

-
# kubectl get sc
-NAME                PROVISIONER                     AGE
-csi-disk            everest-csi-provisioner         17d          # Storage class for EVS disks
-csi-nas             everest-csi-provisioner         17d          # Storage class for SFS file systems
-csi-obs             everest-csi-provisioner         17d          # Storage class for OBS buckets
-

After a StorageClass is set, PVs can be automatically created and maintained. You only need to specify the StorageClass when creating a PVC, which greatly reduces the workload.

-

In addition to the predefined storage classes provided by CCE, you can also customize storage classes. The following sections describe the application status, solutions, and methods of customizing storage classes.

-

Challenges

When using storage resources in CCE, the most common method is to specify storageClassName to define the type of storage resources to be created when creating a PVC. The following configuration shows how to use a PVC to apply for an SAS (high I/O) EVS disk (block storage).

-
apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: pvc-evs-example
-  namespace: default
-  annotations:
-    everest.io/disk-volume-type: SAS
-spec:
-  accessModes:
-  - ReadWriteOnce
-  resources:
-    requests:
-      storage: 10Gi
-  storageClassName: csi-disk
-

If you need to specify the EVS disk type, you can set the everest.io/disk-volume-type field. The value SAS is used as an example here, indicating the high I/O EVS disk type. Or you can choose SATA (common I/O) and SSD (ultra-high I/O).

-

This configuration method may not work if you want to:

-
  • Set storageClassName only, which is simpler than specifying the EVS disk type by using everest.io/disk-volume-type.
  • Avoid modifying YAML files or Helm charts. Some users switch from self-built or other Kubernetes services to CCE and have written YAML files of many applications. In these YAML files, different types of storage resources are specified by different StorageClassNames. When using CCE, they need to modify a large number of YAML files or Helm charts to use storage resources, which is labor-consuming and error-prone.
  • Set the default storageClassName for all applications to use the default storage class. In this way, you can create storage resources of the default type without needing to specify storageClassName in the YAML file.
-
-

Solution

This section describes how to set a custom storage class in CCE and how to set the default storage class. You can specify different types of storage resources by setting storageClassName.

-
  • For the first scenario, you can define custom storageClassNames for SAS and SSD EVS disks. For example, define a storage class named csi-disk-sas for creating SAS disks. The following figure shows the differences before and after you use a custom storage class.

    -
  • For the second scenario, you can define a storage class with the same name as that in the existing YAML file without needing to modify storageClassName in the YAML file.
  • For the third scenario, you can set the default storage class as described below to create storage resources without specifying storageClassName in YAML files.
    apiVersion: v1
    -kind: PersistentVolumeClaim
    -metadata:
    -  name: pvc-evs-example
    -  namespace: default
    -spec:
    -  accessModes:
    -  - ReadWriteOnce
    -  resources:
    -    requests:
    -      storage: 10Gi
    -
-
-

Custom Storage Classes

You can customize a high I/O storage class in a YAML file. For example, the name csi-disk-sas indicates that the disk type is SAS (high I/O).

-
apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
-  name: csi-disk-sas                          # Name of the high I/O storage class, which can be customized.
-parameters:
-  csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io
-  csi.storage.k8s.io/fstype: ext4
-  everest.io/disk-volume-type: SAS            # High I/O EVS disk type, which cannot be customized.
-  everest.io/passthrough: "true"
-provisioner: everest-csi-provisioner
-reclaimPolicy: Delete
-volumeBindingMode: Immediate
-allowVolumeExpansion: true                    # true indicates that capacity expansion is allowed.
-

For an ultra-high I/O storage class, you can set the class name to csi-disk-ssd to create SSD EVS disks (ultra-high I/O).

-
apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
-  name: csi-disk-ssd                       # Name of the ultra-high I/O storage class, which can be customized.
-parameters:
-  csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io
-  csi.storage.k8s.io/fstype: ext4
-  everest.io/disk-volume-type: SSD         # Ultra-high I/O EVS disk type, which cannot be customized.
-  everest.io/passthrough: "true"
-provisioner: everest-csi-provisioner
-reclaimPolicy: Delete
-volumeBindingMode: Immediate
-allowVolumeExpansion: true
-

reclaimPolicy: indicates the reclaim policy of the underlying cloud storage. The value can be Delete or Retain.

-
  • Delete: When a PVC is deleted, both the PV and the EVS disk are deleted.
  • Retain: When a PVC is deleted, the PV and underlying storage resources are not deleted. Instead, you must manually delete these resources. After that, the PV resource is in the Released state and cannot be bound to the PVC again.
-

If high data security is required, you are advised to select Retain to prevent data from being deleted by mistake.
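For example, a variant of the SAS storage class above that retains the underlying disks could look as follows. This is only a minimal sketch; the class name csi-disk-sas-retain is an example, not a CCE default.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-disk-sas-retain             # Example name for a custom class that retains disks.
parameters:
  csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io
  csi.storage.k8s.io/fstype: ext4
  everest.io/disk-volume-type: SAS
  everest.io/passthrough: "true"
provisioner: everest-csi-provisioner
reclaimPolicy: Retain                   # PVs and EVS disks are kept after the PVC is deleted.
volumeBindingMode: Immediate
allowVolumeExpansion: true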

-

After the definitions are complete, run the kubectl create command to create the storage classes.

-
# kubectl create -f sas.yaml
-storageclass.storage.k8s.io/csi-disk-sas created
-# kubectl create -f ssd.yaml
-storageclass.storage.k8s.io/csi-disk-ssd created
-

Query the storage class again. Two more types of storage classes are displayed in the command output, as shown below.

-
# kubectl get sc
-NAME                PROVISIONER                     AGE
-csi-disk            everest-csi-provisioner         17d
-csi-disk-sas        everest-csi-provisioner         2m28s
-csi-disk-ssd        everest-csi-provisioner         16s
-csi-disk-topology   everest-csi-provisioner         17d
-csi-nas             everest-csi-provisioner         17d
-csi-obs             everest-csi-provisioner         17d
-csi-sfsturbo        everest-csi-provisioner         17d
-

Other types of storage resources can be defined in a similar way. You can use kubectl to obtain the YAML file and modify it as required.

-
  • File storage
    # kubectl get sc csi-nas -oyaml
    -kind: StorageClass
    -apiVersion: storage.k8s.io/v1
    -metadata:
    -  name: csi-nas
    -provisioner: everest-csi-provisioner
    -parameters:
    -  csi.storage.k8s.io/csi-driver-name: nas.csi.everest.io
    -  csi.storage.k8s.io/fstype: nfs
    -  everest.io/share-access-level: rw
    -  everest.io/share-access-to: 5e3864c6-e78d-4d00-b6fd-de09d432c632   # ID of the VPC to which the cluster belongs
    -  everest.io/share-is-public: 'false'
    -  everest.io/zone: xxxxx          # AZ
    -reclaimPolicy: Delete
    -allowVolumeExpansion: true
    -volumeBindingMode: Immediate
    -
  • Object storage
    # kubectl get sc csi-obs -oyaml
    -kind: StorageClass
    -apiVersion: storage.k8s.io/v1
    -metadata:
    -  name: csi-obs
    -provisioner: everest-csi-provisioner
    -parameters:
    -  csi.storage.k8s.io/csi-driver-name: obs.csi.everest.io
    -  csi.storage.k8s.io/fstype: s3fs           # Object storage type. s3fs indicates an object bucket, and obsfs indicates a parallel file system.
    -  everest.io/obs-volume-type: STANDARD      # Storage class of the OBS bucket
    -reclaimPolicy: Delete
    -volumeBindingMode: Immediate
    -
-
-

Setting a Default Storage Class

You can specify a storage class as the default class. In this way, if you do not specify storageClassName when creating a PVC, the PVC is created using the default storage class.

-

For example, to specify csi-disk-ssd as the default storage class, edit your YAML file as follows:

-
apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
-  name: csi-disk-ssd
-  annotations:
-    storageclass.kubernetes.io/is-default-class: "true"   # Specifies the default storage class in a cluster. A cluster can have only one default storage class.
-parameters:
-  csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io
-  csi.storage.k8s.io/fstype: ext4
-  everest.io/disk-volume-type: SSD
-  everest.io/passthrough: "true"
-provisioner: everest-csi-provisioner
-reclaimPolicy: Delete
-volumeBindingMode: Immediate
-allowVolumeExpansion: true
-

Delete the existing csi-disk-ssd storage class, run the kubectl create command to create it again from the preceding definition, and then query the storage classes. The following information is displayed.

-
# kubectl delete sc csi-disk-ssd
-storageclass.storage.k8s.io "csi-disk-ssd" deleted
-# kubectl create -f ssd.yaml
-storageclass.storage.k8s.io/csi-disk-ssd created
-# kubectl get sc
-NAME                     PROVISIONER                     AGE
-csi-disk                 everest-csi-provisioner         17d
-csi-disk-sas             everest-csi-provisioner         114m
-csi-disk-ssd (default)   everest-csi-provisioner         9s
-csi-disk-topology        everest-csi-provisioner         17d
-csi-nas                  everest-csi-provisioner         17d
-csi-obs                  everest-csi-provisioner         17d
-csi-sfsturbo             everest-csi-provisioner         17d
-
-

Verification

  • Use csi-disk-sas to create a PVC.
    apiVersion: v1
    -kind: PersistentVolumeClaim
    -metadata:
    -  name:  sas-disk
    -spec:
    -  accessModes:
    -  - ReadWriteOnce
    -  resources:
    -    requests:
    -      storage: 10Gi
    -  storageClassName: csi-disk-sas
    -

Create the PVC and view its details. As shown below, the PVC is created and its STORAGECLASS value is csi-disk-sas.

    -
    # kubectl create -f sas-disk.yaml 
    -persistentvolumeclaim/sas-disk created
    -# kubectl get pvc
    -NAME       STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
    -sas-disk   Bound    pvc-6e2f37f9-7346-4419-82f7-b42e79f7964c   10Gi       RWO            csi-disk-sas   24s
    -# kubectl get pv
    -NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                     STORAGECLASS   REASON   AGE
    -pvc-6e2f37f9-7346-4419-82f7-b42e79f7964c   10Gi       RWO            Delete           Bound       default/sas-disk          csi-disk-sas            30s
    -

    View the PVC details on the CCE console. On the PV details page, you can see that the disk type is high I/O.

    -

    -
  • If storageClassName is not specified, the default storage class is used, as shown below.
    apiVersion: v1
    -kind: PersistentVolumeClaim
    -metadata:
    -  name:  ssd-disk
    -spec:
    -  accessModes:
    -  - ReadWriteOnce
    -  resources:
    -    requests:
    -      storage: 10Gi
    -

    Create and view the storage resource. You can see that the storage class of PVC ssd-disk is csi-disk-ssd, indicating that csi-disk-ssd is used by default.

    -
    # kubectl create -f ssd-disk.yaml 
    -persistentvolumeclaim/ssd-disk created
    -# kubectl get pvc
    -NAME       STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
    -sas-disk   Bound    pvc-6e2f37f9-7346-4419-82f7-b42e79f7964c   10Gi       RWO            csi-disk-sas   16m
    -ssd-disk   Bound    pvc-4d2b059c-0d6c-44af-9994-f74d01c78731   10Gi       RWO            csi-disk-ssd   10s
    -# kubectl get pv
    -NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                     STORAGECLASS   REASON   AGE
    -pvc-4d2b059c-0d6c-44af-9994-f74d01c78731   10Gi       RWO            Delete           Bound       default/ssd-disk          csi-disk-ssd            15s
    -pvc-6e2f37f9-7346-4419-82f7-b42e79f7964c   10Gi       RWO            Delete           Bound       default/sas-disk          csi-disk-sas            17m
    -

    View the PVC details on the CCE console. On the PV details page, you can see that the disk type is ultra-high I/O.

    -

    -
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0388.html b/docs/cce/umn/cce_01_0388.html deleted file mode 100644 index 905fd44d..00000000 --- a/docs/cce/umn/cce_01_0388.html +++ /dev/null @@ -1,64 +0,0 @@ - - -

Cluster Secrets

-

By default, CCE creates the following secrets in each namespace:

-
  • default-secret
  • paas.elb
  • default-token-xxxxx (xxxxx is a random number.)
-

-

The functions of these secrets are described as follows.

-

default-secret

The type of default-secret is kubernetes.io/dockerconfigjson. The data is the credential for logging in to the SWR image repository and is used to pull images from SWR. If you need to pull an image from SWR when creating a workload on CCE, set imagePullSecrets to default-secret.

-
apiVersion: v1                      
-kind: Pod                          
-metadata:
-  name: nginx                      
-spec:                            
-  containers:
-  - image: nginx:alpine            
-    name: container-0               
-    resources:                      
-      limits:
-        cpu: 100m
-        memory: 200Mi
-      requests:
-        cpu: 100m
-        memory: 200Mi
-  imagePullSecrets:
-  - name: default-secret
-

The data of default-secret is updated periodically, and the current data will expire after a certain period of time. You can run the describe command to view the expiration time of default-secret.

-

Use default-secret directly instead of copying the secret content to create a new one. The credential in the copied secret will expire and the image cannot be pulled.

-
-
$ kubectl describe secret default-secret
-Name:         default-secret
-Namespace:    default
-Labels:       secret-generated-by=cce
-Annotations:  temporary-ak-sk-expires-at: 2021-11-26 20:55:31.380909 +0000 UTC
-
-Type:  kubernetes.io/dockerconfigjson
-
-Data
-====
-.dockerconfigjson:  347 bytes
-
-

paas.elb

The data of paas.elb is the temporary AK/SK data, which is used to create ELB load balancers during Service and ingress creation. The data of paas.elb is periodically updated and expires after a certain period of time.

-

In practice, you will not directly use paas.elb. However, do not delete it. Otherwise, ELB load balancers will fail to be created.

-
-

default-token-xxxxx

By default, Kubernetes creates a service account named default for each namespace. default-token-xxxxx is the key of the service account, and xxxxx is a random number.

-
$ kubectl get sa
-NAME     SECRETS   AGE
-default  1         30d
-$ kubectl describe sa default
-Name:                default
-Namespace:           default
-Labels:              <none>
-Annotations:         <none>
-Image pull secrets:  <none>
-Mountable secrets:   default-token-vssmw
-Tokens:              default-token-vssmw
-Events:              <none>
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0393.html b/docs/cce/umn/cce_01_0393.html deleted file mode 100644 index 4b713370..00000000 --- a/docs/cce/umn/cce_01_0393.html +++ /dev/null @@ -1,22 +0,0 @@ - - -

Deployment Examples

-

-
-
- - - -
- diff --git a/docs/cce/umn/cce_01_0395.html b/docs/cce/umn/cce_01_0395.html deleted file mode 100644 index 4799eddc..00000000 --- a/docs/cce/umn/cce_01_0395.html +++ /dev/null @@ -1,14 +0,0 @@ - - -

Switching from AOM to HPA for Auto Scaling

-

CCE clusters of v1.15 or earlier support workload scaling based on AOM monitoring data. This function is no longer supported in CCE clusters of v1.17 or later.

-

If you have configured auto scaling based on AOM, you can switch to HPA policies after your cluster is upgraded to v1.17. Note the following differences during the switchover:

-
  • In AOM-based auto scaling, resource usage rate is calculated based on the limit of a workload, from 0% to 100%.
-

For example, if the memory request of a workload is 2 GB and the memory limit is 16 GB, AOM-based auto scaling triggers a scale-out as soon as memory utilization reaches 50% of the limit (8 GB). HPA measures utilization against the request, so to trigger the same scaling you need to set the memory usage rate to 400% (8 GB used/2 GB requested).
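For reference, a minimal HPA sketch that expresses this 400% memory target is shown below. The Deployment name my-app and the replica limits are placeholders; the autoscaling/v2 API shown here is available in newer cluster versions, while older clusters use autoscaling/v2beta2 with the same fields.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: my-app-hpa
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: my-app                  # Placeholder workload name.
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: Resource
    resource:
      name: memory
      target:
        type: Utilization
        averageUtilization: 400   # 8 GB used / 2 GB requested = 400% of the request.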

-
-
- -
- diff --git a/docs/cce/umn/cce_10_0002.html b/docs/cce/umn/cce_10_0002.html new file mode 100644 index 00000000..c0d8561b --- /dev/null +++ b/docs/cce/umn/cce_10_0002.html @@ -0,0 +1,21 @@ + + +

Cluster Overview

+
+
+ + + +
+ diff --git a/docs/cce/umn/cce_10_0003.html b/docs/cce/umn/cce_10_0003.html new file mode 100644 index 00000000..665f5868 --- /dev/null +++ b/docs/cce/umn/cce_10_0003.html @@ -0,0 +1,131 @@ + + +

Resetting a Node

+

Scenario

You can reset a node to modify the node configuration, such as the node OS and login mode.

+

Resetting a node will reinstall the node OS and the Kubernetes software on the node. If a node is unavailable because you modify the node configuration, you can reset the node to rectify the fault.

+
+

Notes and Constraints

  • For CCE clusters and CCE Turbo clusters, the version must be v1.13 or later to support node resetting.
+
+

Notes

  • Only worker nodes can be reset. If a node is still unavailable after being reset, delete it and create a new one.
  • Resetting a node will reinstall the node OS and interrupt workload services running on the node. Therefore, perform this operation during off-peak hours.
  • Data in the system disk and Docker data disks will be cleared. Back up important data before resetting the node.
  • When an extra data disk is mounted to a node, data in this disk will be cleared if the disk has not been unmounted before the node reset. To prevent data loss, back up data in advance and mount the data disk again after the node reset is complete.
  • The IP addresses of the workload pods on the node will change, but the container network access is not affected.
  • Make sure there is sufficient remaining EVS disk quota.
  • While the node is being reset, the backend marks it as unschedulable.
+
+

Procedure

The new console allows you to reset nodes in batches. You can also use private images to reset nodes in batches.

+
  1. Log in to the CCE console.
  2. Click the cluster name and access the cluster details page, choose Nodes in the navigation pane, and select one or multiple nodes to be reset in the list on the right. Choose More > Reset.
  3. In the displayed dialog box, click Yes.

    • For nodes in the DefaultPool node pool, the parameter setting page is displayed. Set the parameters by referring to 4.
    • For a node created in a node pool, parameters cannot be configured when the node is reset. The node is reset using the image and configuration of its node pool.
    +

  4. Specify node parameters.

    Compute Settings +
    + + + + + + + + + + + + + + + + +
    Table 1 Configuration parameters

    Parameter

    +

    Description

    +

    Specification

    +

    Node specifications cannot be modified when you reset a node.

    +

    Container Engine

    +

    CCE clusters support Docker.

    +

    For a CCE Turbo cluster, both Docker and containerd are supported. For details, see Mapping between Node OSs and Container Engines.

    +

    OS

    +

    Public image: Select an OS for the node.

    +

    Private image: You can use private images.

    +

    Login Mode

    +
    • Key Pair

      Select the key pair used to log in to the node. You can select a shared key.

      +

      A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create Key Pair.

      +
    +
    +
    +
    +

    Storage Settings

    +
    Configure storage resources on a node for the containers running on it. +
    + + + + + + + + + + +
    Table 2 Configuration parameters

    Parameter

    +

    Description

    +

    System Disk

    +

    Directly use the system disk of the cloud server.

    +

    Data Disk

    +

    At least one data disk is required for the container runtime and kubelet. The data disk cannot be deleted or uninstalled. Otherwise, the node will be unavailable.

    +

    Click Expand and select Allocate Disk Space to define the disk space occupied by the container runtime to store the working directories, container image data, and image metadata. For details about how to allocate data disk space, see Data Disk Space Allocation.

    +

    For other data disks, a raw disk is created without any processing by default. You can also click Expand and select Mount Disk to mount the data disk to a specified directory.

    +
    +
    +
    +
    Advanced Settings +
    + + + + + + + + + + + + + + + + + + + + + + +
    Table 3 Advanced configuration parameters

    Parameter

    +

    Description

    +

    Kubernetes Label

    +

    Click Add Label to set the key-value pair attached to the Kubernetes objects (such as pods). A maximum of 20 labels can be added.

    +

    Labels can be used to distinguish nodes. With workload affinity settings, container pods can be scheduled to a specified node. For more information, see Labels and Selectors.

    +

    Resource Tag

    +

    You can add resource tags to classify resources.

    +

    You can create predefined tags in Tag Management Service (TMS). Predefined tags are visible to all service resources that support the tagging function. You can use these tags to improve tagging and resource migration efficiency.

    +

    CCE will automatically create the "CCE-Dynamic-Provisioning-Node=node id" tag.

    +

    Taint

    +
    This field is left blank by default. You can add taints to set anti-affinity for the node. A maximum of 10 taints are allowed for each node. Each taint contains the following parameters:
    • Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
    • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
    • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
    +
    NOTICE:
    • If taints are used, you must configure tolerations in the YAML files of pods. Otherwise, scale-up may fail or pods cannot be scheduled onto the added nodes.
    • After a node pool is created, you can click Edit to modify its configuration. The modification will be synchronized to all nodes in the node pool.
    +
    +
    +

    Max. Pods

    +

    Maximum number of pods that can run on the node, including the default system pods.

    +

    This limit prevents the node from being overloaded with pods.

    +

    Pre-installation Command

    +

    Enter commands. A maximum of 1,000 characters are allowed.

    +

    The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed.

    +

    Post-installation Command

    +

    Enter commands. A maximum of 1,000 characters are allowed.

    +

    The script will be executed after Kubernetes software is installed and will not affect the installation.

    +
    +
    +
    +

  5. Click Next: Confirm.
  6. Click Submit.
+

+
+
+
+ +
+ diff --git a/docs/cce/umn/cce_10_0004.html b/docs/cce/umn/cce_10_0004.html new file mode 100644 index 00000000..bbe9012a --- /dev/null +++ b/docs/cce/umn/cce_10_0004.html @@ -0,0 +1,105 @@ + + +

Managing Node Labels

+

Node Label Usage Scenario

Node labels are mainly used in the following scenarios:

+
  • Node management: Node labels are used to classify nodes.
  • Affinity and anti-affinity between a workload and node:
    • Different workloads have different resource requirements such as CPU, memory, and I/O. If a workload consumes too many resources in a cluster, other workloads in the same cluster may fail to run properly. In this case, you are advised to add different labels to nodes. When deploying a workload, you can select nodes with specified labels for affinity deployment to ensure the normal operation of the system. Otherwise, node anti-affinity deployment can be used.
    • A system can be divided into multiple modules. Each module consists of multiple microservices. To ensure efficient O&M, you can add a module label to each node so that each module can be deployed on the corresponding node. In this way, modules do not interfere with each other and microservices can be easily maintained on their nodes.
    +
+
+

Inherent Label of a Node

After a node is created, some fixed labels exist and cannot be deleted. For details about these labels, see Table 1.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Inherent label of a node

Key

+

Description

+

New: topology.kubernetes.io/region

+

Old: failure-domain.beta.kubernetes.io/region

+

Region where the node is located

+

New: topology.kubernetes.io/zone

+

Old: failure-domain.beta.kubernetes.io/zone

+

AZ where the node is located

+

New: node.kubernetes.io/baremetal

+

Old: failure-domain.beta.kubernetes.io/is-baremetal

+

Whether the node is a bare metal node

+

false indicates that the node is not a bare metal node.

+

node.kubernetes.io/instance-type

+

Node specifications

+

kubernetes.io/arch

+

Node processor architecture

+

kubernetes.io/hostname

+

Node name

+

kubernetes.io/os

+

OS type

+

node.kubernetes.io/subnetid

+

ID of the subnet where the node is located.

+

os.architecture

+

Node processor architecture

+

For example, amd64 indicates an AMD64 (x86-64) processor.

+

os.name

+

Node OS name

+

os.version

+

Node OS kernel version

+

node.kubernetes.io/container-engine

+

Container engine used by the node.

+

accelerator

+

GPU node labels.

+

cce.cloud.com/cce-nodepool

+

The dedicated label of a node in a node pool.

+
+
+
+

Adding or Deleting a Node Label

  1. Log in to the CCE console.
  2. Click the cluster name, access the cluster details page, and choose Nodes in the navigation pane. On the page displayed, select a node and click Manage Labels and Taints.
  3. In the displayed dialog box, click Add batch operations under Batch Operation, and then choose Add/Update or Delete.

    Enter the key and value of the label to be added or deleted, and click OK.

    +

    For example, the key is deploy_qa and the value is true, indicating that the node is used to deploy the QA (test) environment.

    +

  4. After the label is added, check it in the node data, for example with the kubectl commands shown below.
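If kubectl is configured for the cluster, you can manage the same labels from the command line. The node name 192.168.0.100 and the deploy_qa label below are only examples.
# Add or update a label on a node
kubectl label node 192.168.0.100 deploy_qa=true --overwrite

# View the labels of the node
kubectl get node 192.168.0.100 --show-labels

# Delete the label (note the trailing hyphen)
kubectl label node 192.168.0.100 deploy_qa-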
+
+
+
+ +
+ diff --git a/docs/cce/umn/cce_01_0006.html b/docs/cce/umn/cce_10_0006.html similarity index 63% rename from docs/cce/umn/cce_01_0006.html rename to docs/cce/umn/cce_10_0006.html index 5f5e19e3..3e407221 100644 --- a/docs/cce/umn/cce_01_0006.html +++ b/docs/cce/umn/cce_10_0006.html @@ -1,79 +1,79 @@ - +

Overview

-

CCE provides Kubernetes-native container deployment and management and supports lifecycle management of container workloads, including creation, configuration, monitoring, auto scaling, upgrade, uninstall, service discovery, and load balancing.

-

Pod

A pod is the smallest and simplest unit in the Kubernetes object model that you create or deploy. A pod encapsulates one or more containers, storage volumes, a unique network IP address, and options that govern how the containers should run.

-

Pods can be used in either of the following ways:

-
  • A container is running in a pod. This is the most common usage of pods in Kubernetes. You can view the pod as a single encapsulated container, but Kubernetes directly manages pods instead of containers.
  • Multiple containers that need to be coupled and share resources run in a pod. In this scenario, an application contains a main container and several sidecar containers, as shown in Figure 1. For example, the main container is a web server that provides file services from a fixed directory, and a sidecar container periodically downloads files to the directory.
    Figure 1 Pod
    +

    CCE provides Kubernetes-native container deployment and management and supports lifecycle management of container workloads, including creation, configuration, monitoring, auto scaling, upgrade, uninstall, service discovery, and load balancing.

    +

    Pod

    A pod is the smallest and simplest unit in the Kubernetes object model that you create or deploy. A pod encapsulates one or more containers, storage volumes, a unique network IP address, and options that govern how the containers should run.

    +

    Pods can be used in either of the following ways:

    +
    • A container is running in a pod. This is the most common usage of pods in Kubernetes. You can view the pod as a single encapsulated container, but Kubernetes directly manages pods instead of containers.
    • Multiple containers that need to be coupled and share resources run in a pod. In this scenario, an application contains a main container and several sidecar containers, as shown in Figure 1. For example, the main container is a web server that provides file services from a fixed directory, and a sidecar container periodically downloads files to the directory.
      Figure 1 Pod
    -

    In Kubernetes, pods are rarely created directly. Instead, controllers such as Deployments and jobs, are used to manage pods. Controllers can create and manage multiple pods, and provide replica management, rolling upgrade, and self-healing capabilities. A controller generally uses a pod template to create corresponding pods.

    +

    In Kubernetes, pods are rarely created directly. Instead, controllers such as Deployments and jobs, are used to manage pods. Controllers can create and manage multiple pods, and provide replica management, rolling upgrade, and self-healing capabilities. A controller generally uses a pod template to create corresponding pods.

    -

    Deployment

    A pod is the smallest and simplest unit that you create or deploy in Kubernetes. It is designed to be an ephemeral, one-off entity. A pod can be evicted when node resources are insufficient and disappears along with a cluster node failure. Kubernetes provides controllers to manage pods. Controllers can create and manage pods, and provide replica management, rolling upgrade, and self-healing capabilities. The most commonly used controller is Deployment.

    -
    Figure 2 Relationship between a Deployment and pods
    -

    A Deployment can contain one or more pods. These pods have the same role. Therefore, the system automatically distributes requests to multiple pods of a Deployment.

    -

    A Deployment integrates a lot of functions, including online deployment, rolling upgrade, replica creation, and restoration of online jobs. To some extent, Deployments can be used to realize unattended rollout, which greatly reduces difficulties and operation risks in the rollout process.

    +

    Deployment

    A pod is the smallest and simplest unit that you create or deploy in Kubernetes. It is designed to be an ephemeral, one-off entity. A pod can be evicted when node resources are insufficient and disappears along with a cluster node failure. Kubernetes provides controllers to manage pods. Controllers can create and manage pods, and provide replica management, rolling upgrade, and self-healing capabilities. The most commonly used controller is Deployment.

    +
    Figure 2 Relationship between a Deployment and pods
    +

    A Deployment can contain one or more pods. These pods have the same role. Therefore, the system automatically distributes requests to multiple pods of a Deployment.

    +

    A Deployment integrates a lot of functions, including online deployment, rolling upgrade, replica creation, and restoration of online jobs. To some extent, Deployments can be used to realize unattended rollout, which greatly reduces difficulties and operation risks in the rollout process.

    -

    StatefulSet

    All pods under a Deployment have the same characteristics except for the name and IP address. If required, a Deployment can use the pod template to create a new pod. If not required, the Deployment can delete any one of the pods.

    -

    However, Deployments cannot meet the requirements in some distributed scenarios when each pod requires its own status or in a distributed database where each pod requires independent storage.

    -

    With detailed analysis, it is found that each part of distributed stateful applications plays a different role. For example, the database nodes are deployed in active/standby mode, and pods are dependent on each other. In this case, you need to meet the following requirements for the pods:

    -
    • A pod can be recognized by other pods. Therefore, a pod must have a fixed identifier.
    • Each pod has an independent storage device. After a pod is deleted and then restored, the data read from the pod must be the same as the previous one. Otherwise, the pod status is inconsistent.
    -

    To address the preceding requirements, Kubernetes provides StatefulSets.

    -
    1. A StatefulSet provides a fixed name for each pod following a fixed number ranging from 0 to N. After a pod is rescheduled, the pod name and the host name remain unchanged.
    2. A StatefulSet provides a fixed access domain name for each pod through the headless Service (described in following sections).
    3. The StatefulSet creates PersistentVolumeClaims (PVCs) with fixed identifiers to ensure that pods can access the same persistent data after being rescheduled.

      +

      StatefulSet

      All pods under a Deployment have the same characteristics except for the name and IP address. If required, a Deployment can use the pod template to create a new pod. If not required, the Deployment can delete any one of the pods.

      +

      However, Deployments cannot meet the requirements in some distributed scenarios when each pod requires its own status or in a distributed database where each pod requires independent storage.

      +

      With detailed analysis, it is found that each part of distributed stateful applications plays a different role. For example, the database nodes are deployed in active/standby mode, and pods are dependent on each other. In this case, you need to meet the following requirements for the pods:

      +
      • A pod can be recognized by other pods. Therefore, a pod must have a fixed identifier.
      • Each pod has an independent storage device. After a pod is deleted and then restored, the data read from the pod must be the same as the previous one. Otherwise, the pod status is inconsistent.
      +

      To address the preceding requirements, Kubernetes provides StatefulSets.

      +
      1. A StatefulSet provides a fixed name for each pod following a fixed number ranging from 0 to N. After a pod is rescheduled, the pod name and the host name remain unchanged.
      2. A StatefulSet provides a fixed access domain name for each pod through the headless Service (described in following sections).
      3. The StatefulSet creates PersistentVolumeClaims (PVCs) with fixed identifiers to ensure that pods can access the same persistent data after being rescheduled.

      -

      DaemonSet

      A DaemonSet runs a pod on each node in a cluster and ensures that there is only one pod. This works well for certain system-level applications, such as log collection and resource monitoring, since they must run on each node and need only a few pods. A good example is kube-proxy.

      -

      DaemonSets are closely related to nodes. If a node becomes faulty, the DaemonSet will not create the same pods on other nodes.

      -
      Figure 3 DaemonSet
      +

      DaemonSet

      A DaemonSet runs a pod on each node in a cluster and ensures that there is only one pod. This works well for certain system-level applications, such as log collection and resource monitoring, since they must run on each node and need only a few pods. A good example is kube-proxy.

      +

      DaemonSets are closely related to nodes. If a node becomes faulty, the DaemonSet will not create the same pods on other nodes.

      +
      Figure 3 DaemonSet
      -

      Job and Cron Job

      Jobs and cron jobs allow you to run short lived, one-off tasks in batch. They ensure the task pods run to completion.

      -
      • A job is a resource object used by Kubernetes to control batch tasks. Jobs are different from long-term servo tasks (such as Deployments and StatefulSets). The former is started and terminated at specific times, while the latter runs unceasingly unless being terminated. The pods managed by a job will be automatically removed after successfully completing tasks based on user configurations.
      • A cron job runs a job periodically on a specified schedule. A cron job object is similar to a line of a crontab file in Linux.
      -

      This run-to-completion feature of jobs is especially suitable for one-off tasks, such as continuous integration (CI).

      +

      Job and Cron Job

      Jobs and cron jobs allow you to run short lived, one-off tasks in batch. They ensure the task pods run to completion.

      +
      • A job is a resource object used by Kubernetes to control batch tasks. Jobs are different from long-term servo tasks (such as Deployments and StatefulSets). The former is started and terminated at specific times, while the latter runs unceasingly unless being terminated. The pods managed by a job will be automatically removed after successfully completing tasks based on user configurations.
      • A cron job runs a job periodically on a specified schedule. A cron job object is similar to a line of a crontab file in Linux.
      +

      This run-to-completion feature of jobs is especially suitable for one-off tasks, such as continuous integration (CI).

      -

      Workload Lifecycle

      -
      Table 1 Status description

      Status

      +

      Workload Lifecycle

      +
      - - - - - - - - - - - - - - - - - @@ -83,7 +83,7 @@
      diff --git a/docs/cce/umn/cce_10_0007.html b/docs/cce/umn/cce_10_0007.html new file mode 100644 index 00000000..3cc1f063 --- /dev/null +++ b/docs/cce/umn/cce_10_0007.html @@ -0,0 +1,129 @@ + + +

      Managing Workloads and Jobs

      +

      Scenario

      After a workload is created, you can upgrade, monitor, roll back, or delete the workload, as well as edit its YAML file. +
      Table 1 Status description

      Status

      Description

      +

      Description

      Running

      +

      Running

      All pods are running.

      +

      All pods are running.

      Unready

      +

      Unready

      A container is abnormal, the number of pods is 0, or the workload is in pending state.

      +

      A container is abnormal, the number of pods is 0, or the workload is in pending state.

      Upgrading/Rolling back

      +

      Upgrading/Rolling back

      The workload is being upgraded or rolled back.

      +

      The workload is being upgraded or rolled back.

      Available

      +

      Available

      For a multi-pod Deployment, some pods are abnormal but at least one pod is available.

      +

      For a multi-pod Deployment, some pods are abnormal but at least one pod is available.

      Completed

      +

      Completed

      The task is successfully executed. This status is available only for common tasks.

      +

      The task is successfully executed. This status is available only for common tasks.

      Stopped

      +

      Stopped

      The workload is stopped and the number of pods changes to 0. This status is available for workloads earlier than v1.13.

      +

      The workload is stopped and the number of pods changes to 0. This status is available for workloads earlier than v1.13.

      Deleting

      +

      Deleting

      The workload is being deleted.

      +

      The workload is being deleted.

      Pausing

      +

      Pausing

      The workload is being paused.

      +

      The workload is being paused.

      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      Table 1 Workload/Job management

      Operation

      +

      Description

      +

      Monitor

      +

      You can view the CPU and memory usage of workloads and pods on the CCE console.

      +

      View Log

      +

      You can view the logs of workloads.

      +

      Upgrade

      +

      You can replace images or image tags to quickly upgrade Deployments, StatefulSets, and DaemonSets without interrupting services.

      +

      Edit YAML

      +

      You can modify and download the YAML files of Deployments, StatefulSets, DaemonSets, and pods on the CCE console. YAML files of jobs and cron jobs can only be viewed, copied, and downloaded.

      +

      Roll Back

      +

      Only Deployments can be rolled back.

      +

      Redeploy

      +

      You can redeploy a workload. After the workload is redeployed, all pods in the workload will be restarted.

      +

      Enabling/Disabling the Upgrade

      +

      Only Deployments support this operation.

      +

      Manage Label

      +

      Labels are key-value pairs and can be attached to workloads for affinity and anti-affinity scheduling. Jobs and Cron Jobs do not support this operation.

      +

      Delete

      +

      You can delete a workload or job that is no longer needed. Deleted workloads or jobs cannot be recovered.

      +

      View Events

      +

      You can view event names, event types, number of occurrences, Kubernetes events, first occurrence time, and last occurrence time.

      +

      Stop/Start

      +

      You can only start or stop a cron job.

      +
      +
      +
      + +

      Monitoring a Workload

      You can view the CPU and memory usage of Deployments and pods on the CCE console to determine the resource specifications you may need. This section uses a Deployment as an example to describe how to monitor a workload.

      +
      1. Log in to the CCE console, go to an existing cluster, and choose Workloads in the navigation pane.
      2. Click the Deployments tab and click Monitor of the target workload. On the page that is displayed, you can view CPU usage and memory usage of the workload.
      3. Click the workload name. On the Pods tab page, click Monitor of the target pod to view its CPU and memory usage.
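If kubectl and a metrics add-on (such as metrics-server) are available in the cluster, you can also check resource usage from the command line, for example:
# CPU and memory usage of pods in the default namespace
kubectl top pod -n default

# CPU and memory usage of cluster nodes
kubectl top node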
      +
      +

      Viewing Logs

      You can view logs of Deployments, StatefulSets, DaemonSets, and jobs. This section uses a Deployment as an example to describe how to view logs.

      +
      1. Log in to the CCE console, go to an existing cluster, and choose Workloads in the navigation pane.
      2. Click the Deployments tab and click View Log of the target workload.

        On the displayed View Log window, you can view logs by time.
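Alternatively, if kubectl is configured for the cluster, you can read the same container logs from the command line. The label selector and pod name below are placeholders.
# List the pods of the Deployment (assuming it is labeled app=nginx)
kubectl get pod -l app=nginx

# Print the logs of one pod; -f follows the log stream
kubectl logs -f nginx-6799fc88d8-xxxxx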

        +

      +
      +

      Upgrading a Workload

You can quickly upgrade Deployments, StatefulSets, and DaemonSets on the CCE console.

      +

      This section uses a Deployment as an example to describe how to upgrade a workload.

      +

      Before replacing an image or image version, upload the new image to the SWR service.

      +
      1. Log in to the CCE console, go to an existing cluster, and choose Workloads in the navigation pane.
      2. Click the Deployments tab and click Upgrade of the target workload.

        • Workloads cannot be upgraded in batches.
        • Before performing an in-place StatefulSet upgrade, you must manually delete old pods. Otherwise, the upgrade status is always displayed as Upgrading.
        +
        +

      3. Upgrade the workload based on service requirements. The method for setting parameters is the same as that for creating a workload.
      4. After the update is complete, click Upgrade Workload, manually confirm the YAML file, and submit the upgrade.
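If you manage workloads with kubectl, replacing the image is roughly equivalent to the following commands; the Deployment name nginx, the container name container-0, and the image tag are placeholders:
# Replace the image of container container-0 in Deployment nginx
kubectl set image deployment/nginx container-0=nginx:alpine

# Watch the progress of the rolling upgrade
kubectl rollout status deployment/nginx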
      +
      +

      Editing a YAML file

      You can modify and download the YAML files of Deployments, StatefulSets, DaemonSets, and pods on the CCE console. YAML files of jobs and cron jobs can only be viewed, copied, and downloaded. This section uses a Deployment as an example to describe how to edit the YAML file.

      +
      1. Log in to the CCE console, go to an existing cluster, and choose Workloads in the navigation pane.
      2. Click the Deployments tab and choose More > Edit YAML in the Operation column of the target workload. In the dialog box that is displayed, modify the YAML file.
      3. Click Edit and then OK to save the changes.
      4. (Optional) In the Edit YAML window, click Download to download the YAML file.
      +
      +

      Rolling Back a Workload (Available Only for Deployments)

      CCE records the release history of all Deployments. You can roll back a Deployment to a specified version.

      +
      1. Log in to the CCE console, go to an existing cluster, and choose Workloads in the navigation pane.
      2. Click the Deployments tab, choose More > Roll Back in the Operation column of the target workload.
      3. Switch to the Change History tab page, click Roll Back to This Version of the target version, manually confirm the YAML file, and click OK.
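The same rollback can be performed with kubectl if it is configured for the cluster; the Deployment name nginx and the revision number are placeholders:
# List the recorded revisions of the Deployment
kubectl rollout history deployment/nginx

# Roll back to the previous revision
kubectl rollout undo deployment/nginx

# Or roll back to a specific revision, for example revision 2
kubectl rollout undo deployment/nginx --to-revision=2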

        +

      +
      +

      Redeploying a Workload

      After you redeploy a workload, all pods in the workload will be restarted. This section uses Deployments as an example to illustrate how to redeploy a workload.

      +
      1. Log in to the CCE console, go to an existing cluster, and choose Workloads in the navigation pane.
      2. Click the Deployments tab and choose More > Redeploy in the Operation column of the target workload.
      3. In the dialog box that is displayed, click Yes to redeploy the workload.
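With kubectl (1.15 or later), a comparable effect can be achieved by restarting the Deployment's rollout; the Deployment name nginx is a placeholder:
# Restart all pods of the Deployment without changing its spec
kubectl rollout restart deployment/nginx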
      +
      +

      Disabling/Enabling Upgrade (Available Only for Deployments)

      Only Deployments support this operation.

      +
      • After the upgrade is disabled, the upgrade command can be delivered but will not be applied to the pods.

        If you are performing a rolling upgrade, the rolling upgrade stops after the disabling upgrade command is delivered. In this case, the new and old pods co-exist.

        +
      • If a Deployment is being upgraded, it can be upgraded or rolled back. Its pods will inherit the latest updates of the Deployment. If they are inconsistent, the pods are upgraded automatically according to the latest information of the Deployment.
      +

A Deployment whose upgrade is disabled cannot be rolled back.

      +
      +
      1. Log in to the CCE console, go to an existing cluster, and choose Workloads in the navigation pane.
      2. Click the Deployments tab and choose More > Disable/Enable Upgrade in the Operation column of the workload.
      3. In the dialog box that is displayed, click Yes.
      +
      +

      Managing Labels

      Labels are key-value pairs and can be attached to workloads. Workload labels are often used for affinity and anti-affinity scheduling. You can add labels to multiple workloads or a specified workload.

      +

      You can manage the labels of Deployments, StatefulSets, and DaemonSets based on service requirements. This section uses Deployments as an example to describe how to manage labels.

      +

      In the following figure, three labels (release, env, and role) are defined for workload APP 1, APP 2, and APP 3. The values of these labels vary with workload.

      +
      • Label of APP 1: [release:alpha;env:development;role:frontend]
      • Label of APP 2: [release:beta;env:testing;role:frontend]
      • Label of APP 3: [release:alpha;env:production;role:backend]
      +

      If you set key to role and value to frontend when using workload scheduling or another function, APP 1 and APP 2 will be selected.

      +
      Figure 1 Label example
      +
      1. Log in to the CCE console, go to an existing cluster, and choose Workloads in the navigation pane.
      2. Click the Deployments tab and choose More > Manage Label in the Operation column of the target workload.
      3. Click Add, enter a key and a value, and click OK.

        A key-value pair must contain 1 to 63 characters starting and ending with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed.
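Workload labels can also be managed with kubectl if it is configured for the cluster. The Deployment name app1 and the labels below are only examples:
# Add or update a label on a Deployment
kubectl label deployment app1 role=frontend --overwrite

# View the labels of the Deployment
kubectl get deployment app1 --show-labels

# Remove the label (note the trailing hyphen)
kubectl label deployment app1 role-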

        +
        +

      +
      +

      Deleting a Workload/Job

      You can delete a workload or job that is no longer needed. Deleted workloads or jobs cannot be recovered. Exercise caution when you perform this operation. This section uses a Deployment as an example to describe how to delete a workload.

      +
      1. Log in to the CCE console, go to an existing cluster, and choose Workloads in the navigation pane.
      2. In the same row as the workload you will delete, choose Operation > More > Delete.

        Read the system prompts carefully. A workload cannot be recovered after it is deleted. Exercise caution when performing this operation.

        +

      3. Click Yes.

        • If the node where the pod is located is unavailable or shut down and the workload cannot be deleted, you can forcibly delete the pod from the pod list on the workload details page.
        • Ensure that the storage volumes to be deleted are not used by other workloads. If these volumes are imported or have snapshots, you can only unbind them.
        +
        +

      +
      +

      Viewing Events

      This section uses Deployments as an example to illustrate how to view events of a workload. To view the events of a job or cron job, click View Event in the Operation column of the target workload.

      +
      1. Log in to the CCE console, go to an existing cluster, and choose Workloads in the navigation pane.
      2. On the Deployments tab page, click the target workload. On the Pods tab page, click View Events to view the event name, event type, number of occurrences, Kubernetes event, first occurrence time, and last occurrence time.

        Event data will be retained for one hour and then automatically deleted.
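Events can also be queried with kubectl if it is configured for the cluster; the pod name below is a placeholder:
# List recent events in the default namespace, newest last
kubectl get events -n default --sort-by=.metadata.creationTimestamp

# View the events of a specific pod
kubectl describe pod nginx-6799fc88d8-xxxxx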

        +
        +

      +
      + +
      + +
      + diff --git a/docs/cce/umn/cce_10_0009.html b/docs/cce/umn/cce_10_0009.html new file mode 100644 index 00000000..9772fa48 --- /dev/null +++ b/docs/cce/umn/cce_10_0009.html @@ -0,0 +1,37 @@ + + +

      Using a Third-Party Image

      +

      Scenario

      CCE allows you to create workloads using images pulled from third-party image repositories.

      +

      Generally, a third-party image repository can be accessed only after authentication (using your account and password). CCE uses the secret-based authentication to pull images. Therefore, you need to create a secret for an image repository before pulling images from the repository.

      +
      +

      Prerequisites

      The node where the workload is running is accessible from public networks.

      +
      +

      Using the Console

      1. Create a secret for accessing a third-party image repository.

        Click the cluster name and access the cluster console. In the navigation pane, choose ConfigMaps and Secrets. On the Secrets tab page, click Create Secret in the upper right corner. Set Secret Type to kubernetes.io/dockerconfigjson. For details, see Creating a Secret.

        +

        Enter the user name and password used to access the third-party image repository.

        +

      2. When creating a workload, you can enter a private image path in the format of domainname/namespace/imagename:tag in Image Name and select the key created in 1.

        +

      3. Set other parameters and click Create Workload.
      +
      +

      Using kubectl

      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Create a secret of the dockercfg type using kubectl.

        kubectl create secret docker-registry myregistrykey --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL
        +

        In the preceding commands, myregistrykey indicates the secret name, and other parameters are described as follows:

        +
        • DOCKER_REGISTRY_SERVER: address of a third-party image repository, for example, www.3rdregistry.com or 10.10.10.10:443
        • DOCKER_USER: account used for logging in to a third-party image repository
        • DOCKER_PASSWORD: password used for logging in to a third-party image repository
        • DOCKER_EMAIL: email of a third-party image repository
        +

      3. Use a third-party image to create a workload.

        A dockercfg secret is used for authentication when you obtain a private image. The following is an example of using myregistrykey for authentication.
        apiVersion: v1
        +kind: Pod
        +metadata:
        +  name: foo
        +  namespace: default
        +spec:
        +  containers:
        +    - name: foo
        +      image: www.3rdregistry.com/janedoe/awesomeapp:v1
        +  imagePullSecrets:
        +    - name: myregistrykey              #Use the created secret.
        +
        +

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0010.html b/docs/cce/umn/cce_10_0010.html new file mode 100644 index 00000000..6a672493 --- /dev/null +++ b/docs/cce/umn/cce_10_0010.html @@ -0,0 +1,38 @@ + + +

      Overview

      +

      You can learn about a cluster network from the following two aspects:

      +
      • What is a cluster network like? A cluster consists of multiple nodes, and pods (or containers) are running on the nodes. Nodes and containers need to communicate with each other. For details about the cluster network types and their functions, see Cluster Network Structure.
      • How is pod access implemented in a cluster? Accessing a pod or container is a process of accessing services of a user. Kubernetes provides Service and Ingress to address pod access issues. This section summarizes common network access scenarios. You can select the proper scenario based on site requirements. For details about the network access scenarios, see Access Scenarios.
      +

      Cluster Network Structure

      All nodes in the cluster are located in a VPC and use the VPC network. The container network is managed by dedicated network add-ons.

      +

      +
      • Node Network

        A node network assigns IP addresses to hosts (nodes in the figure above) in a cluster. You need to select a VPC subnet as the node network of the CCE cluster. The number of available IP addresses in a subnet determines the maximum number of nodes (including master nodes and worker nodes) that can be created in a cluster. This quantity is also affected by the container network. For details, see the container network model.

        +
      • Container Network

        A container network assigns IP addresses to containers in a cluster. CCE inherits the IP-Per-Pod-Per-Network network model of Kubernetes. That is, each pod has an independent IP address on a network plane and all containers in a pod share the same network namespace. All pods in a cluster exist in a directly connected flat network. They can access each other through their IP addresses without using NAT. Kubernetes only provides a network mechanism for pods, but does not directly configure pod networks. The configuration of pod networks is implemented by specific container network add-ons. The container network add-ons are responsible for configuring networks for pods and managing container IP addresses.

        +

        Currently, CCE supports the following container network models:

        +
        • Container tunnel network: The container tunnel network is constructed on but independent of the node network through tunnel encapsulation. This network model uses VXLAN to encapsulate Ethernet packets into UDP packets and transmits them in tunnels. Open vSwitch serves as the backend virtual switch.
        • VPC network: The VPC network uses VPC routing to integrate with the underlying network. This network model is applicable to performance-intensive scenarios. The maximum number of nodes allowed in a cluster depends on the route quota in a VPC network. Each node is assigned a CIDR block of a fixed size. This networking model is free from tunnel encapsulation overhead and outperforms the container tunnel network model. In addition, as VPC routing includes routes to node IP addresses and the container CIDR block, container pods in a cluster can be directly accessed from outside the cluster.
        • Developed by CCE, Cloud Native Network 2.0 deeply integrates Elastic Network Interfaces (ENIs) and Sub Network Interfaces (sub-ENIs) of VPC. Container IP addresses are allocated from the VPC CIDR block. ELB passthrough networking is supported to direct access requests to containers. Security groups and elastic IPs (EIPs) are bound to deliver high performance.
        +

        The performance, networking scale, and application scenarios of a container network vary according to the container network model. For details about the functions and features of different container network models, see Overview.

        +
      • Service Network

        Service is also a Kubernetes object. Each Service has a fixed IP address. When creating a cluster on CCE, you can specify the Service CIDR block. The Service CIDR block cannot overlap with the node or container CIDR block. The Service CIDR block can be used only within a cluster.

        +
      +
      +

      Service

      A Service is used for pod access. With a fixed IP address, a Service forwards access traffic to pods and performs load balancing for these pods.

      +
      Figure 1 Accessing pods through a Service
      +

      You can configure the following types of Services:

      +
      • ClusterIP: used to make the Service only reachable from within a cluster.
      • NodePort: used for access from outside a cluster. A NodePort Service is accessed through the port on the node.
      • LoadBalancer: used for access from outside a cluster. It is an extension of NodePort, to which a load balancer routes, and external systems only need to access the load balancer.
      +

      For details about the Service, see Service Overview.

      +
      +

      Ingress

      Services forward requests using layer-4 TCP and UDP protocols. Ingresses forward requests using layer-7 HTTP and HTTPS protocols. Domain names and paths can be used to achieve finer granularities.

      +
      Figure 2 Ingress and Service
      +

      For details about the ingress, see Ingress Overview.

      +
      +

      Access Scenarios

      Workload access scenarios can be categorized as follows:

      +
      • Intra-cluster access: A ClusterIP Service is used for workloads in the same cluster to access each other.
      • Access from outside a cluster: A Service (NodePort or LoadBalancer type) or an ingress is recommended for a workload outside a cluster to access workloads in the cluster.
        • Access through the internet requires an EIP to be bound to the node or load balancer.
        • Access through an intranet uses only the intranet IP address of the node or load balancer. If workloads are located in different VPCs, a peering connection is required to enable communication between different VPCs.
        +
      • External access initiated by a workload:
        • Accessing an intranet: The workload accesses the intranet address, but the implementation method varies depending on container network models. Ensure that the peer security group allows the access requests from the container CIDR block.
        • Accessing a public network: You need to assign an EIP to the node where the workload runs (when the VPC network or tunnel network model is used), bind an EIP to the pod IP address (when the Cloud Native Network 2.0 model is used), or configure SNAT rules through the NAT gateway. For details, see Accessing Public Networks from a Container.
        +
      +
      Figure 3 Network access diagram
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0011.html b/docs/cce/umn/cce_10_0011.html new file mode 100644 index 00000000..1a08ed51 --- /dev/null +++ b/docs/cce/umn/cce_10_0011.html @@ -0,0 +1,118 @@ + + +

      Intra-Cluster Access (ClusterIP)

      +

      Scenario

      ClusterIP Services allow workloads in the same cluster to use their cluster-internal domain names to access each other.

      +

      The cluster-internal domain name format is <Service name>.<Namespace of the workload>.svc.cluster.local:<Port>, for example, nginx.default.svc.cluster.local:80.

      +

      Figure 1 shows the mapping relationships between access channels, container ports, and access ports.

      +
      Figure 1 Intra-cluster access (ClusterIP)
      +
      +

      Creating a ClusterIP Service

      1. Log in to the CCE console and access the cluster console.
      2. Choose Networking in the navigation pane and click Create Service in the upper right corner.
      3. Set intra-cluster access parameters.

        • Service Name: Service name, which can be the same as the workload name.
        • Service Type: Select ClusterIP.
        • Namespace: Namespace to which the workload belongs.
        • Selector: Add a label and click Add. A Service selects a pod based on the added label. You can also click Reference Workload Label to reference the label of an existing workload. In the dialog box that is displayed, select a workload and click OK.
        • Port Settings
          • Protocol: protocol used by the Service.
          • Service Port: port used by the Service. The port number ranges from 1 to 65535.
          • Container Port: port on which the workload listens. For example, Nginx uses port 80 by default.
          +
        +

      4. Click OK.
      +
      +

      Setting the Access Type Using kubectl

      You can run kubectl commands to set the access type (Service). This section uses an Nginx workload as an example to describe how to implement intra-cluster access using kubectl.

      +
      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Create and edit the nginx-deployment.yaml and nginx-clusterip-svc.yaml files.

        The file names are user-defined. nginx-deployment.yaml and nginx-clusterip-svc.yaml are merely example file names.

        +
        vi nginx-deployment.yaml
        apiVersion: apps/v1
        +kind: Deployment
        +metadata:
        +  name: nginx
        +spec:
        +  replicas: 1
        +  selector:
        +    matchLabels:
        +      app: nginx
        +  template:
        +    metadata:
        +      labels:
        +        app: nginx
        +    spec:
        +      containers:
        +      - image: nginx:latest
        +        name: nginx
        +      imagePullSecrets:
        +      - name: default-secret
        +
        +
        vi nginx-clusterip-svc.yaml
        apiVersion: v1
        +kind: Service
        +metadata:
        +  labels:
        +    app: nginx
        +  name: nginx-clusterip
        +spec:
        +  ports:
        +  - name: service0
        +    port: 8080                # Port for accessing a Service.
        +    protocol: TCP             # Protocol used for accessing a Service. The value can be TCP or UDP.
        +    targetPort: 80            # Port used by a Service to access the target container. This port is closely related to the applications running in a container. In this example, the Nginx image uses port 80 by default.
        +  selector:                   # Label selector. A Service selects a pod based on the label and forwards the requests for accessing the Service to the pod. In this example, select the pod with the app:nginx label.
        +    app: nginx
        +  type: ClusterIP             # Type of a Service. ClusterIP indicates that a Service is only reachable from within the cluster.
        +
        +

      3. Create a workload.

        kubectl create -f nginx-deployment.yaml

        +

        If information similar to the following is displayed, the workload has been created.

        +
        deployment "nginx" created
        +

        kubectl get po

        +

        If information similar to the following is displayed, the workload is running.

        +
        NAME                     READY     STATUS             RESTARTS   AGE
        +nginx-2601814895-znhbr   1/1       Running            0          15s
        +

      4. Create a Service.

        kubectl create -f nginx-clusterip-svc.yaml

        +

        If information similar to the following is displayed, the Service has been created.

        +
        service "nginx-clusterip" created
        +

        kubectl get svc

        +

        If information similar to the following is displayed, the Service has been created, and a cluster-internal IP address has been assigned to the Service.

        +
        # kubectl get svc
        +NAME              TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)    AGE
        +kubernetes        ClusterIP   10.247.0.1     <none>        443/TCP    4d6h
        +nginx-clusterip   ClusterIP   10.247.74.52   <none>        8080/TCP   14m
        +

      5. Access a Service.

        A Service can be accessed from containers or nodes in a cluster.

        +

        Create a pod, access the pod, and run the curl command to access IP address:Port or the domain name of the Service, as shown in the following example.

        +

        The domain name suffix can be omitted. In the same namespace, you can directly use nginx-clusterip:8080 for access. In other namespaces, you can use nginx-clusterip.default:8080 for access.

        +
        # kubectl run -i --tty --image nginx:alpine test --rm /bin/sh
        +If you do not see a command prompt, try pressing Enter.
        +/ # curl 10.247.74.52:8080
        +<!DOCTYPE html>
        +<html>
        +<head>
        +<title>Welcome to nginx!</title>
        +<style>
        +    body {
        +        width: 35em;
        +        margin: 0 auto;
        +        font-family: Tahoma, Verdana, Arial, sans-serif;
        +    }
        +</style>
        +</head>
        +<body>
        +<h1>Welcome to nginx!</h1>
        +<p>If you see this page, the nginx web server is successfully installed and
        +working. Further configuration is required.</p>
        +
        +<p>For online documentation and support please refer to
        +<a href="http://nginx.org/">nginx.org</a>.<br/>
        +Commercial support is available at
        +<a href="http://nginx.com/">nginx.com</a>.</p>
        +
        +<p><em>Thank you for using nginx.</em></p>
        +</body>
        +</html>
        +/ # curl nginx-clusterip.default.svc.cluster.local:8080
        +...
        +<h1>Welcome to nginx!</h1>
        +...
        +/ # curl nginx-clusterip.default:8080
        +...
        +<h1>Welcome to nginx!</h1>
        +...
        +/ # curl nginx-clusterip:8080
        +...
        +<h1>Welcome to nginx!</h1>
        +...
        +

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0012.html b/docs/cce/umn/cce_10_0012.html new file mode 100644 index 00000000..222dec7e --- /dev/null +++ b/docs/cce/umn/cce_10_0012.html @@ -0,0 +1,236 @@ + + +

      Creating a Node Pool

      +

      Scenario

      This section describes how to create a node pool and perform operations on the node pool. For details about how a node pool works, see Node Pool Overview.

      +
      +

      Notes and Constraints

      • The autoscaler add-on needs to be installed for node auto scaling. For details about the add-on installation and parameter configuration, see autoscaler.
      +
      +

      Procedure

      1. Log in to the CCE console.
      2. Click the cluster name and access the cluster console. Choose Nodes in the navigation pane and click the Node Pools tab on the right.
      3. In the upper right corner of the page, click Create Node Pool.

        Basic Settings

        + +
        + + + + + + + + + + + + + +
        Table 1 Basic settings

        Parameter

        +

        Description

        +

        Node Pool Name

        +

        Name of a node pool. By default, the name is in the format of Cluster name-nodepool-Random number. If you do not want to use the default name format, you can customize the name.

        +

        Nodes

        +

        Number of nodes to be created in this node pool.

        +

        Auto Scaling

        +

        By default, auto scaling is disabled.

        +

        Install the autoscaler add-on to enable auto scaling.

        +

        After auto scaling is enabled, nodes in the node pool are automatically created or deleted based on cluster loads.

        +
        • Maximum Nodes and Minimum Nodes: You can set the maximum and minimum number of nodes to ensure that the number of nodes to be scaled is within a proper range.
        • Priority: Set this parameter based on service requirements. A larger value indicates a higher priority. For example, if this parameter is set to 1 and 4 respectively for node pools A and B, B has a higher priority than A. If the priorities of multiple node pools are set to the same value, for example, 2, the node pools are not prioritized and the system performs scaling based on the minimum resource waste principle.
          NOTE:

          CCE selects a node pool for auto scaling based on the following policies:

          +
          1. CCE uses algorithms to determine whether a node pool meets the conditions to allow scheduling of a pod in the pending state, including whether the node resources are greater than those requested by the pod, and whether the nodeSelector, nodeAffinity, and taints meet the conditions. In addition, node pools that failed to scale out (due to insufficient resources or other reasons) and are still in the 15-minute cooldown interval are filtered out.
          2. If multiple node pools meet the scaling requirements, the system checks the priority of each node pool and selects the node pool with the highest priority for scaling. The value ranges from 0 to 100 and the default priority is 0. The value 100 indicates the highest priority, and the value 0 indicates the lowest priority.
          3. If multiple node pools have the same priority or no priority is configured for them, the system selects the node pool that will consume the least resources based on the configured VM specification.
          4. If the VM specifications of multiple node pools are the same but the node pools are deployed in different AZs, the system randomly selects a node pool to trigger scaling.
          +
          +
        • Cooldown Period: Required. The unit is minute. This field indicates the period during which nodes added to the current node pool cannot be scaled in.

          Scale-in cooling intervals can be configured in the node pool settings and the autoscaler add-on settings.

          +

          Scale-in cooling interval configured in a node pool

          +

          This interval indicates the period during which nodes added to the current node pool after a scale-out operation cannot be deleted. This interval takes effect at the node pool level.

          +

          Scale-in cooling interval configured in the autoscaler add-on

          +

          The interval after a scale-out indicates the period during which the entire cluster cannot be scaled in after the autoscaler add-on triggers scale-out (due to the unschedulable pods, metrics, and scaling policies). This interval takes effect at the cluster level.

          +

          The interval after a node is deleted indicates the period during which the cluster cannot be scaled in after the autoscaler add-on triggers scale-in. This interval takes effect at the cluster level.

          +

          The interval after a failed scale-in indicates the period during which the cluster cannot be scaled in after the autoscaler add-on triggers scale-in. This interval takes effect at the cluster level.

          +
        +
        NOTE:

        You are advised not to store important data on nodes in a node pool, because nodes may be deleted during auto scaling and the data on them cannot be restored.

        +
        +
        +
        +

        Compute Settings

        +
        You can configure the specifications and OS of a cloud server, on which your containerized applications run. +
        + + + + + + + + + + + + + + + + + + + + + + +
        Table 2 Configuration parameters

        Parameter

        +

        Description

        +

        AZ

        +

        AZ where the node is located. Nodes in a cluster can be created in different AZs for higher reliability. The value cannot be changed after the node is created.

        +

        You are advised to select Random to deploy your node in a random AZ based on the selected node flavor.

        +

        An AZ is a physical region where resources use independent power supply and networks. AZs are physically isolated but interconnected through an internal network. To enhance workload availability, create nodes in different AZs.

        +

        Node Type

        +

        For a CCE cluster, ECS and BMS are supported.

        +

        CCE Turbo clusters support ECSs of the VM and physical types.

        +

        Container Engine

        +

        CCE clusters support Docker. Starting from CCE 1.23, containerd is supported.

        +

        For a CCE Turbo cluster, both Docker and containerd are supported. For details, see Mapping between Node OSs and Container Engines.

        +

        Specifications

        +

        Select node specifications that best fit your business needs.

        +

        OS

        +

        Select an OS type. Different types of nodes support different OSs. For details, see Supported Node Specifications.

        +

        Public image: Select an OS for the node.

        +

        Private image: You can use private images.

        +

        Login Mode

        +
        • Key Pair

          Select the key pair used to log in to the node. You can select a shared key.

          +

          A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create Key Pair.

          +
        +
        +
        +
        +

        Storage Settings

        +
        Configure storage resources on a node for the containers running on it. Set the disk size according to site requirements. +
        + + + + + + + + + + +
        Table 3 Configuration parameters

        Parameter

        +

        Description

        +

        System Disk

        +

        System disk used by the node OS. The value ranges from 40 GB to 1,024 GB. The default value is 50 GB.

        +
        Encryption: System disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption function. This function is available only in certain regions.
        • Encryption is not selected by default.
        • After you select Encryption, you can select an existing key in the displayed dialog box. If no key is available, click View Key List to create a key. After the key is created, click the refresh icon.
        +
        +

        Data Disk

        +

        Data disk used by the container runtime and kubelet on the node.

        +

        At least one data disk is required for the container runtime and kubelet. The data disk cannot be deleted or detached. Otherwise, the node will be unavailable.

        +

        Click Expand to set the following parameters:

        +
        • Allocate Disk Space: Select this option to define the disk space occupied by the container runtime to store the working directories, container image data, and image metadata. For details about how to allocate data disk space, see Data Disk Space Allocation.
        • Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption function. This function is available only in certain regions.
          • Encryption is not selected by default.
          • After you select Encryption, you can select an existing key in the displayed dialog box. If no key is available, click View Key List to create a key. After the key is created, click the refresh icon.
          +
        +

        Adding Multiple Data Disks

        +

        A maximum of four data disks can be added. By default, raw disks are created without any processing. You can also click Expand and select any of the following options:

        +
        • Default: By default, a raw disk is created without any processing.
        • Mount Disk: The data disk is attached to a specified directory.
        +

        Local Disk Description

        +

        If the node flavor is disk-intensive or ultra-high I/O, one data disk can be a local disk.

        +

        Local disks may break down and do not ensure data reliability. It is recommended that you store service data in EVS disks, which are more reliable than local disks.

        +
        +
        +
        +

        Network Settings

        +
        Configure networking resources to allow node and containerized application access. +
        + + + + + + + + + + + + + +
        Table 4 Configuration parameters

        Parameter

        +

        Description

        +

        Node Subnet

        +

        The node subnet selected during cluster creation is used by default. You can choose another subnet instead.

        +

        Node IP Address

        +

        Random allocation is supported.

        +

        Associate Security Group

        +

        Security group used by the nodes created in the node pool. A maximum of 5 security groups can be selected.

        +

        When a cluster is created, a node security group named {Cluster name}-cce-node-{Random ID} is created and used by default.

        +

        Traffic needs to pass through certain ports in the node security group to ensure node communications. Ensure that you have enabled these ports if you select another security group.

        +
        +
        +
        +

        Advanced Settings

        +
        Configure advanced node capabilities such as labels, taints, and startup command. +
        + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Table 5 Advanced configuration parameters

        Parameter

        +

        Description

        +

        Kubernetes Label

        +

        Click Add Label to set the key-value pair attached to the Kubernetes objects (such as pods). A maximum of 20 labels can be added.

        +

        Labels can be used to distinguish nodes. With workload affinity settings, container pods can be scheduled to a specified node. For more information, see Labels and Selectors.

        +

        Resource Tag

        +

        You can add resource tags to classify resources.

        +

        You can create predefined tags in Tag Management Service (TMS). Predefined tags are visible to all service resources that support the tagging function. You can use these tags to improve tagging and resource migration efficiency.

        +

        CCE will automatically create the "CCE-Dynamic-Provisioning-Node=node id" tag.

        +

        Taint

        +
        This parameter is left blank by default. You can add taints to set anti-affinity for the node. A maximum of 10 taints are allowed for each node. Each taint contains the following parameters:
        • Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
        • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
        • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
        +
        +

        For details, see Managing Node Taints. A sample taint definition is provided after this procedure.

        +
        NOTE:

        For clusters earlier than v1.19, a workload may have been scheduled to a node before the taint is added. To avoid such a situation, select a cluster of v1.19 or later.

        +
        +

        Max. Pods

        +

        Maximum number of pods that can run on the node, including the default system pods.

        +

        This limit prevents the node from being overloaded with pods.

        +

        This number is also decided by other factors. For details, see Maximum Number of Pods That Can Be Created on a Node.

        +

        ECS Group

        +

        An ECS group logically groups ECSs. The ECSs in the same ECS group comply with the same policy associated with the ECS group.

        +

        Anti-affinity: ECSs in an ECS group are deployed on different physical hosts to improve service reliability.

        +

        Select an existing ECS group, or click Add ECS Group to create one. After the ECS group is created, click the refresh button.

        +

        Pre-installation Command

        +

        Enter commands. A maximum of 1,000 characters are allowed.

        +

        The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed.

        +

        Post-installation Command

        +

        Enter commands. A maximum of 1,000 characters are allowed.

        +

        The script will be executed after Kubernetes software is installed and will not affect the installation.

        +

        Agency

        +

        An agency is created by the account administrator on the IAM console. By creating an agency, you can share your cloud server resources with another account, or entrust a more professional person or team to manage your resources.

        +

        If no agency is available, click Create Agency on the right to create one.

        +
        +
        +
        +

      4. Click Next: Confirm.
      5. Click Submit.
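
      For reference, the taints configured in Table 5 correspond to the taints field in the node specification, as in the following minimal sketch. The node name, key, value, and effect shown here are hypothetical; on CCE, taints for node pool nodes are normally configured on the console as described above.

      apiVersion: v1
      kind: Node
      metadata:
        name: 192.168.0.100          # Hypothetical node name.
      spec:
        taints:
        - key: dedicated             # Hypothetical taint key.
          value: gpu                 # Hypothetical taint value.
          effect: NoSchedule         # Pods without a matching toleration are not scheduled to this node.

      Only pods that declare a matching toleration can be scheduled to a node carrying such a taint.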
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0014.html b/docs/cce/umn/cce_10_0014.html new file mode 100644 index 00000000..779f5bb1 --- /dev/null +++ b/docs/cce/umn/cce_10_0014.html @@ -0,0 +1,788 @@ + + +

      LoadBalancer

      +

      Scenario

      A workload can be accessed from public networks through a load balancer, which is more secure and reliable than EIP.

      +

      The LoadBalancer access address is in the format of <IP address of public network load balancer>:<access port>, for example, 10.117.117.117:80.

      +

      In this access mode, requests are transmitted through an ELB load balancer to a node and then forwarded to the destination pod through the Service.

      +
      Figure 1 LoadBalancer
      +

      When CCE Turbo clusters and dedicated load balancers are used, passthrough networking is supported to reduce service latency and ensure zero performance loss.

      +

      External access requests are directly forwarded from a load balancer to pods. Internal access requests can be forwarded to a pod through a Service.

      +
      Figure 2 Passthrough networking
      +
      +

      Notes and Constraints

      • LoadBalancer Services allow workloads to be accessed from public networks through ELB. This access mode has the following restrictions:
        • It is recommended that automatically created load balancers not be used by other resources. Otherwise, these load balancers cannot be completely deleted, causing residual resources.
        • Do not change the listener name for the load balancer in clusters of v1.15 and earlier. Otherwise, the load balancer cannot be accessed.
        +
      • After a Service is created, if the affinity setting is switched from the cluster level to the node level, the connection tracking table will not be cleared. You are advised not to modify the Service affinity setting after the Service is created. If you need to modify it, create a Service again.
      • If the service affinity is set to the node level (that is, externalTrafficPolicy is set to Local), the cluster may fail to access the Service by using the ELB address. For details, see Why a Cluster Fails to Access Services by Using the ELB Address.
      • CCE Turbo clusters support only cluster-level service affinity.
      • Dedicated ELB load balancers can be used only in clusters of v1.17 and later.
      • Dedicated load balancers must be of the network type (TCP/UDP) and support private networks (have a private IP address). If the Service needs to support HTTP, the dedicated load balancer flavor must include HTTP/HTTPS (application load balancing) in addition to TCP/UDP (network load balancing).
      • If you create a LoadBalancer Service on the CCE console, a random node port is automatically generated. If you use kubectl to create a LoadBalancer Service, a random node port is generated unless you specify one.
      • In a CCE cluster, if the cluster-level affinity is configured for a LoadBalancer Service, requests are distributed to the node ports of each node using SNAT when entering the cluster. The number of node ports cannot exceed the number of available node ports on the node. If the Service affinity is at the node level (local), there is no such constraint. In a CCE Turbo cluster, this constraint applies to shared ELB load balancers, but not dedicated ones. You are advised to use dedicated ELB load balancers in CCE Turbo clusters.
      • When the cluster service forwarding (proxy) mode is IPVS, the node IP cannot be configured as the external IP of the Service. Otherwise, the node is unavailable.
      • In a cluster using the IPVS proxy mode, if the ingress and Service use the same ELB load balancer, the ingress cannot be accessed from the nodes and containers in the cluster because kube-proxy mounts the LoadBalancer Service address to the ipvs-0 bridge. This bridge intercepts the traffic of the load balancer connected to the ingress. You are advised to use different ELB load balancers for the ingress and Service.
      +
      +

      Creating a LoadBalancer Service

      1. Log in to the CCE console and click the cluster name to access the cluster.
      2. Choose Networking in the navigation pane and click Create Service in the upper right corner.
      3. Set parameters.

        • Service Name: Specify a Service name, which can be the same as the workload name.
        • Access Type: Select LoadBalancer.
        • Namespace: Namespace to which the workload belongs.
        • Service Affinity: For details, see externalTrafficPolicy (Service Affinity).
          • Cluster level: The IP addresses and access ports of all nodes in a cluster can be used to access the workload associated with the Service. Service access will cause performance loss due to route redirection, and the source IP address of the client cannot be obtained.
          • Node level: Only the IP address and access port of the node where the workload is located can access the workload associated with the Service. Service access will not cause performance loss due to route redirection, and the source IP address of the client can be obtained.
          +
        • Selector: Add a label and click Add. A Service selects a pod based on the added label. You can also click Reference Workload Label to reference the label of an existing workload. In the dialog box that is displayed, select a workload and click OK.
        • Load Balancer

          Select the load balancer to interconnect. Only load balancers in the same VPC as the cluster are supported. If no load balancer is available, click Create Load Balancer to create one on the ELB console.

          +

          You can click Edit and configure load balancer parameters in the Load Balancer dialog box.

          +
          • Distribution Policy: Three algorithms are available: weighted round robin, weighted least connections, and source IP hash.
            • Weighted round robin: Requests are forwarded to different servers based on their weights, which indicate server processing performance. Backend servers with higher weights receive proportionately more requests, whereas equal-weighted servers receive the same number of requests. This algorithm is often used for short connections, such as HTTP services.
            • Weighted least connections: In addition to the weight assigned to each server, the number of connections processed by each backend server is also considered. Requests are forwarded to the server with the lowest connections-to-weight ratio. Building on least connections, the weighted least connections algorithm assigns a weight to each server based on their processing capability. This algorithm is often used for persistent connections, such as database connections.
            • Source IP hash: The source IP address of each request is calculated using the hash algorithm to obtain a unique hash key, and all backend servers are numbered. The generated key allocates the client to a particular server. This enables requests from different clients to be distributed in load balancing mode and ensures that requests from the same client are forwarded to the same server. This algorithm applies to TCP connections without cookies.
            +
            +
          • Type: This function is disabled by default. You can select Source IP address. Listeners ensure session stickiness based on IP addresses. Requests from the same IP address will be forwarded to the same backend server.
          • Health Check: This function is disabled by default. The health check is performed by the load balancer. When TCP is selected during port settings, you can choose either TCP or HTTP. When UDP is selected, only UDP is supported. By default, the service port (node port and container port of the Service) is used for the health check. You can also specify another port. After the port is specified, a service port named cce-healthz will be added for the Service.
          +
        • Port Settings
          • Protocol: protocol used by the Service.
          • Service Port: port used by the Service. The port number ranges from 1 to 65535.
          • Container Port: port on which the workload listens. For example, Nginx uses port 80 by default.
          +
        • Annotation: The LoadBalancer Service has some advanced CCE functions, which are implemented by annotations. For details, see Service Annotations. When you use kubectl to create a container, annotations will be used. For details, see Using kubectl to Create a Service (Using an Existing Load Balancer) and Using kubectl to Create a Service (Automatically Creating a Load Balancer).
        +

      4. Click OK.
      +
      +

      Using kubectl to Create a Service (Using an Existing Load Balancer)

      You can set the access type when creating a workload using kubectl. This section uses an Nginx workload as an example to describe how to add a LoadBalancer Service using kubectl.

      +
      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Create and edit the nginx-deployment.yaml and nginx-elb-svc.yaml files.

        The file names are user-defined. nginx-deployment.yaml and nginx-elb-svc.yaml are merely example file names.

        +

        vi nginx-deployment.yaml

        +
        apiVersion: apps/v1
        +kind: Deployment
        +metadata:
        +  name: nginx
        +spec:
        +  replicas: 1
        +  selector:
        +    matchLabels:
        +      app: nginx
        +  template:
        +    metadata:
        +      labels:
        +        app: nginx
        +    spec:
        +      containers:
        +      - image: nginx 
        +        name: nginx
        +      imagePullSecrets:
        +      - name: default-secret
        +

        +

        vi nginx-elb-svc.yaml

        +

        Before enabling sticky session, ensure that the following conditions are met:

        +
        • The workload protocol is TCP.
        • Anti-affinity has been configured between pods of the workload. That is, all pods of the workload are deployed on different nodes. For details, see Scheduling Policy (Affinity/Anti-affinity).
        +
        +
        apiVersion: v1 
        +kind: Service 
        +metadata: 
        +  annotations:
        +    kubernetes.io/elb.id: 5083f225-9bf8-48fa-9c8b-67bd9693c4c0   # ELB ID. Replace it with the actual value.
        +    kubernetes.io/elb.class: union                   # Load balancer type
        +  name: nginx 
        +spec: 
        +  ports: 
        +  - name: service0 
        +    port: 80     # Port for accessing the Service, which is also the listener port on the load balancer.
        +    protocol: TCP 
        +    targetPort: 80  # Port used by a Service to access the target container. This port is closely related to the applications running in a container. 
        +  selector: 
        +    app: nginx 
        +  type: LoadBalancer
        + +
        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Table 1 Key parameters

        Parameter

        +

        Mandatory

        +

        Type

        +

        Description

        +

        kubernetes.io/elb.class

        +

        Yes

        +

        String

        +

        Select a proper load balancer type as required.

        +

        The value can be:

        +
        • union: shared load balancer
        • performance: dedicated load balancer, which can be used only in clusters of v1.17 and later.
        +

        kubernetes.io/elb.session-affinity-mode

        +

        No

        +

        String

        +

        Listeners ensure session stickiness based on IP addresses. Requests from the same IP address will be forwarded to the same backend server.

        +
        • Disabling sticky session: Do not set this parameter.
        • Enabling sticky session: Set this parameter to SOURCE_IP, indicating that the sticky session is based on the source IP address.
        +

        kubernetes.io/elb.session-affinity-option

        +

        No

        +

        Table 2 Object

        +

        This parameter specifies the sticky session timeout.

        +

        kubernetes.io/elb.id

        +

        Yes

        +

        String

        +

        This parameter indicates the ID of a load balancer. The value can contain 1 to 100 characters.

        +

        Mandatory when an existing load balancer is to be associated.

        +

        Obtaining the load balancer ID:

        +

        On the management console, click Service List, and choose Networking > Elastic Load Balance. Click the name of the target load balancer. On the Summary tab page, find and copy the ID.

        +
        NOTE:

        The system preferentially interconnects with the load balancer based on the kubernetes.io/elb.id field. If this field is not specified, the spec.loadBalancerIP field is used (optional and available only in 1.23 and earlier versions).

        +

        Do not use the spec.loadBalancerIP field to connect to the load balancer. This field is deprecated in Kubernetes. For details, see Deprecation.

        +
        +

        kubernetes.io/elb.subnet-id

        +

        -

        +

        String

        +

        This parameter indicates the ID of the subnet where the cluster is located. The value can contain 1 to 100 characters.

        +
        • Mandatory when a cluster of v1.11.7-r0 or earlier is to be automatically created.
        • Optional for clusters later than v1.11.7-r0.
        +

        kubernetes.io/elb.lb-algorithm

        +

        No

        +

        String

        +

        This parameter indicates the load balancing algorithm of the backend server group. The default value is ROUND_ROBIN.

        +

        Options:

        +
        • ROUND_ROBIN: weighted round robin algorithm
        • LEAST_CONNECTIONS: weighted least connections algorithm
        • SOURCE_IP: source IP hash algorithm
        +

        When the value is SOURCE_IP, the weights of backend servers in the server group are invalid.

        +

        kubernetes.io/elb.health-check-flag

        +

        No

        +

        String

        +

        Whether to enable the ELB health check.

        +
        • Enabling health check: Leave this parameter blank or set it to on.
        • Disabling health check: Set this parameter to off.
        +

        If this parameter is enabled, the kubernetes.io/elb.health-check-option field must also be specified at the same time.

        +

        kubernetes.io/elb.health-check-option

        +

        No

        +

        Table 3 Object

        +

        ELB health check configuration items. A combined example is provided after this procedure.

        +
        +
        + +
        + + + + + + + + + + + +
        Table 2 Data structure of the elb.session-affinity-option field

        Parameter

        +

        Mandatory

        +

        Type

        +

        Description

        +

        persistence_timeout

        +

        Yes

        +

        String

        +

        Sticky session timeout, in minutes. This parameter is valid only when elb.session-affinity-mode is set to SOURCE_IP.

        +

        Value range: 1 to 60. Default value: 60

        +
        +
        + +
        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Table 3 Data structure description of the elb.health-check-option field

        Parameter

        +

        Mandatory

        +

        Type

        +

        Description

        +

        delay

        +

        No

        +

        String

        +

        Initial waiting time (in seconds) for starting the health check.

        +

        Value range: 1 to 50. Default value: 5

        +

        timeout

        +

        No

        +

        String

        +

        Health check timeout, in seconds.

        +

        Value range: 1 to 50. Default value: 10

        +

        max_retries

        +

        No

        +

        String

        +

        Maximum number of health check retries.

        +

        Value range: 1 to 10. Default value: 3

        +

        protocol

        +

        No

        +

        String

        +

        Health check protocol.

        +

        Default value: protocol of the associated Service

        +

        Value options: TCP, UDP, or HTTP

        +

        path

        +

        No

        +

        String

        +

        Health check URL. This parameter needs to be configured when the protocol is HTTP.

        +

        Default value: /

        +

        The value can contain 1 to 10,000 characters.

        +
        +
        +

      3. Create a workload.

        kubectl create -f nginx-deployment.yaml

        +

        If information similar to the following is displayed, the workload has been created.

        +
        deployment/nginx created
        +

        kubectl get pod

        +

        If information similar to the following is displayed, the workload is running.

        +
        NAME                     READY     STATUS             RESTARTS   AGE
        +nginx-2601814895-c1xhw   1/1       Running            0          6s
        +

      4. Create a Service.

        kubectl create -f nginx-elb-svc.yaml

        +

        If information similar to the following is displayed, the Service has been created.

        +
        service/nginx created
        +

        kubectl get svc

        +

        If information similar to the following is displayed, the access type has been set successfully, and the workload is accessible.

        +
        NAME         TYPE           CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
        +kubernetes   ClusterIP      10.247.0.1       <none>        443/TCP        3d
        +nginx        LoadBalancer   10.247.130.196   10.78.42.242   80:31540/TCP   51s
        +

      5. Enter the URL in the address box of the browser, for example, 10.78.42.242:80. 10.78.42.242 indicates the IP address of the load balancer, and 80 indicates the access port displayed on the CCE console.

        Nginx is accessible.

        +
        Figure 3 Accessing Nginx through the LoadBalancer Service
        +

      +
      +
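
      The following sketch combines the health check annotations described in Table 1 and Table 3 for a Service that uses an existing shared load balancer. The ELB ID is a placeholder, and the delay, timeout, and max_retries values are example values within the documented ranges.

      apiVersion: v1
      kind: Service
      metadata:
        name: nginx
        annotations:
          kubernetes.io/elb.id: <your-elb-id>          # Replace with the ID of an existing load balancer.
          kubernetes.io/elb.class: union               # Shared load balancer.
          kubernetes.io/elb.health-check-flag: 'on'    # Enable the ELB health check.
          kubernetes.io/elb.health-check-option: '{
              "protocol": "TCP",
              "delay": "5",
              "timeout": "10",
              "max_retries": "3"
          }'
      spec:
        ports:
        - name: service0
          port: 80
          protocol: TCP
          targetPort: 80
        selector:
          app: nginx
        type: LoadBalancer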

      Using kubectl to Create a Service (Automatically Creating a Load Balancer)

      You can add a Service when creating a workload using kubectl. This section uses an Nginx workload as an example to describe how to add a LoadBalancer Service using kubectl.

      +
      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Create and edit the nginx-deployment.yaml and nginx-elb-svc.yaml files.

        The file names are user-defined. nginx-deployment.yaml and nginx-elb-svc.yaml are merely example file names.

        +

        vi nginx-deployment.yaml

        +
        apiVersion: apps/v1
        +kind: Deployment
        +metadata:
        +  name: nginx
        +spec:
        +  replicas: 1
        +  selector:
        +    matchLabels:
        +      app: nginx
        +  template:
        +    metadata:
        +      labels:
        +        app: nginx
        +    spec:
        +      containers:
        +      - image: nginx 
        +        name: nginx
        +      imagePullSecrets:
        +      - name: default-secret
        +

        +

        vi nginx-elb-svc.yaml

        +

        Before enabling sticky session, ensure that the following conditions are met:

        +
        • The workload protocol is TCP.
        • Anti-affinity has been configured between pods of the workload. That is, all pods of the workload are deployed on different nodes. For details, see Scheduling Policy (Affinity/Anti-affinity).
        +
        +
        Example of a Service using a shared, public network load balancer:
        apiVersion: v1 
        +kind: Service 
        +metadata: 
        +  annotations:   
        +    kubernetes.io/elb.class: union
        +    kubernetes.io/elb.autocreate: 
        +        '{
        +            "type": "public",
        +            "bandwidth_name": "cce-bandwidth-1551163379627",
        +            "bandwidth_chargemode": "bandwidth",
        +            "bandwidth_size": 5,
        +            "bandwidth_sharetype": "PER",
        +            "eip_type": "5_bgp"
        +        }'
        +  labels: 
        +    app: nginx 
        +  name: nginx 
        +spec: 
        +  ports: 
        +  - name: service0 
        +    port: 80
        +    protocol: TCP 
        +    targetPort: 80
        +  selector: 
        +    app: nginx 
        +  type: LoadBalancer
        +
        +
        Example Service using a public network dedicated load balancer (for clusters of v1.17 and later only):
        apiVersion: v1
        +kind: Service
        +metadata:
        +  name: nginx
        +  labels:
        +    app: nginx
        +  namespace: default
        +  annotations:
        +    kubernetes.io/elb.class: performance
        +    kubernetes.io/elb.autocreate: 
        +        '{
        +            "type": "public",
        +            "bandwidth_name": "cce-bandwidth-1626694478577",
        +            "bandwidth_chargemode": "bandwidth",
        +            "bandwidth_size": 5,
        +            "bandwidth_sharetype": "PER",
        +            "eip_type": "5_bgp",
        +            "available_zone": [
        +                ""
        +            ],
        +            "l4_flavor_name": "L4_flavor.elb.s1.small"
        +        }'
        +spec:
        +  selector:
        +    app: nginx
        +  ports:
        +  - name: cce-service-0
        +    targetPort: 80
        +    nodePort: 0
        +    port: 80
        +    protocol: TCP
        +  type: LoadBalancer
        +
        + +
        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Table 4 Key parameters

        Parameter

        +

        Mandatory

        +

        Type

        +

        Description

        +

        kubernetes.io/elb.class

        +

        Yes

        +

        String

        +

        Select a proper load balancer type as required.

        +

        The value can be:

        +
        • union: shared load balancer
        • performance: dedicated load balancer, which can be used only in clusters of v1.17 and later.
        +

        kubernetes.io/elb.subnet-id

        +

        -

        +

        String

        +

        This parameter indicates the ID of the subnet where the cluster is located. The value can contain 1 to 100 characters.

        +
        • Mandatory when a cluster of v1.11.7-r0 or earlier is to be automatically created.
        • Optional for clusters later than v1.11.7-r0.
        +

        kubernetes.io/elb.session-affinity-option

        +

        No

        +

        Table 2 Object

        +

        Sticky session timeout.

        +

        kubernetes.io/elb.autocreate

        +

        Yes

        +

        elb.autocreate object

        +

        Whether to automatically create a load balancer associated with the Service.

        +

        Example:

        +
        • Automatically created public network load balancer:

          {"type":"public","bandwidth_name":"cce-bandwidth-1551163379627","bandwidth_chargemode":"bandwidth","bandwidth_size":5,"bandwidth_sharetype":"PER","eip_type":"5_bgp","name":"james"}

          +
        • Automatically created private network load balancer:

          {"type":"inner","name":"A-location-d-test"}

          +
        +

        kubernetes.io/elb.lb-algorithm

        +

        No

        +

        String

        +

        This parameter indicates the load balancing algorithm of the backend server group. The default value is ROUND_ROBIN.

        +

        Options:

        +
        • ROUND_ROBIN: weighted round robin algorithm
        • LEAST_CONNECTIONS: weighted least connections algorithm
        • SOURCE_IP: source IP hash algorithm
        +

        When the value is SOURCE_IP, the weights of backend servers in the server group are invalid.

        +

        kubernetes.io/elb.health-check-flag

        +

        No

        +

        String

        +

        Whether to enable the ELB health check.

        +
        • Enabling health check: Leave this parameter blank or set it to on.
        • Disabling health check: Set this parameter to off.
        +

        If this parameter is enabled, the kubernetes.io/elb.health-check-option field must also be specified at the same time.

        +

        kubernetes.io/elb.health-check-option

        +

        No

        +

        Table 3 Object

        +

        ELB health check configuration items.

        +

        kubernetes.io/elb.session-affinity-mode

        +

        No

        +

        String

        +

        Listeners ensure session stickiness based on IP addresses. Requests from the same IP address will be forwarded to the same backend server. A combined example is provided after this procedure.

        +
        • Disabling sticky session: Do not set this parameter.
        • Enabling sticky session: Set this parameter to SOURCE_IP, indicating that the sticky session is based on the source IP address.
        +

        kubernetes.io/elb.session-affinity-option

        +

        No

        +

        Table 2 Object

        +

        Sticky session timeout.

        +

        kubernetes.io/hws-hostNetwork

        +

        No

        +

        String

        +

        This parameter indicates whether the workload Services use the host network. Setting this parameter to true will enable the ELB load balancer to forward requests to the host network.

        +

        The host network is not used by default. The value can be true or false.

        +

        externalTrafficPolicy

        +

        No

        +

        String

        +

        If sticky session is enabled, add this parameter so that requests are transferred to a fixed node. If a LoadBalancer Service with this parameter set to Local is created, a client can access the target backend only if the client is located on the same node as the backend.

        +
        +
        + +
        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Table 5 Data structure of the elb.autocreate field

        Parameter

        +

        Mandatory

        +

        Type

        +

        Description

        +

        name

        +

        No

        +

        String

        +

        Name of the load balancer that is automatically created.

        +

        Value range: 1 to 64 characters, including lowercase letters, digits, and underscores (_). The value must start with a lowercase letter and end with a lowercase letter or digit.

        +

        Default: cce-lb+service.UID

        +

        type

        +

        No

        +

        String

        +

        Network type of the load balancer.

        +
        • public: public network load balancer
        • inner: private network load balancer
        +

        Default: inner

        +

        bandwidth_name

        +

        Yes for public network load balancers

        +

        String

        +

        Bandwidth name. The default value is cce-bandwidth-******.

        +

        Value range: 1 to 64 characters, including lowercase letters, digits, and underscores (_). The value must start with a lowercase letter and end with a lowercase letter or digit.

        +

        bandwidth_chargemode

        +

        No

        +

        String

        +

        Bandwidth mode.

        + +

        bandwidth_size

        +

        Yes for public network load balancers

        +

        Integer

        +

        Bandwidth size. The value ranges from 1 Mbit/s to 2,000 Mbit/s. Set this parameter based on the bandwidth range allowed in your region.

        +

        bandwidth_sharetype

        +

        Yes for public network load balancers

        +

        String

        +

        Bandwidth sharing mode.

        +
        • PER: dedicated bandwidth
        +

        eip_type

        +

        Yes for public network load balancers

        +

        String

        +

        EIP type.

        +
        • 5_bgp: dynamic BGP
        • 5_sbgp: static BGP
        +

        available_zone

        +

        Yes

        +

        Array of strings

        +

        AZ where the load balancer is located.

        +

        This parameter is available only for dedicated load balancers.

        +

        l4_flavor_name

        +

        Yes

        +

        String

        +

        Flavor name of the layer-4 load balancer.

        +

        This parameter is available only for dedicated load balancers.

        +

        l7_flavor_name

        +

        No

        +

        String

        +

        Flavor name of the layer-7 load balancer.

        +

        This parameter is available only for dedicated load balancers.

        +

        elb_virsubnet_ids

        +

        No

        +

        Array of strings

        +

        Subnet where the backend servers of the load balancer are located. If this parameter is left blank, the default cluster subnet is used. Load balancers occupy different numbers of subnet IP addresses based on their specifications. Therefore, you are not advised to use the subnet CIDR blocks of other resources (such as clusters and nodes) as the load balancer CIDR block.

        +

        This parameter is available only for dedicated load balancers.

        +

        Example:

        +
        "elb_virsubnet_ids": [
        +   "14567f27-8ae4-42b8-ae47-9f847a4690dd"
        + ]
        +
        +
        +

      3. Create a workload.

        kubectl create -f nginx-deployment.yaml

        +

        If information similar to the following is displayed, the workload has been created.

        +
        deployment/nginx created
        +

        kubectl get po

        +

        If information similar to the following is displayed, the workload is running.

        +
        NAME                     READY     STATUS             RESTARTS   AGE
        +nginx-2601814895-c1xhw   1/1       Running            0          6s
        +

      4. Create a Service.

        kubectl create -f nginx-elb-svc.yaml

        +

        If information similar to the following is displayed, the Service has been created.

        +
        service/nginx created
        +

        kubectl get svc

        +

        If information similar to the following is displayed, the access type has been set successfully, and the workload is accessible.

        +
        NAME         TYPE           CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
        +kubernetes   ClusterIP      10.247.0.1       <none>        443/TCP        3d
        +nginx        LoadBalancer   10.247.130.196   10.78.42.242   80:31540/TCP   51s
        +

      5. Enter the URL in the address box of the browser, for example, 10.78.42.242:80. 10.78.42.242 indicates the IP address of the load balancer, and 80 indicates the access port displayed on the CCE console.

        Nginx is accessible.

        +
        Figure 4 Accessing Nginx through the LoadBalancer Service
        +

      +
      +
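
      The following sketch combines the sticky session annotations described in Table 4 and Table 2 for a Service that uses an existing shared load balancer. The ELB ID is a placeholder, and the 30-minute timeout is an example value within the documented range of 1 to 60.

      apiVersion: v1
      kind: Service
      metadata:
        name: nginx
        annotations:
          kubernetes.io/elb.id: <your-elb-id>                                         # Replace with the ID of an existing load balancer.
          kubernetes.io/elb.class: union                                              # Shared load balancer.
          kubernetes.io/elb.session-affinity-mode: SOURCE_IP                          # Sticky session based on the source IP address.
          kubernetes.io/elb.session-affinity-option: '{"persistence_timeout": "30"}'  # Sticky session timeout, in minutes.
      spec:
        ports:
        - name: service0
          port: 80
          protocol: TCP
          targetPort: 80
        selector:
          app: nginx
        type: LoadBalancer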

      ELB Forwarding

      After a Service of the LoadBalancer type is created, you can view the listener forwarding rules of the load balancer on the ELB console.

      +

      You can find that a listener is created for the load balancer. Its backend server is the node where the pod is located, and the backend server port is the node port of the Service. When traffic passes through ELB, it is forwarded to <IP address of the node where the pod is located>:<node port>. That is, the Service is accessed first and then the pod, which is the same as described in Scenario.

      +

      In the passthrough networking scenario (CCE Turbo + dedicated load balancer), after a LoadBalancer Service is created, you can view the listener forwarding rules of the load balancer on the ELB console.

      +

      You can see that a listener is created for the load balancer. The backend server address is the IP address of the pod, and the service port is the container port. This is because the pod uses an ENI or sub-ENI, so traffic that passes through the load balancer is forwarded directly to the pod. This is the same as described in Scenario.

      +
      +

      Why a Cluster Fails to Access Services by Using the ELB Address

      If the service affinity of a LoadBalancer Service is set to the node level, that is, the value of externalTrafficPolicy is Local, the ELB address may fail to be accessed from the cluster (specifically, nodes or containers). Information similar to the following is displayed:
      upstream connect error or disconnect/reset before headers. reset reason: connection failure
      +
      +

      This is because when the LoadBalancer Service is created, kube-proxy adds the ELB access address as an external IP to iptables or IPVS. If a client inside the cluster initiates a request to the ELB address, the address is treated as the external IP address of the Service, and kube-proxy forwards the request directly without sending it through the ELB outside the cluster.
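      A minimal sketch of how to observe this on a cluster node, assuming the ELB address is 10.78.42.242 (these are standard Linux tools, not CCE-specific commands):

      ipvsadm -Ln | grep -A 3 10.78.42.242        # IPVS mode: the ELB address appears as a virtual server
      iptables-save -t nat | grep 10.78.42.242    # iptables mode: the ELB address appears in the KUBE-SERVICES rules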

      +

      When the value of externalTrafficPolicy is Local, the situation varies according to the container network model and service forwarding mode. The details are as follows:

      + +
      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

      Server

      +

      Client

      +

      Tunnel Network Cluster (IPVS)

      +

      VPC Network Cluster (IPVS)

      +

      Tunnel Network Cluster (iptables)

      +

      VPC Network Cluster (iptables)

      +

      NodePort Service

      +

      Same node

      +

      OK. The node where the pod runs is accessible, not any other nodes.

      +

      OK. The node where the pod runs is accessible.

      +

      OK. The node where the pod runs is accessible.

      +

      OK. The node where the pod runs is accessible.

      +

      Cross-node

      +

      OK. The node where the pod runs is accessible, not any other nodes.

      +

      OK. The node where the pod runs is accessible.

      +

      OK. The node where the pod runs is accessible through the node IP address and port, but not in any other way.

      +

      OK. The node where the pod runs is accessible through the node IP address and port, but not in any other way.

      +

      Containers on the same node

      +

      OK. The node where the pod runs is accessible, not any other nodes.

      +

      The node where the pod runs is not accessible.

      +

      OK. The node where the pod runs is accessible.

      +

      The node where the pod runs is not accessible.

      +

      Containers across nodes

      +

      OK. The node where the pod runs is accessible, not any other nodes.

      +

      OK. The node where the pod runs is accessible.

      +

      OK. The node where the pod runs is accessible.

      +

      OK. The node where the pod runs is accessible.

      +

      LoadBalancer Service using a dedicated load balancer

      +

      Same node

      +

      Accessible for public networks, not private networks.

      +

      Accessible for public networks, not private networks.

      +

      Accessible for public networks, not private networks.

      +

      Accessible for public networks, not private networks.

      +

      Containers on the same node

      +

      Accessible for public networks, not private networks.

      +

      Accessible for public networks, not private networks.

      +

      Accessible for public networks, not private networks.

      +

      Accessible for public networks, not private networks.

      +

      Local Service of the nginx-ingress add-on using a dedicated load balancer

      +

      Same node

      +

      Accessible for public networks, not private networks.

      +

      Accessible for public networks, not private networks.

      +

      Accessible for public networks, not private networks.

      +

      Accessible for public networks, not private networks.

      +

      Containers on the same node

      +

      Accessible for public networks, not private networks.

      +

      Accessible for public networks, not private networks.

      +

      Accessible for public networks, not private networks.

      +

      Accessible for public networks, not private networks.

      +
      +
      +

      The following methods can be used to solve this problem:

      +
      • (Recommended) In the cluster, use the ClusterIP Service or the service domain name for access, as shown in the sketch after this list.
      • Set externalTrafficPolicy of the Service to Cluster, which means cluster-level service affinity. Note that with this setting the client source IP address is not preserved.
        apiVersion: v1 
        +kind: Service
        +metadata: 
        +  annotations:   
        +    kubernetes.io/elb.class: union
        +    kubernetes.io/elb.autocreate: '{"type":"public","bandwidth_name":"cce-bandwidth","bandwidth_chargemode":"bandwidth","bandwidth_size":5,"bandwidth_sharetype":"PER","eip_type":"5_bgp","name":"james"}'
        +  labels: 
        +    app: nginx 
        +  name: nginx 
        +spec: 
        +  externalTrafficPolicy: Cluster
        +  ports: 
        +  - name: service0 
        +    port: 80
        +    protocol: TCP 
        +    targetPort: 80
        +  selector: 
        +    app: nginx 
        +  type: LoadBalancer
        +
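      The sketch below illustrates the first (recommended) workaround. It assumes the Service from the preceding examples is named nginx in the default namespace and that curl is available in the client pod:

        # From any pod in the cluster, access the Service by its domain name instead of the ELB address:
        curl http://nginx.default.svc.cluster.local:80
        # Alternatively, look up the ClusterIP and use it directly:
        kubectl get svc nginx -o jsonpath='{.spec.clusterIP}'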
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_01_0015.html b/docs/cce/umn/cce_10_0015.html similarity index 51% rename from docs/cce/umn/cce_01_0015.html rename to docs/cce/umn/cce_10_0015.html index d5a0c602..416062e7 100644 --- a/docs/cce/umn/cce_01_0015.html +++ b/docs/cce/umn/cce_10_0015.html @@ -1,19 +1,19 @@ - +

      Using a ConfigMap

      -
      -

      The following example shows how to use a ConfigMap.

      -
      apiVersion: v1
      +
      +

      The following example shows how to use a ConfigMap.

      +
      apiVersion: v1
       kind: ConfigMap
       metadata:
         name: cce-configmap
       data:
         SPECIAL_LEVEL: Hello
         SPECIAL_TYPE: CCE
      -

      When a ConfigMap is used in a pod, the pod and ConfigMap must be in the same cluster and namespace.

      +

      When a ConfigMap is used in a pod, the pod and ConfigMap must be in the same cluster and namespace.

      -

      Setting Workload Environment Variables

      When creating a workload, you can use a ConfigMap to set environment variables. The valueFrom parameter indicates the key-value pair to be referenced.

      -
      apiVersion: v1
      +

      Setting Workload Environment Variables

      When creating a workload, you can use a ConfigMap to set environment variables. The valueFrom parameter indicates the key-value pair to be referenced.

      +
      apiVersion: v1
       kind: Pod
       metadata:
         name: configmap-pod-1
      @@ -29,20 +29,20 @@ spec:
                     name: cce-configmap                ## Name of the referenced configuration file.
                     key: SPECIAL_LEVEL                 ## Key of the referenced ConfigMap.
         restartPolicy: Never
      -
      If you need to define the values of multiple ConfigMaps as the environment variables of the pods, add multiple environment variable parameters to the pods.
      env:
      +
      If you need to define the values of multiple ConfigMaps as the environment variables of the pods, add multiple environment variable parameters to the pods.
      env:
       - name: SPECIAL_LEVEL_KEY
         valueFrom:
           configMapKeyRef:
      -          name: cce-configmap
      -          key: SPECIAL_LEVEL
      +          name: cce-configmap
      +          key: SPECIAL_LEVEL
       - name: SPECIAL_TYPE_KEY
         valueFrom:
           configMapKeyRef:
      -          name: cce-configmap
      -          key: SPECIAL_TYPE
      +          name: cce-configmap
      +          key: SPECIAL_TYPE
      -

      To add all data in a ConfigMap to environment variables, use the envFrom parameter. The keys in the ConfigMap will become names of environment variables in a pod.

      -
      apiVersion: v1
      +

      To add all data in a ConfigMap to environment variables, use the envFrom parameter. The keys in the ConfigMap will become names of environment variables in a pod.

      +
      apiVersion: v1
       kind: Pod
       metadata:
         name: configmap-pod-2
      @@ -53,11 +53,11 @@ spec:
             command: [ "/bin/sh", "-c", "env" ]
             envFrom:
             - configMapRef:
      -          name: cce-configmap
      +          name: cce-configmap
         restartPolicy: Never
      -

      Setting Command Line Parameters

      You can use a ConfigMap to set commands or parameter values for a container by using the environment variable substitution syntax $(VAR_NAME). The following shows an example.

      -
      apiVersion: v1
      +

      Setting Command Line Parameters

      You can use a ConfigMap to set commands or parameter values for a container by using the environment variable substitution syntax $(VAR_NAME). The following shows an example.

      +
      apiVersion: v1
       kind: Pod
       metadata:
         name: configmap-pod-3
      @@ -65,24 +65,24 @@ spec:
         containers:
           - name: test-container
             image: busybox
      -      command: [ "/bin/sh", "-c", "echo $(SPECIAL_LEVEL_KEY) $(SPECIAL_TYPE_KEY)" ]
      +      command: [ "/bin/sh", "-c", "echo $(SPECIAL_LEVEL_KEY) $(SPECIAL_TYPE_KEY)" ]
             env:
               - name: SPECIAL_LEVEL_KEY
                 valueFrom:
                   configMapKeyRef:
      -              name: cce-configmap
      -              key: SPECIAL_LEVEL
      +              name: cce-configmap
      +              key: SPECIAL_LEVEL
               - name: SPECIAL_TYPE_KEY
                 valueFrom:
                   configMapKeyRef:
      -              name: cce-configmap
      -              key: SPECIAL_TYPE
      +              name: cce-configmap
      +              key: SPECIAL_TYPE
         restartPolicy: Never
      -

      After the pod runs, the following information is displayed:

      -
      Hello CCE
      +

      After the pod runs, the following information is displayed:

      +
      Hello CCE
      -

      Attaching a ConfigMap to the Workload Data Volume

      A ConfigMap can also be used in the data volume. You only need to attach the ConfigMap to the workload when creating the workload. After the mounting is complete, a configuration file with key as the file name and value as the file content is generated.

      -
      apiVersion: v1
      +

      Attaching a ConfigMap to the Workload Data Volume

      A ConfigMap can also be used in the data volume. You only need to attach the ConfigMap to the workload when creating the workload. After the mounting is complete, a configuration file with key as the file name and value as the file content is generated.

      +
      apiVersion: v1
       kind: Pod
       metadata:
         name: configmap-pod-4
      @@ -97,17 +97,17 @@ spec:
         volumes:
           - name: config-volume
             configMap:
      -        name: cce-configmap
      +        name: cce-configmap
         restartPolicy: Never
      -

      After the pod is run, the SPECIAL_LEVEL and SPECIAL_TYPE files are generated in the /etc/config directory. The contents of the files are Hello and CCE, respectively. Also, the following file names will be displayed.

      -
      SPECIAL_TYPE
      +

      After the pod is run, the SPECIAL_LEVEL and SPECIAL_TYPE files are generated in the /etc/config directory. The contents of the files are Hello and CCE, respectively. Also, the following file names will be displayed.

      +
      SPECIAL_TYPE
       SPECIAL_LEVEL
      -

      To mount a ConfigMap to a data volume, you can also perform operations on the CCE console. When creating a workload, set advanced settings for the container, choose Data Storage > Local Volume, click Add Local Volume, and select ConfigMap. For details, see ConfigMap.

      +

      To mount a ConfigMap to a data volume, you can also perform operations on the CCE console. When creating a workload, set advanced settings for the container, choose Data Storage > Local Volume, click Add Local Volume, and select ConfigMap. For details, see ConfigMap.

      diff --git a/docs/cce/umn/cce_10_0016.html b/docs/cce/umn/cce_10_0016.html new file mode 100644 index 00000000..7c5c5744 --- /dev/null +++ b/docs/cce/umn/cce_10_0016.html @@ -0,0 +1,86 @@ + + +

      Using a Secret

      +

      The following secrets are used by the CCE system. Do not perform any operations on them.

      +
      • Do not operate secrets under kube-system.
      • Do not operate default-secret and paas.elb in any of the namespaces. default-secret is used to pull private images from SWR, and paas.elb is used to connect Services in the namespace to the ELB service.
      +
      + +

      The following example shows how to use a secret.

      +
      apiVersion: v1
      +kind: Secret
      +metadata:
      +  name: mysecret
      +type: Opaque
      +data:
      +  username: ******  # The value must be Base64-encoded.
      +  password: ******  # The value must be Base64-encoded.
      +
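      The masked values above are placeholders. A minimal sketch of producing Base64-encoded values on Linux (the plaintext admin is an example; replace it with your own values):

      echo -n 'admin' | base64               # YWRtaW4=
      echo -n '<your-password>' | base64     # encode the password the same way
      echo 'YWRtaW4=' | base64 -d            # decode to verify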

      When a secret is used in a pod, the pod and secret must be in the same cluster and namespace.

      +
      +

      Configuring the Data Volume of a Pod

      A secret can be used as a file in a pod. As shown in the following example, the username and password of the mysecret secret are saved in the /etc/foo directory as files.
      apiVersion: v1
      +kind: Pod
      +metadata:
      +  name: mypod
      +spec:
      +  containers:
      +  - name: mypod
      +    image: redis
      +    volumeMounts:
      +    - name: foo
      +      mountPath: "/etc/foo"
      +      readOnly: true
      +  volumes:
      +  - name: foo
      +    secret:
      +      secretName: mysecret
      +
      +
      In addition, you can specify the directory and permission to access a secret. The username is stored in the /etc/foo/my-group/my-username file in the container.
      apiVersion: v1
      +kind: Pod
      +metadata:
      +  name: mypod
      +spec:
      +  containers:
      +  - name: mypod
      +    image: redis
      +    volumeMounts:
      +    - name: foo
      +      mountPath: "/etc/foo"
      +  volumes:
      +  - name: foo
      +    secret:
      +      secretName: mysecret
      +      items:
      +      - key: username
      +        path: my-group/my-username
      +        mode: 511
      +
      +

      To mount a secret to a data volume, you can also perform operations on the CCE console. When creating a workload, set advanced settings for the container, choose Data Storage > Local Volume, click Add Local Volume, and select Secret. For details, see Secret.

      +
      +

      Setting Environment Variables of a Pod

      A secret can be used as environment variables of a pod. As shown in the following example, the username and password of the mysecret secret are defined as environment variables of the pod.
      apiVersion: v1
      +kind: Pod
      +metadata:
      +  name: secret-env-pod
      +spec:
      +  containers:
      +  - name: mycontainer
      +    image: redis
      +    env:
      +      - name: SECRET_USERNAME
      +        valueFrom:
      +          secretKeyRef:
      +            name: mysecret
      +            key: username
      +      - name: SECRET_PASSWORD
      +        valueFrom:
      +          secretKeyRef:
      +            name: mysecret
      +            key: password
      +  restartPolicy: Never
      +
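      A hedged check of the injected variables (for debugging only; avoid printing real credentials), assuming the secret-env-pod pod from the example above is running:

      kubectl exec secret-env-pod -- env | grep SECRET_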
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0018.html b/docs/cce/umn/cce_10_0018.html new file mode 100644 index 00000000..84e7a111 --- /dev/null +++ b/docs/cce/umn/cce_10_0018.html @@ -0,0 +1,203 @@ + + +

      Using ICAgent to Collect Container Logs

      +

      CCE works with AOM to collect workload logs. When creating a node, CCE installs the ICAgent for you (the DaemonSet named icagent in the kube-system namespace of the cluster). After the ICAgent collects workload logs and reports them to AOM, you can view workload logs on the CCE or AOM console.
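      A quick hedged check that the collector is running (the DaemonSet name comes from the description above):

      kubectl get daemonset icagent -n kube-system
      kubectl get pods -n kube-system -o wide | grep icagent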

      +

      Notes and Constraints

      The ICAgent only collects *.log, *.trace, and *.out text log files.

      +
      +

      Using ICAgent to Collect Logs

      1. When creating a workload, set logging for the container.
      2. Click to add a log policy.

        The following uses Nginx as an example. Log policies vary depending on workloads.
        Figure 1 Adding a log policy
        +
        +

      3. Set Storage Type to Host Path or Container Path.

        +

        + + + + + + + + + + + + + + + + + + + +
        Table 1 Configuring log policies

        Parameter

        +

        Description

        +

        Storage Type

        +
        • Host Path (hostPath): A host path is mounted to the specified container path (mount path). You can then view, in the host path on the node, the container logs output to the mount path.
        • Container Path (emptyDir): A temporary path on the node is mounted to the specified container path (mount path). Log data in the temporary path that has not been reported to AOM by the collector is lost after the pod is deleted.
        +

        Host Path

        +

        Enter a host path, for example, /var/paas/sys/log/nginx.

        +

        Container Path

        +
        Container path (for example, /tmp) to which the storage resources will be mounted.
        NOTICE:
        • Do not mount storage to a system directory such as / or /var/run; otherwise, container errors may occur. You are advised to mount the volume to an empty directory. If the directory is not empty, ensure that it contains no files that affect container startup. Otherwise, such files will be replaced, causing failures to start the container and create the workload.
        • If the volume is mounted to a high-risk directory, you are advised to start the container with an account that has the minimum permissions; otherwise, high-risk files on the host may be damaged.
        • AOM collects only the first 20 log files that have been modified recently. It collects files from 2 levels of subdirectories by default.
        • AOM only collects .log, .trace, and .out text log files in the mount paths.
        • For details about how to set permissions for mount points in a container, see Configure a Security Context for a Pod or Container.
        +
        +
        +

        Extended Host Path

        +

        This parameter is mandatory only if Storage Type is set to Host Path.

        +

        Extended host paths contain pod IDs or container names to distinguish different containers into which the host path is mounted.

        +

        A third-level directory is added to the original volume directory/subdirectory so that you can easily locate the files output by a single pod.

        +
        • None: No extended path is configured.
        • PodUID: ID of a pod.
        • PodName: name of a pod.
        • PodUID/ContainerName: ID of a pod and name of a container.
        • PodName/ContainerName: name of a pod and name of a container.
        +

        Log Dump

        +

        Log dump refers to rotating log files on a local host.

        +
        • Enabled: AOM scans log files every minute. When a log file exceeds 50 MB, it is dumped immediately. A new .zip file is generated in the directory where the log file is located. For a log file, AOM stores only the latest 20 .zip files. When the number of .zip files exceeds 20, earlier .zip files will be deleted. After the dump is complete, the log file in AOM will be cleared.
        • Disabled: AOM does not dump log files.
        +
        NOTE:
        • AOM rotates log files using copytruncate. Before enabling log dumping, ensure that log files are written in the append mode. Otherwise, file holes may occur.
        • Currently, mainstream log components such as Log4j and Logback support log file rotation. If you have already set rotation for log files, skip the configuration. Otherwise, conflicts may occur.
        • You are advised to configure log file rotation for your own services to flexibly control the size and number of rolled files.
        +
        +
        +
        +

      4. Click OK.
      +
      +

      YAML Example

      You can set the container log storage path by defining a YAML file.

      +

      As shown in the following example, an emptyDir volume (a temporary path on the node) is mounted to /var/log/nginx so that the ICAgent collects logs in /var/log/nginx. The policy field is customized by CCE and allows the ICAgent to identify and collect logs.

      +
      apiVersion: apps/v1
      +kind: Deployment
      +metadata:
      +  name: testlog
      +  namespace: default
      +spec:
      +  replicas: 1
      +  selector:
      +    matchLabels:
      +      app: testlog
      +  template:
      +    metadata:
      +      labels:
      +        app: testlog
      +    spec:
      +      containers:
      +        - image: 'nginx:alpine'
      +          name: container-0
      +          resources:
      +            requests:
      +              cpu: 250m
      +              memory: 512Mi
      +            limits:
      +              cpu: 250m
      +              memory: 512Mi
      +          volumeMounts:
      +            - name: vol-log
      +              mountPath: /var/log/nginx
      +              policy:
      +                logs:
      +                  rotate: ''
      +      volumes:
      +        - emptyDir: {}
      +          name: vol-log
      +      imagePullSecrets:
      +        - name: default-secret
      +

      The following shows how to use a hostPath volume. Compared with emptyDir, the volume type is changed to hostPath, and a path on the host needs to be configured for this hostPath volume. In the following example, /tmp/log on the host is mounted to /var/log/nginx. In this way, the ICAgent can collect logs in /var/log/nginx without deleting them from /tmp/log.

      +
      apiVersion: apps/v1
      +kind: Deployment
      +metadata:
      +  name: testlog
      +  namespace: default
      +spec:
      +  replicas: 1
      +  selector:
      +    matchLabels:
      +      app: testlog
      +  template:
      +    metadata:
      +      labels:
      +        app: testlog
      +    spec:
      +      containers:
      +        - image: 'nginx:alpine'
      +          name: container-0
      +          resources:
      +            requests:
      +              cpu: 250m
      +              memory: 512Mi
      +            limits:
      +              cpu: 250m
      +              memory: 512Mi
      +          volumeMounts:
      +            - name: vol-log
      +              mountPath: /var/log/nginx
      +              readOnly: false
      +              extendPathMode: PodUID
      +              policy:
      +                logs:
      +                  rotate: Hourly
      +                  annotations:
      +                    
      +                    format: ''
      +      volumes:
      +        - hostPath:
      +            path: /tmp/log
      +          name: vol-log
      +      imagePullSecrets:
      +        - name: default-secret
      + +
      + + + + + + + + + + + + + + + + + +
      Table 2 Parameter description

      Parameter

      +

      Explanation

      +

      Description

      +

      extendPathMode

      +

      Extended host path

      +

      Extended host paths contain pod IDs or container names to distinguish different containers into which the host path is mounted.

      +

      A third-level directory is added to the original volume directory/subdirectory so that you can easily locate the files output by a single pod.

      +
      • None: No extended path is configured.
      • PodUID: ID of a pod.
      • PodName: name of a pod.
      • PodUID/ContainerName: ID of a pod and name of a container.
      • PodName/ContainerName: name of a pod and name of a container.
      +

      policy.logs.rotate

      +

      Log dump

      +

      Log dump refers to rotating log files on a local host.

      +
      • Enabled: AOM scans log files every minute. When a log file exceeds 50 MB, it is dumped immediately. A new .zip file is generated in the directory where the log file is located. For a log file, AOM stores only the latest 20 .zip files. When the number of .zip files exceeds 20, earlier .zip files will be deleted. After the dump is complete, the log file in AOM will be cleared.
      • Disabled: AOM does not dump log files.
      +
      NOTE:
      • AOM rotates log files using copytruncate. Before enabling log dumping, ensure that log files are written in the append mode. Otherwise, file holes may occur.
      • Currently, mainstream log components such as Log4j and Logback support log file rotation. If you have set rotation for log files, skip the configuration. Otherwise, conflicts may occur.
      • You are advised to configure log file rotation for your own services to flexibly control the size and number of rolled files.
      +
      +

      policy.logs.annotations.format

      +

      Multi-line log matching

      +

      Some program logs (for example, Java program logs) contain a log message that occupies multiple lines. By default, the log collection system collects logs line by line. If you want such a log to be displayed as a single message in the log collection system, enable the multi-line log function and use the log time or regular pattern mode. When a line matches the preset time format or regular expression, it is considered the start of a new log message, and all subsequent lines before the next match are treated as part of that message.

      +

      The format is as follows:

      +
      {
      +    "multi": {
      +        "mode": "time",
      +        "value": "YYYY-MM-DD hh:mm:ss"
      +    }
      +}
      +

      multi indicates the multi-line mode.

      +
      • time: log time. Enter a time wildcard. For example, if the time in the log is 2017-01-01 23:59:59, the wildcard is YYYY-MM-DD hh:mm:ss.
      • regular: regular pattern. Enter a regular expression.
      +
      +
      +
      +

      Viewing Logs

      After a log collection path is configured and the workload is created, the ICAgent collects log files from the configured path. The collection takes about 1 minute.

      +

      After the log collection is complete, go to the workload details page and click Logs in the upper right corner to view logs.

      +

      You can also view logs on the AOM console.

      +

      You can also run the kubectl logs command to view the standard output of a container.

      +
      # View logs of a specified pod.
      +kubectl logs <pod_name>
      +kubectl logs -f <pod_name> # Similar to tail -f
      +
      +# View logs of a specified container in a specified pod.
      +kubectl logs <pod_name> -c <container_name>
      +
      +kubectl logs <pod_name> -c <container_name> -n <namespace> (one-off query)
      +kubectl logs -f <pod_name> -n <namespace> (real-time query in tail -f mode)
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0019.html b/docs/cce/umn/cce_10_0019.html new file mode 100644 index 00000000..75f21323 --- /dev/null +++ b/docs/cce/umn/cce_10_0019.html @@ -0,0 +1,13 @@ + + +

      Charts

      +
      + + diff --git a/docs/cce/umn/cce_10_0020.html b/docs/cce/umn/cce_10_0020.html new file mode 100644 index 00000000..688efde1 --- /dev/null +++ b/docs/cce/umn/cce_10_0020.html @@ -0,0 +1,27 @@ + + +

      Networking

      +
      + + diff --git a/docs/cce/umn/cce_01_0024.html b/docs/cce/umn/cce_10_0024.html similarity index 54% rename from docs/cce/umn/cce_01_0024.html rename to docs/cce/umn/cce_10_0024.html index 1331d79b..56be0999 100644 --- a/docs/cce/umn/cce_01_0024.html +++ b/docs/cce/umn/cce_10_0024.html @@ -1,12 +1,12 @@ - +

      Cloud Trace Service (CTS)

      diff --git a/docs/cce/umn/cce_10_0025.html b/docs/cce/umn/cce_10_0025.html new file mode 100644 index 00000000..adbe1614 --- /dev/null +++ b/docs/cce/umn/cce_10_0025.html @@ -0,0 +1,596 @@ + + +

      CCE Operations Supported by CTS

      +
      +
      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      Table 1 CCE operations supported by CTS

      Operation

      +

      Resource Type

      +

      Event Name

      +

      Creating an agency

      +

      Cluster

      +

      createUserAgencies

      +

      Creating a cluster

      +

      Cluster

      +

      createCluster

      +

      Updating the description of a cluster

      +

      Cluster

      +

      updateCluster

      +

      Upgrading a cluster

      +

      Cluster

      +

      clusterUpgrade

      +

      Deleting a cluster

      +

      Cluster

      +

      claimCluster/deleteCluster

      +

      Downloading a cluster certificate

      +

      Cluster

      +

      getClusterCertByUID

      +

      Binding and unbinding an EIP

      +

      Cluster

      +

      operateMasterEIP

      +

      Waking up a cluster and resetting node management (V2)

      +

      Cluster

      +

      operateCluster

      +

      Hibernating a cluster (V3)

      +

      Cluster

      +

      hibernateCluster

      +

      Waking up a cluster (V3)

      +

      Cluster

      +

      awakeCluster

      +

      Changing the specifications of a cluster

      +

      Cluster

      +

      resizeCluster

      +

      Modifying configurations of a cluster

      +

      Cluster

      +

      updateConfiguration

      +

      Creating a node pool

      +

      Node pool

      +

      createNodePool

      +

      Updating a node pool

      +

      Node pool

      +

      updateNodePool

      +

      Deleting a node pool

      +

      Node pool

      +

      claimNodePool

      +

      Migrating a node pool

      +

      Node pool

      +

      migrateNodepool

      +

      Modifying node pool configurations

      +

      Node pool

      +

      updateConfiguration

      +

      Creating a node

      +

      Node

      +

      createNode

      +

      Deleting all the nodes from a specified cluster

      +

      Node

      +

      deleteAllHosts

      +

      Deleting a single node

      +

      Node

      +

      deleteOneHost/claimOneHost

      +

      Updating the description of a node

      +

      Node

      +

      updateNode

      +

      Creating an add-on instance

      +

      Add-on instance

      +

      createAddonInstance

      +

      Deleting an add-on instance

      +

      Add-on instance

      +

      deleteAddonInstance

      +

      Uploading a chart

      +

      Chart

      +

      uploadChart

      +

      Updating a chart

      +

      Chart

      +

      updateChart

      +

      Deleting a chart

      +

      Chart

      +

      deleteChart

      +

      Creating a release

      +

      Release

      +

      createRelease

      +

      Upgrading a release

      +

      Release

      +

      updateRelease

      +

      Deleting a release

      +

      Release

      +

      deleteRelease

      +

      Creating a ConfigMap

      +

      Kubernetes resource

      +

      createConfigmaps

      +

      Creating a DaemonSet

      +

      Kubernetes resource

      +

      createDaemonsets

      +

      Creating a Deployment

      +

      Kubernetes resource

      +

      createDeployments

      +

      Creating an event

      +

      Kubernetes resource

      +

      createEvents

      +

      Creating an Ingress

      +

      Kubernetes resource

      +

      createIngresses

      +

      Creating a job

      +

      Kubernetes resource

      +

      createJobs

      +

      Creating a namespace

      +

      Kubernetes resource

      +

      createNamespaces

      +

      Creating a node

      +

      Kubernetes resource

      +

      createNodes

      +

      Creating a PersistentVolumeClaim

      +

      Kubernetes resource

      +

      createPersistentvolumeclaims

      +

      Creating a pod

      +

      Kubernetes resource

      +

      createPods

      +

      Creating a replica set

      +

      Kubernetes resource

      +

      createReplicasets

      +

      Creating a resource quota

      +

      Kubernetes resource

      +

      createResourcequotas

      +

      Creating a secret

      +

      Kubernetes resource

      +

      createSecrets

      +

      Creating a service

      +

      Kubernetes resource

      +

      createServices

      +

      Creating a StatefulSet

      +

      Kubernetes resource

      +

      createStatefulsets

      +

      Creating a volume

      +

      Kubernetes resource

      +

      createVolumes

      +

      Deleting a ConfigMap

      +

      Kubernetes resource

      +

      deleteConfigmaps

      +

      Deleting a DaemonSet

      +

      Kubernetes resource

      +

      deleteDaemonsets

      +

      Deleting a Deployment

      +

      Kubernetes resource

      +

      deleteDeployments

      +

      Deleting an event

      +

      Kubernetes resource

      +

      deleteEvents

      +

      Deleting an Ingress

      +

      Kubernetes resource

      +

      deleteIngresses

      +

      Deleting a job

      +

      Kubernetes resource

      +

      deleteJobs

      +

      Deleting a namespace

      +

      Kubernetes resource

      +

      deleteNamespaces

      +

      Deleting a node

      +

      Kubernetes resource

      +

      deleteNodes

      +

      Deleting a Pod

      +

      Kubernetes resource

      +

      deletePods

      +

      Deleting a replica set

      +

      Kubernetes resource

      +

      deleteReplicasets

      +

      Deleting a resource quota

      +

      Kubernetes resource

      +

      deleteResourcequotas

      +

      Deleting a secret

      +

      Kubernetes resource

      +

      deleteSecrets

      +

      Deleting a service

      +

      Kubernetes resource

      +

      deleteServices

      +

      Deleting a StatefulSet

      +

      Kubernetes resource

      +

      deleteStatefulsets

      +

      Deleting volumes

      +

      Kubernetes resource

      +

      deleteVolumes

      +

      Replacing a specified ConfigMap

      +

      Kubernetes resource

      +

      updateConfigmaps

      +

      Replacing a specified DaemonSet

      +

      Kubernetes resource

      +

      updateDaemonsets

      +

      Replacing a specified Deployment

      +

      Kubernetes resource

      +

      updateDeployments

      +

      Replacing a specified event

      +

      Kubernetes resource

      +

      updateEvents

      +

      Replacing a specified ingress

      +

      Kubernetes resource

      +

      updateIngresses

      +

      Replacing a specified job

      +

      Kubernetes resource

      +

      updateJobs

      +

      Replacing a specified namespace

      +

      Kubernetes resource

      +

      updateNamespaces

      +

      Replacing a specified node

      +

      Kubernetes resource

      +

      updateNodes

      +

      Replacing a specified PersistentVolumeClaim

      +

      Kubernetes resource

      +

      updatePersistentvolumeclaims

      +

      Replacing a specified pod

      +

      Kubernetes resource

      +

      updatePods

      +

      Replacing a specified replica set

      +

      Kubernetes resource

      +

      updateReplicasets

      +

      Replacing a specified resource quota

      +

      Kubernetes resource

      +

      updateResourcequotas

      +

      Replacing a specified secret

      +

      Kubernetes resource

      +

      updateSecrets

      +

      Replacing a specified service

      +

      Kubernetes resource

      +

      updateServices

      +

      Replacing a specified StatefulSet

      +

      Kubernetes resource

      +

      updateStatefulsets

      +

      Replacing the specified status

      +

      Kubernetes resource

      +

      updateStatus

      +

      Uploading a chart

      +

      Kubernetes resource

      +

      uploadChart

      +

      Updating a component template

      +

      Kubernetes resource

      +

      updateChart

      +

      Deleting a chart

      +

      Kubernetes resource

      +

      deleteChart

      +

      Creating a template application

      +

      Kubernetes resource

      +

      createRelease

      +

      Updating a template application

      +

      Kubernetes resource

      +

      updateRelease

      +

      Deleting a template application

      +

      Kubernetes resource

      +

      deleteRelease

      +
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0026.html b/docs/cce/umn/cce_10_0026.html new file mode 100644 index 00000000..cbb20eaf --- /dev/null +++ b/docs/cce/umn/cce_10_0026.html @@ -0,0 +1,21 @@ + + +

      Querying CTS Logs

      +

      Scenario

      After you enable CTS, the system starts recording operations on CCE resources. Operation records of the last 7 days can be viewed on the CTS management console.

      +
      +

      Procedure

      1. Log in to the management console.
      2. Click in the upper left corner and select a region.
      3. Choose Service List from the main menu. Choose Management & Deployment > Cloud Trace Service.
      4. In the navigation pane of the CTS console, choose Cloud Trace Service > Trace List.
      5. On the Trace List page, query operation records based on the search criteria. Currently, the trace list supports trace query based on the combination of the following search criteria:

        • Trace Source, Resource Type, and Search By

          Select the search criteria from the drop-down lists. Select CCE from the Trace Source drop-down list.

          +

          If you select Trace name from the Search By drop-down list, specify the trace name.

          +

          If you select Resource ID from the Search By drop-down list, select or enter a specific resource ID.

          +

          If you select Resource name from the Search By drop-down list, select or enter a specific resource name.

          +
        • Operator: Select a specific operator (at user level rather than account level).
        • Trace Status: Set this parameter to any of the following values: All trace statuses, normal, warning, and incident.
        • Time range: You can query traces generated during any time range in the last seven days.
        +

      6. Click on the left of a trace to expand its details, as shown below.

        Figure 1 Expanding trace details
        +

      7. Click View Trace in the Operation column. The trace details are displayed.

        Figure 2 Viewing event details
        +

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0028.html b/docs/cce/umn/cce_10_0028.html new file mode 100644 index 00000000..52c3ac5f --- /dev/null +++ b/docs/cce/umn/cce_10_0028.html @@ -0,0 +1,39 @@ + + +

      Creating a CCE Cluster

      +

      On the CCE console, you can easily create Kubernetes clusters. Kubernetes can manage container clusters at scale. A cluster manages a group of node resources.

      +

      In CCE, you can create a CCE cluster to manage VMs. By using high-performance network models, hybrid clusters provide a multi-scenario, secure, and stable runtime environment for containers.

      +

      Notes and Constraints

      • During the node creation, software packages are downloaded from OBS using the domain name. You need to use a private DNS server to resolve the OBS domain name, and configure the subnet where the node resides with a private DNS server address. When you create a subnet, the private DNS server is used by default. If you change the subnet DNS, ensure that the DNS server in use can resolve the OBS domain name.
      • You can create a maximum of 50 clusters in a single region.
      • After a cluster is created, the following items cannot be changed:
        • Cluster type
        • Number of master nodes in the cluster
        • AZ of a master node
        • Network configuration of the cluster, such as the VPC, subnet, container CIDR block, Service CIDR block, and kube-proxy (forwarding) settings
        • Network model. For example, a tunnel network cannot be changed to a VPC network.
        +
      +
      +

      Procedure

      1. Log in to the CCE console. Choose Clusters. On the displayed page, click Create next to CCE cluster.
      2. Set cluster parameters.

        Basic Settings
        • Cluster Name
        • Cluster Version: Select the Kubernetes version used by the cluster.
        • Cluster Scale: maximum number of nodes that can be managed by the cluster.
        • HA: distribution mode of master nodes. By default, master nodes are randomly distributed in different AZs to improve DR capabilities.
          You can also expand advanced settings and customize the master node distribution mode. The following two modes are supported:
          • Random: Master nodes are created in different AZs for DR.
          • Custom: You can determine the location of each master node.
            • Host: Master nodes are created on different hosts in the same AZ.
            • Custom: You can determine the location of each master node.
            +
          +
          +
        +
        +

        Network Settings

        +

        The cluster network settings cover nodes, containers, and Services. For details about the cluster networking and container network models, see Overview.

        +
        • Network Model: CCE clusters support VPC network and tunnel network models. For details, see VPC Network and Container Tunnel Network.
        • VPC: Select the VPC to which the cluster belongs. If no VPC is available, click Create VPC to create one. The VPC cannot be changed after creation.
        • Master Node Subnet: Select the subnet where the master node is deployed. If no subnet is available, click Create Subnet to create one. The subnet cannot be changed after creation.
        • Container CIDR Block: Set the CIDR block used by containers.
        • Service CIDR Block: CIDR block for Services used by containers in the same cluster to access each other. The value determines the maximum number of Services you can create. The value cannot be changed after creation.
        +

        Advanced Settings

        +
        • Request Forwarding: The IPVS and iptables modes are supported. For details, see Comparing iptables and IPVS.
        • CPU Manager: For details, see Binding CPU Cores.
        • Certificate Authentication:
          • Default: The X509-based authentication mode is enabled by default. X509 is a commonly used certificate format.
          • Custom: The cluster can identify users based on the header in the request body for authentication.

            You need to upload your CA root certificate, client certificate, and private key of the client certificate.

            +
            • Upload a file smaller than 1 MB. The CA certificate and client certificate can be in .crt or .cer format. The private key of the client certificate can only be uploaded unencrypted.
            • The validity period of the client certificate must be longer than five years.
            • The uploaded CA certificate is used for both the authentication proxy and the kube-apiserver aggregation layer configuration. If the certificate is invalid, the cluster cannot be created.
            • Starting from v1.25, Kubernetes no longer supports certificate authentication generated using the SHA1WithRSA or ECDSAWithSHA1 algorithm. You are advised to use the SHA256 algorithm.
            +
            +
          +
        • Description: The value can contain a maximum of 200 English characters.
        +

      3. Click Next: Add-on Configuration.

        By default, the coredns and everest add-ons are installed.

        +
        Service log
        • ICAgent:

          A log collector provided by Application Operations Management (AOM), reporting logs to AOM and Log Tank Service (LTS) according to the log collection rules you configured.

          +

          You can collect stdout logs as required.

          +
        +
        +

      4. After the parameters are specified, click Next: Confirm. The cluster resource list is displayed. Confirm the information and click Submit.

        It takes about 6 to 10 minutes to create a cluster. You can click Back to Cluster List to perform other operations on the cluster or click Go to Cluster Events to view the cluster details.

        +

      +
      +

      Related Operations

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0030.html b/docs/cce/umn/cce_10_0030.html new file mode 100644 index 00000000..8478731b --- /dev/null +++ b/docs/cce/umn/cce_10_0030.html @@ -0,0 +1,15 @@ + + +

      Namespaces

      +
      + + diff --git a/docs/cce/umn/cce_10_0031.html b/docs/cce/umn/cce_10_0031.html new file mode 100644 index 00000000..a61780a4 --- /dev/null +++ b/docs/cce/umn/cce_10_0031.html @@ -0,0 +1,21 @@ + + +

      Managing a Cluster

      +
      + + diff --git a/docs/cce/umn/cce_10_0035.html b/docs/cce/umn/cce_10_0035.html new file mode 100644 index 00000000..1742be9b --- /dev/null +++ b/docs/cce/umn/cce_10_0035.html @@ -0,0 +1,15 @@ + + +

      Node Pools

      +
      + + diff --git a/docs/cce/umn/cce_10_0036.html b/docs/cce/umn/cce_10_0036.html new file mode 100644 index 00000000..6c37df45 --- /dev/null +++ b/docs/cce/umn/cce_10_0036.html @@ -0,0 +1,17 @@ + + +

      Stopping a Node

      +

      Scenario

      After a node in the cluster is stopped, services on the node are also stopped. Before stopping a node, ensure that stopping the services running on it will not cause adverse impacts.

      +
      +

      Notes and Constraints

      • Deleting a node will lead to pod migration, which may affect services. Therefore, delete nodes during off-peak hours.
      • Unexpected risks may occur during node deletion. Back up related data in advance.
      • While the node is being deleted, the backend will set the node to the unschedulable state.
      • Only worker nodes can be stopped.
      +
      +

      Procedure

      1. Log in to the CCE console and click the cluster name to access the cluster.
      2. In the navigation pane, choose Nodes. In the right pane, click the name of the node to be stopped.
      3. In the upper right corner of the ECS details page, click Stop in the instance status area. In the displayed dialog box, click Yes.

        Figure 1 ECS details page
        +

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0045.html b/docs/cce/umn/cce_10_0045.html new file mode 100644 index 00000000..6ae35260 --- /dev/null +++ b/docs/cce/umn/cce_10_0045.html @@ -0,0 +1,19 @@ + + +

      Configuration Center

      +
      + + diff --git a/docs/cce/umn/cce_10_0046.html b/docs/cce/umn/cce_10_0046.html new file mode 100644 index 00000000..72d826dd --- /dev/null +++ b/docs/cce/umn/cce_10_0046.html @@ -0,0 +1,35 @@ + + +

      Workloads

      +
      + + diff --git a/docs/cce/umn/cce_10_0047.html b/docs/cce/umn/cce_10_0047.html new file mode 100644 index 00000000..1afbf991 --- /dev/null +++ b/docs/cce/umn/cce_10_0047.html @@ -0,0 +1,178 @@ + + +

      Creating a Deployment

      +

      Scenario

      Deployments are workloads (for example, Nginx) that do not store any data or status. You can create Deployments on the CCE console or by running kubectl commands.

      +
      +

      Prerequisites

      • Before creating a containerized workload, you must have an available cluster. For details on how to create a cluster, see Creating a CCE Cluster.
      • To enable public access to a workload, ensure that an EIP or load balancer has been bound to at least one node in the cluster.

        If a pod has multiple containers, ensure that the ports used by the containers do not conflict with each other. Otherwise, creating the Deployment will fail.

        +
        +
      +
      +

      Using the CCE Console

      1. Log in to the CCE console.
      2. Click the cluster name to access the cluster details page, choose Workloads in the navigation pane, and click Create Workload in the upper right corner.
      3. Set basic information about the workload.

        Basic Info
        • Workload Type: Select Deployment. For details about workload types, see Overview.
        • Workload Name: Enter the name of the workload.
        • Namespace: Select the namespace of the workload. The default value is default. You can also click Create Namespace to create one. For details, see Creating a Namespace.
        • Pods: Enter the number of pods.
        • Container Runtime: A CCE cluster uses runC by default, whereas a CCE Turbo cluster supports both runC and Kata. For details about the differences between runC and Kata, see Kata Containers and Common Containers.
        • Time Zone Synchronization: Specify whether to enable time zone synchronization. After time zone synchronization is enabled, the container and node use the same time zone. The time zone synchronization function depends on the local disk mounted to the container. Do not modify or delete the time zone. For details, see Configuring Time Zone Synchronization.
        +
        +
        Container Settings
        • Container Information
          Multiple containers can be configured in a pod. You can click Add Container on the right to configure multiple containers for the pod.
          +
        • Image Access Credential: Select the credential used for accessing the image repository. The default value is default-secret. You can use default-secret to access images in SWR. For details about default-secret, see default-secret.
        • GPU graphics card: All is selected by default. The workload instance will be scheduled to the node with the specified GPU graphics card type.
        +
        +

        Service Settings

        +

        A Service is used for pod access. With a fixed IP address, a Service forwards access traffic to pods and performs load balancing for these pods.

        +

        You can also create a Service after creating a workload. For details about the Service, see Service Overview.

        +
        Advanced Settings
        +

      4. Click Create Workload in the lower right corner.
      +
      +

      Using kubectl

      The following procedure uses Nginx as an example to describe how to create a workload using kubectl.

      +
      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Create and edit the nginx-deployment.yaml file. nginx-deployment.yaml is an example file name. You can rename it as required.

        vi nginx-deployment.yaml

        +

        The following is an example YAML file. For more information about Deployments, see Kubernetes documentation.

        +
        apiVersion: apps/v1
        +kind: Deployment
        +metadata:
        +  name: nginx
        +spec:
        +  replicas: 1
        +  selector:
        +    matchLabels:
        +      app: nginx
        +  strategy:
        +    type: RollingUpdate
        +  template:
        +    metadata:
        +      labels:
        +        app: nginx
        +    spec:
        +      containers:
        +      - image: nginx    # If you use an image in My Images, obtain the image path from SWR.
        +        imagePullPolicy: Always
        +        name: nginx
        +      imagePullSecrets:
        +      - name: default-secret
        +

        For details about these parameters, see Table 1.

        + +
        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Table 1 Deployment YAML parameters

        Parameter

        +

        Description

        +

        Mandatory/Optional

        +

        apiVersion

        +

        API version.

        +
        NOTE:

        Set this parameter based on the cluster version.

        +
        • For clusters of v1.17 or later, the apiVersion format of Deployments is apps/v1.
        • For clusters of v1.15 or earlier, the apiVersion format of Deployments is extensions/v1beta1.
        +
        +

        Mandatory

        +

        kind

        +

        Type of a created object.

        +

        Mandatory

        +

        metadata

        +

        Metadata of a resource object.

        +

        Mandatory

        +

        name

        +

        Name of the Deployment.

        +

        Mandatory

        +

        Spec

        +

        Detailed description of the Deployment.

        +

        Mandatory

        +

        replicas

        +

        Number of pods.

        +

        Mandatory

        +

        selector

        +

        Determines container pods that can be managed by the Deployment.

        +

        Mandatory

        +

        strategy

        +

        Upgrade mode. Possible values:

        +
        • RollingUpdate: pods are updated gradually.
        • Recreate: all existing pods are deleted before new ones are created.
        +

        By default, rolling update is used.

        +

        Optional

        +

        template

        +

        Detailed description of a created container pod.

        +

        Mandatory

        +

        metadata

        +

        Metadata.

        +

        Mandatory

        +

        labels

        +

        metadata.labels: Container labels.

        +

        Optional

        +

        spec:

        +

        containers

        +
        • image (mandatory): Name of a container image.
        • imagePullPolicy (optional): Policy for obtaining an image. The options include Always (attempting to download images each time), Never (only using local images), and IfNotPresent (using local images if they are available; downloading images if local images are unavailable). The default value is Always if the image tag is omitted or set to latest; otherwise, the default value is IfNotPresent.
        • name (mandatory): Container name.
        +

        Mandatory

        +

        imagePullSecrets

        +

        Name of the secret used during image pulling. If a private image is used, this parameter is mandatory.

        +
        • To pull an image from the Software Repository for Container (SWR), set this parameter to default-secret.
        • To pull an image from a third-party image repository, set this parameter to the name of the created secret.
        +

        Optional

        +
        +
        +

      3. Create a Deployment.

        kubectl create -f nginx-deployment.yaml

        +

        If the following information is displayed, the Deployment is being created.

        +
        deployment "nginx" created
        +

      4. Query the Deployment status.

        kubectl get deployment

        +

        If the following information is displayed, the Deployment is running.

        +
        NAME           READY     UP-TO-DATE   AVAILABLE   AGE 
        +nginx          1/1       1            1           4m5s
        +

        Parameter description

        +
        • NAME: name of the Deployment.
        • READY: number of ready pods, displayed as "number of ready pods/number of expected pods".
        • UP-TO-DATE: number of replicas that have been updated.
        • AVAILABLE: number of available pods.
        • AGE: how long the Deployment has been running.
        +

      5. If the Deployment will be accessed through a ClusterIP or NodePort Service, add the corresponding Service. For details, see Networking.
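        For example, a minimal NodePort Service for the nginx Deployment above could look like the following sketch (the file name nginx-nodeport-svc.yaml and the port values are illustrative):

        apiVersion: v1
        kind: Service
        metadata:
          name: nginx
        spec:
          type: NodePort
          selector:
            app: nginx        # must match the labels of the Deployment's pods
          ports:
          - port: 80          # Service port
            targetPort: 80    # container port
            protocol: TCP

        kubectl create -f nginx-nodeport-svc.yaml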
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0048.html b/docs/cce/umn/cce_10_0048.html new file mode 100644 index 00000000..ebe858d0 --- /dev/null +++ b/docs/cce/umn/cce_10_0048.html @@ -0,0 +1,126 @@ + + +

      Creating a StatefulSet

      +

      Scenario

      StatefulSets are a type of workload whose data or status is stored while they are running. For example, MySQL is a StatefulSet because it needs to store new data.

      +

      A container can be migrated between different hosts, but data is not stored on the hosts. To store StatefulSet data persistently, attach HA storage volumes provided by CCE to the container.

      +
      +

      Notes and Constraints

      • When you delete or scale a StatefulSet, the system does not delete the storage volumes associated with the StatefulSet to ensure data security.
      • When you delete a StatefulSet, reduce the number of replicas to 0 before deleting the StatefulSet so that pods in the StatefulSet can be stopped in order.
      • When you create a StatefulSet, a headless Service is required for pod access. For details, see Headless Service.
      • When a node is unavailable, pods become Unready. In this case, you need to manually delete the pods of the StatefulSet so that the pods can be migrated to a normal node.
      +
      +

      Prerequisites

      • Before creating a workload, you must have an available cluster. For details on how to create a cluster, see Creating a CCE Cluster.
      • To enable public access to a workload, ensure that an EIP or load balancer has been bound to at least one node in the cluster.

        If a pod has multiple containers, ensure that the ports used by the containers do not conflict with each other. Otherwise, creating the StatefulSet will fail.

        +
        +
      +
      +

      Using the CCE Console

      1. Log in to the CCE console.
      2. Click the cluster name to access the cluster details page, choose Workloads in the navigation pane, and click Create Workload in the upper right corner.
      3. Set basic information about the workload.

        Basic Info
        • Workload Type: Select StatefulSet. For details about workload types, see Overview.
        • Workload Name: Enter the name of the workload.
        • Namespace: Select the namespace of the workload. The default value is default. You can also click Create Namespace to create one. For details, see Creating a Namespace.
        • Pods: Enter the number of pods.
        • Container Runtime: A CCE cluster uses runC by default, whereas a CCE Turbo cluster supports both runC and Kata. For details about the differences between runC and Kata, see Kata Containers and Common Containers.
        • Time Zone Synchronization: Specify whether to enable time zone synchronization. After time zone synchronization is enabled, the container and node use the same time zone. The time zone synchronization function depends on the local disk mounted to the container. Do not modify or delete the time zone. For details, see Configuring Time Zone Synchronization.
        +
        +
        Container Settings
        • Container Information
          Multiple containers can be configured in a pod. You can click Add Container on the right to configure multiple containers for the pod.
          • Basic Info: See Setting Basic Container Information.
          • Lifecycle: See Setting Container Lifecycle Parameters.
          • Health Check: See Setting Health Check for a Container.
          • Environment Variables: See Setting an Environment Variable.
          • Data Storage: See Overview.
            • StatefulSets support dynamically provisioned EVS volumes.

              Dynamic mounting is achieved by using the volumeClaimTemplates field and depends on the dynamic creation capability of StorageClass. A StatefulSet associates each pod with a unique PVC using the volumeClaimTemplates field, and the PVCs are bound to their corresponding PVs. Therefore, after the pod is rescheduled, the original data can still be mounted thanks to the PVC.

              +
            • After a workload is created, the storage that is dynamically mounted cannot be updated.
            +
            +
          • Security Context: Set the user ID used to run the container to restrict container permissions and protect the system and other containers from being affected.
          • Logging: See Using ICAgent to Collect Container Logs.
          +
          +
        • Image Access Credential: Select the credential used for accessing the image repository. The default value is default-secret. You can use default-secret to access images in SWR. For details about default-secret, see default-secret.
        • GPU graphics card: All is selected by default. The workload instance will be scheduled to the node with the specified GPU graphics card type.
        +
        +

        Headless Service Parameters

        +

        A headless Service is used to solve the problem of mutual access between pods in a StatefulSet. The headless Service provides a fixed access domain name for each pod. For details, see Headless Service.

        +

        Service Settings

        +

        A Service is used for pod access. With a fixed IP address, a Service forwards access traffic to pods and performs load balancing for these pods.

        +

        You can also create a Service after creating a workload. For details about the Service, see Service Overview.

        +
        Advanced Settings
        • Upgrade: See Configuring the Workload Upgrade Policy.
        • Scheduling: See Scheduling Policy (Affinity/Anti-affinity).
        • Instances Management Policies

          For some distributed systems, strict pod ordering is unnecessary or undesirable. These systems require only uniqueness and stable identifiers.

          +
          • OrderedReady: The StatefulSet will deploy, delete, or scale pods in order and one by one. (The StatefulSet continues only after the previous pod is ready or deleted.) This is the default policy.
          • Parallel: The StatefulSet will create pods in parallel to match the desired scale without waiting, and will delete all pods at once.
          +
        • Toleration: Using both taints and tolerations allows (but does not force) the pod to be scheduled to a node with the matching taints, and controls the pod eviction policies after the node where the pod is located is tainted. For details, see Tolerations.
        • Labels and Annotations: See Pod Labels and Annotations.
        • DNS: See DNS Configuration.
        +
        +

      4. Click Create Workload in the lower right corner.
      +
      +

      Using kubectl

      In this example, an nginx workload is used and the EVS volume is dynamically mounted to it using the volumeClaimTemplates field.

      +
      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Create and edit the nginx-statefulset.yaml file.

        nginx-statefulset.yaml is an example file name, and you can change it as required.

        +

        vi nginx-statefulset.yaml

        +

        The following provides an example of the file contents. For more information on StatefulSet, see the Kubernetes documentation.

        +
        apiVersion: apps/v1
        +kind: StatefulSet
        +metadata:
        +  name: nginx
        +spec:
        +  selector:
        +    matchLabels:
        +      app: nginx
        +  template:
        +    metadata:
        +      labels:
        +        app: nginx
        +    spec:
        +      containers:
        +        - name: container-1
        +          image: nginx:latest
        +          imagePullPolicy: IfNotPresent
        +          resources:
        +            requests:
        +              cpu: 250m
        +              memory: 512Mi
        +            limits:
        +              cpu: 250m
        +              memory: 512Mi
        +          volumeMounts:
        +            - name: test
        +              readOnly: false
        +              mountPath: /usr/share/nginx/html
        +              subPath: ''
        +      imagePullSecrets:
        +        - name: default-secret
        +      dnsPolicy: ClusterFirst
        +      volumes: []
        +  serviceName: nginx-svc
        +  replicas: 2
        +volumeClaimTemplates:  # Dynamically mounts the EVS volume to the workload.
        +    - apiVersion: v1
        +      kind: PersistentVolumeClaim
        +      metadata:
        +        name: test
        +        namespace: default
        +        annotations:
        +          everest.io/disk-volume-type: SAS  # SAS EVS volume type.
        +        labels:
        +          failure-domain.beta.kubernetes.io/region: eu-de  # region where the EVS volume is created.
        +          failure-domain.beta.kubernetes.io/zone:    # AZ where the EVS volume is created. It must be the same as the AZ of the node.
        +      spec:
        +        accessModes:
        +          - ReadWriteOnce  # The value must be ReadWriteOnce for the EVS volume.
        +        resources:
        +          requests:
        +            storage: 10Gi
        +        storageClassName: csi-disk # Storage class name. The value is csi-disk for the EVS volume.
        +  updateStrategy:
        +    type: RollingUpdate
        +

        vi nginx-headless.yaml

        +
        apiVersion: v1
        +kind: Service
        +metadata:
        +  name: nginx-svc
        +  namespace: default
        +  labels:
        +    app: nginx
        +spec:
        +  selector:
        +    app: nginx
        +    version: v1
        +  clusterIP: None
        +  ports:
        +    - name: nginx
        +      targetPort: 80
        +      nodePort: 0
        +      port: 80
        +      protocol: TCP
        +  type: ClusterIP
        +

      3. Create a workload and the corresponding headless service.

        kubectl create -f nginx-statefulset.yaml

        +

        If the following information is displayed, the StatefulSet has been successfully created.

        +
        statefulset.apps/nginx created
        +

        kubectl create -f nginx-headless.yaml

        +

        If the following information is displayed, the headless service has been successfully created.

        +
        service/nginx-svc created
        +

      4. If the workload will be accessed through a ClusterIP or NodePort Service, set the corresponding workload access type. For details, see Networking.
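        After both objects are created, you can verify them with standard kubectl commands (the resource names below follow the preceding example).

        kubectl get statefulset nginx        # READY should show 2/2 once both pods are running.
        kubectl get pods -l app=nginx        # Pods are created in order and named nginx-0 and nginx-1.
        kubectl get pvc                      # One PVC per pod (test-nginx-0, test-nginx-1), each bound to an EVS volume.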
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0059.html b/docs/cce/umn/cce_10_0059.html new file mode 100644 index 00000000..0b1f95a4 --- /dev/null +++ b/docs/cce/umn/cce_10_0059.html @@ -0,0 +1,171 @@ + + +

      Network Policies

      +

      NetworkPolicy is a Kubernetes object used to restrict pod access. In CCE, by setting network policies, you can define ingress rules specifying the addresses to access pods or egress rules specifying the addresses pods can access. This is equivalent to setting up a firewall at the application layer to further ensure network security.

      +

      Network policies depend on the networking add-on of the cluster to which the policies apply.

      +

      By default, if a namespace does not have any policy, pods in the namespace accept traffic from any source and send traffic to any destination.

      +
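      For example, the following minimal policy (standard Kubernetes syntax, not specific to CCE) selects all pods in the default namespace and allows no ingress traffic, effectively isolating them until other policies open specific paths.

      apiVersion: networking.k8s.io/v1
      kind: NetworkPolicy
      metadata:
        name: default-deny-ingress
        namespace: default
      spec:
        podSelector: {}        # An empty selector matches all pods in the namespace.
        policyTypes:
        - Ingress              # No ingress rules are defined, so all inbound traffic is denied.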

      Network policy rules are classified into the following types:

      +
      • namespaceSelector: selects particular namespaces for which all pods should be allowed as ingress sources or egress destinations.
      • podSelector: selects particular pods in the same namespace as the network policy which should be allowed as ingress sources or egress destinations.
      • ipBlock: selects particular IP blocks to allow as ingress sources or egress destinations. (Only egress rules support IP blocks.)
      +

      Notes and Constraints

      • Only clusters that use the tunnel network model support network policies.
      • Network isolation is not supported for IPv6 addresses.
      • Egress rules are supported only by clusters of v1.23 or later.

        Egress rules are supported only in the following operating systems:

        +
        • EulerOS 2.9: kernel version 4.18.0-147.5.1.6.h541.eulerosv2r9.x86_64
        • CentOS 7.7: kernel version 3.10.0-1062.18.1.el7.x86_64
        • EulerOS 2.5: kernel version 3.10.0-862.14.1.5.h591.eulerosv2r7.x86_64
        +
      • If a cluster is upgraded to v1.23 in in-place mode, you cannot use egress rules because the node OS is not upgraded. In this case, reset the node.
      +
      +

      Using Ingress Rules

      • Using podSelector to specify the access scope
        apiVersion: networking.k8s.io/v1
        +kind: NetworkPolicy
        +metadata:
        +  name: test-network-policy
        +  namespace: default
        +spec:
        +  podSelector:                  # The rule takes effect for pods with the role=db label.
        +    matchLabels:
        +      role: db
        +  ingress:                      #This is an ingress rule.
        +  - from:
        +    - podSelector:              #Only traffic from the pods with the role=frontend label is allowed.
        +        matchLabels:
        +          role: frontend
        +    ports:                      #Only TCP can be used to access port 6379.
        +    - protocol: TCP
        +      port: 6379
        +

        Diagram:

        +
        Figure 1 podSelector
        +
      +
      • Using namespaceSelector to specify the access scope
        apiVersion: networking.k8s.io/v1
        +kind: NetworkPolicy
        +metadata:
        +  name: test-network-policy
        +spec:
        +  podSelector:                  # The rule takes effect for pods with the role=db label.
        +    matchLabels:
        +      role: db
        +  ingress:                      #This is an ingress rule.
        +  - from:
        +    - namespaceSelector:        # Only traffic from the pods in the namespace with the "project=myproject" label is allowed.
        +        matchLabels:
        +          project: myproject
        +    ports:                      #Only TCP can be used to access port 6379.
        +    - protocol: TCP
        +      port: 6379
        +

        Figure 2 shows how namespaceSelector selects ingress sources.

        +
        Figure 2 namespaceSelector
        +
      +
      +

      Using Egress Rules

      Egress supports not only podSelector and namespaceSelector, but also ipBlock.

      +

      Only clusters of version 1.23 or later support egress rules. Currently, only EulerOS 2.5, EulerOS 2.9, and CentOS 7.7 nodes are supported.

      +
      +
      apiVersion: networking.k8s.io/v1
      +kind: NetworkPolicy
      +metadata:
      +  name: deny-client-a-via-except-cidr-egress-rule
      +  namespace: default
      +spec:
      +  policyTypes:                  # Must be specified for an egress rule.
      +    - Egress
      +  podSelector:                  # The rule takes effect for pods with the role=db label.
      +    matchLabels:
      +      role: db
      +  egress:                       # Egress rule
      +  - to:
      +    - ipBlock:
      +        cidr: 172.16.0.0/16     # Allow access to this CIDR block.
      +        except:
      +        - 172.16.0.40/32        # This CIDR block cannot be accessed. This value must fall within the range specified by cidr.
      +

      Diagram:

      +
      Figure 3 ipBlock
      +

      You can define ingress and egress in the same rule.

      +
      apiVersion: networking.k8s.io/v1
      +kind: NetworkPolicy
      +metadata:
      +  name: test-network-policy
      +  namespace: default
      +spec:
      +  policyTypes:
      +  - Ingress
      +  - Egress
      +  podSelector:                  # The rule takes effect for pods with the role=db label.
      +    matchLabels:
      +      role: db
      +  ingress:                      # Ingress rule
      +  - from:
      +    - podSelector:              #Only traffic from the pods with the "role=frontend" label is allowed.
      +        matchLabels:
      +          role: frontend
      +    ports:                      #Only TCP can be used to access port 6379.
      +    - protocol: TCP
      +      port: 6379
      +  egress:                       # Egress rule
      +  - to:
      +    - podSelector:              # Only pods with the role=web label can be accessed.
      +        matchLabels:
      +          role: web
      +

      Diagram:

      +
      Figure 4 Using both ingress and egress
      +
      +

      Creating a Network Policy on the Console

      1. Log in to the CCE console and access the cluster console.
      2. Choose Networking in the navigation pane, click the Network Policies tab, and click Create Network Policy in the upper right corner.

        • Policy Name: Specify a network policy name.
        • Namespace: Select a namespace in which the network policy is applied.
        • Selector: Enter a label, select the pod to be associated, and click Add. You can also click Reference Workload Label to reference the label of an existing workload.
        • Inbound Rule: Click the add button to add an inbound rule. For details about parameter settings, see Table 1.

          +
          +
          + + + + + + + + + + + + + +
          Table 1 Adding an inbound rule

          Parameter

          +

          Description

          +

          Protocol & Port

          +

          Select the protocol type and port. Currently, TCP and UDP are supported.

          +

          Source Namespace

          +

          Select the namespaces whose pods are allowed as sources. If this parameter is not specified, the source objects belong to the same namespace as the current policy.

          +

          Source Pod Label

          +

          Only traffic from pods with this label is allowed. If this parameter is not specified, all pods in the namespace are allowed as sources.

          +
          +
          +
          +
        • Outbound Rule: Click the add button to add an outbound rule. For details about parameter settings, see Table 2.
          +
          + + + + + + + + + + + + + + + + +
          Table 2 Adding an outbound rule

          Parameter

          +

          Description

          +

          Protocol & Port

          +

          Select the protocol type and port. Currently, TCP and UDP are supported. If this parameter is not specified, the protocol type is not limited.

          +

          Destination CIDR Block

          +

          Allows requests to be routed to a specified CIDR block (and not to the exception CIDR blocks). Separate the destination and exception CIDR blocks by a vertical bar (|), and separate multiple exception CIDR blocks by commas (,). For example, 172.17.0.0/16|172.17.1.0/24,172.17.2.0/24 indicates that 172.17.0.0/16 is accessible, but 172.17.1.0/24 and 172.17.2.0/24 are not.

          +

          Destination Namespace

          +

          Select the namespaces whose pods can be accessed. If this parameter is not specified, the destination objects belong to the same namespace as the current policy.

          +

          Destination Pod Label

          +

          Only pods with this label can be accessed. If this parameter is not specified, all pods in the namespace can be accessed.

          +
          +
          +
          +
        +

      3. Click OK.
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0063.html b/docs/cce/umn/cce_10_0063.html new file mode 100644 index 00000000..606aaad6 --- /dev/null +++ b/docs/cce/umn/cce_10_0063.html @@ -0,0 +1,26 @@ + + +

      Managing Node Scaling Policies

      +

      Scenario

      After a node scaling policy is created, you can delete, edit, disable, enable, or clone the policy.

      +
      +

      Viewing a Node Scaling Policy

      You can view the associated node pool, rules, and scaling history of a node scaling policy and rectify faults according to the error information displayed.

      +
      1. Log in to the CCE console and access the cluster console.
      2. Choose Node Scaling in the navigation pane and click the expand arrow in front of the policy to be viewed.
      3. In the expanded area, the Associated Node Pools, Rules, and Scaling History tab pages are displayed. If the policy is abnormal, locate and rectify the fault based on the error information.

        You can also disable or enable auto scaling on the Node Pools page.

        +
        1. Log in to the CCE console and access the cluster console.
        2. In the navigation pane, choose Nodes and switch to the Node Pools tab page.
        3. Click Edit next to the node pool to be operated. In the Edit Node Pool dialog box that is displayed, set the limits on the number of nodes.
        +
        +

      +
      +

      Deleting a Node Scaling Policy

      1. Log in to the CCE console and access the cluster console.
      2. Choose Node Scaling in the navigation pane and choose More > Delete next to the policy to be deleted.
      3. In the Delete Node Scaling Policy dialog box displayed, confirm whether to delete the policy.
      4. Click Yes to delete the policy.
      +
      +

      Editing a Node Scaling Policy

      1. Log in to the CCE console and access the cluster console.
      2. Choose Node Scaling in the navigation pane and click Edit in the Operation column of the policy to be edited.
      3. On the Edit Node Scaling Policy page displayed, modify policy parameter values listed in Table 1.
      4. After the configuration is complete, click OK.
      +
      +

      Cloning a Node Scaling Policy

      1. Log in to the CCE console and access the cluster console.
      2. Choose Node Scaling in the navigation pane and choose More > Clone next to the policy to be cloned.
      3. On the Clone Node Scaling Policy page displayed, certain parameters have been cloned. Add or modify other policy parameters based on service requirements.
      4. Click OK.
      +
      +

      Enabling or Disabling a Node Scaling Policy

      1. Log in to the CCE console and access the cluster console.
      2. Choose Node Scaling in the navigation pane and click Disable in the Operation column of the policy to be disabled. If the policy is in the disabled state, click Enable in the Operation column of the policy.
      3. In the dialog box displayed, confirm whether to disable or enable the node policy.
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0064.html b/docs/cce/umn/cce_10_0064.html new file mode 100644 index 00000000..4b6a9428 --- /dev/null +++ b/docs/cce/umn/cce_10_0064.html @@ -0,0 +1,27 @@ + + +

      Add-ons

      +
      + + diff --git a/docs/cce/umn/cce_10_0066.html b/docs/cce/umn/cce_10_0066.html new file mode 100644 index 00000000..05b49acb --- /dev/null +++ b/docs/cce/umn/cce_10_0066.html @@ -0,0 +1,30 @@ + + +

      everest (System Resource Add-On, Mandatory)

      +

      Introduction

      Everest is a cloud native container storage system. Based on the Container Storage Interface (CSI), it enables clusters of Kubernetes v1.15.6 or later to access cloud storage services.

      +

      everest is a system resource add-on. It is installed by default when a cluster of Kubernetes v1.15 or later is created.

      +
      +

      Notes and Constraints

      • If your cluster is upgraded from v1.13 to v1.15, storage-driver is replaced by everest (v1.1.6 or later) for container storage. The takeover does not affect the original storage functions.
      • In version 1.2.0 of the everest add-on, key authentication is optimized when OBS is used. After the everest add-on is upgraded from a version earlier than 1.2.0, you need to restart all workloads that use OBS in the cluster. Otherwise, workloads may not be able to use OBS.
      • By default, this add-on is installed in clusters of v1.15 and later. For clusters of v1.13 and earlier, the storage-driver add-on is installed by default.
      +
      +

      Installing the Add-on

      This add-on is installed by default. If it has been uninstalled for some reason, you can reinstall it by performing the following steps:

      +
      1. Log in to the CCE console and access the cluster console. Choose Add-ons in the navigation pane, locate everest on the right, and click Install.
      2. Select Standalone, HA, or Custom for Add-on Specifications.

        The everest add-on contains the following containers. You can adjust the specifications as required.
        • everest-csi-controller: A Deployment workload. This container is responsible for creating, deleting, snapshotting, expanding, attaching, and detaching volumes. If the cluster version is 1.19 or later and the add-on version is 1.2.x, the pod of the everest-csi-driver component also has an everest-localvolume-manager container by default. This container manages the creation of LVM storage pools and local PVs on the node.
          If you select Custom, the recommended everest-csi-controller memory configuration is as follows:
          • If the number of pods and PVCs is less than 2000, set the memory upper limit to 600 MiB.
          • If the number of pods and PVCs is less than 5000, set the memory upper limit to 1 GiB.
          +
          +
          +
        • everest-csi-driver: A DaemonSet workload. This container is responsible for mounting and unmounting PVs and resizing file systems. If the add-on version is 1.2.x and the region where the cluster is located supports node-attacher, the pod of the everest-csi-driver component also contains an everest-node-attacher container. This container is responsible for distributed EVS attaching. This configuration item is available in some regions.

          If you select Custom, it is recommended that the everest-csi-driver memory limit be greater than or equal to 300 MiB. If the value is too small, the add-on container cannot be started and the add-on is unavailable.

          +
          +
        +
        +

      3. Set related parameters.

        In everest 1.2.26 or later, the performance of attaching a large number of EVS volumes is optimized. The following three parameters are provided:
        • csi_attacher_worker_threads: number of workers that can concurrently mount EVS volumes. The default value is 60.
        • csi_attacher_detach_worker_threads: number of workers that can concurrently unmount EVS volumes. The default value is 60.
        • volume_attaching_flow_ctrl: maximum number of EVS volumes that can be mounted by the everest add-on within one minute. The default value is 0, indicating that the EVS volume mounting performance is determined by the underlying storage resources.
        +
        +

        The preceding three parameters are associated with each other and are constrained by the underlying storage resources in the region where the cluster is located. If you want to mount a large number of volumes (more than 500 EVS volumes per minute), you can contact the customer service personnel and configure the parameters under their guidance to prevent the everest add-on from running abnormally due to improper parameter settings.

        +
        Other parameters
        • cluster_id: cluster ID
        • default_vpc_id: ID of the VPC to which the cluster belongs
        • disable_auto_mount_secret: indicates whether the default AK/SK can be used when an object bucket or parallel file system is mounted. The default value is false.
        • enable_node_attacher: indicates whether to enable the attacher on the agent to process the VolumeAttachment.
        • flow_control: This parameter is left blank by default.
        • over_subscription: overcommitment ratio of the local storage pool (local_storage). The default value is 80. If the size of the local storage pool is 100 GB, you can overcommit 180 GB.
        • project_id: ID of the project to which the cluster belongs.
        +
        +

      4. Click Install.
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0068.html b/docs/cce/umn/cce_10_0068.html new file mode 100644 index 00000000..e893604b --- /dev/null +++ b/docs/cce/umn/cce_10_0068.html @@ -0,0 +1,23 @@ + + +

      Release Notes

      +
      + + diff --git a/docs/cce/umn/cce_10_0081.html b/docs/cce/umn/cce_10_0081.html new file mode 100644 index 00000000..2fec523e --- /dev/null +++ b/docs/cce/umn/cce_10_0081.html @@ -0,0 +1,135 @@ + + +

      Node Pool Overview

      +

      Introduction

      CCE introduces node pools to help you better manage nodes in Kubernetes clusters. A node pool contains one node or a group of nodes with identical configuration in a cluster.

      +

      You can create custom node pools on the CCE console. With node pools, you can quickly create, manage, and destroy nodes without affecting the cluster. All nodes in a custom node pool have identical parameters and node type. You cannot configure a single node in a node pool; any configuration changes affect all nodes in the node pool.

      +

      You can also use node pools for auto scaling.

      +
      • When a pod in a cluster cannot be scheduled due to insufficient resources, scale-out can be automatically triggered.
      • When there is an idle node or a monitoring metric threshold is met, scale-in can be automatically triggered.
      +

      This section describes how node pools work in CCE and how to create and manage node pools.

      +
      +

      Node Pool Architecture

      Generally, all nodes in a node pool have the following same attributes:

      +
      • Node OS
      • Node specifications
      • Node login mode
      • Node runtime
      • Startup parameters of Kubernetes components on a node
      • User-defined startup script of a node
      • K8s Labels and Taints
      +

      CCE provides the following extended attributes for node pools:

      +
      • Node pool OS
      • Maximum number of pods on each node in a node pool
      +
      +

      Description of DefaultPool

      DefaultPool is not a real node pool. It only classifies nodes that are not in the user-created node pools. These nodes are directly created on the console or by calling APIs. DefaultPool does not support any user-created node pool functions, including scaling and parameter configuration. DefaultPool cannot be edited, deleted, expanded, or auto scaled, and nodes in it cannot be migrated.

      +
      +

      Applicable Scenarios

      When a large-scale cluster is required, you are advised to use node pools to manage nodes.

      +

      The following table describes multiple scenarios of large-scale cluster management and the functions of node pools in each scenario.

      + +
      + + + + + + + + + + + + + +
      Table 1 Using node pools for different management scenarios

      Scenario

      +

      Function

      +

      Multiple heterogeneous nodes (with different models and configurations) in the cluster

      +

      Nodes can be grouped into different pools for management.

      +

      Frequent node scaling required in a cluster

      +

      Node pools support auto scaling to dynamically add or reduce nodes.

      +

      Complex application scheduling rules in a cluster

      +

      Node pool tags can be used to quickly specify service scheduling rules.

      +
      +
      +
      +

      Functions and Precautions

      +
      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

      Function

      +

      Description

      +

      Notes

      +

      Creating a node pool

      +

      Add a node pool.

      +

      It is recommended that a cluster contains no more than 100 node pools.

      +

      Deleting a node pool

      +

      Deleting a node pool will delete nodes in the pool. Pods on these nodes will be automatically migrated to available nodes in other node pools.

      +

      If pods in the node pool have a specific node selector and none of the other nodes in the cluster satisfies the node selector, the pods will become unschedulable.

      +

      Enabling auto scaling for a node pool

      +

      After auto scaling is enabled, nodes will be automatically created or deleted in the node pool based on the cluster loads.

      +

      You are advised not to store important data on nodes in a node pool because after auto scaling, data cannot be restored as nodes may be deleted.

      +

      Disabling auto scaling for a node pool

      +

      After auto scaling is disabled, the number of nodes in a node pool will not automatically change with the cluster loads.

      +

      /

      +

      Adjusting the size of a node pool

      +

      The number of nodes in a node pool can be directly adjusted. If the number of nodes is reduced, nodes are randomly removed from the current node pool.

      +

      After auto scaling is enabled, you are not advised to manually adjust the node pool size.

      +

      Changing node pool configurations

      +

      You can modify the node pool name, node quantity, Kubernetes labels (and their quantity), and taints.

      +

      The deleted or added Kubernetes labels and taints (as well as their quantity) will apply to all nodes in the node pool, which may cause pod re-scheduling. Therefore, exercise caution when performing this operation.

      +

      Removing a node from a node pool

      +

      Nodes in a node pool can be migrated to the default node pool of the same cluster.

      +

      Nodes in the default node pool cannot be migrated to other node pools, and nodes in a user-created node pool cannot be migrated to other user-created node pools.

      +

      Cloning a node pool

      +

      You can copy the configuration of an existing node pool to create a new node pool.

      +

      /

      +

      Setting Kubernetes parameters

      +

      You can configure core components with fine granularity.

      +
      • This function is supported only in clusters of v1.15 and later. It is not displayed for versions earlier than v1.15.
      • The default node pool DefaultPool does not support this type of configuration.
      +
      +
      +
      +

      Deploying a Workload in a Specified Node Pool

      When creating a workload, you can constrain pods to run in a specified node pool.

      +

      For example, on the CCE console, you can set the affinity between the workload and the node on the Scheduling Policies tab page on the workload details page to forcibly deploy the workload to a specific node pool. In this way, the workload runs only on nodes in the node pool. If you need to better control where the workload is to be scheduled, you can use affinity or anti-affinity policies between workloads and nodes described in Scheduling Policy (Affinity/Anti-affinity).

      +

      You can also set container resource requests so that the scheduler places the workload only on nodes whose allocatable resources meet the requests.

      +

      If the workload definition file defines a container that requires four CPUs, the scheduler will not choose the nodes with two CPUs to run workloads.

      +
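      A minimal sketch combining both constraints is shown below. The nodeSelector label key/value (pool: gpu-pool) is only a placeholder for a Kubernetes label you have configured on the target node pool, and the image and resource figures are illustrative.

      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: demo
      spec:
        replicas: 1
        selector:
          matchLabels:
            app: demo
        template:
          metadata:
            labels:
              app: demo
          spec:
            nodeSelector:
              pool: gpu-pool          # Hypothetical label added to all nodes of the target node pool.
            containers:
            - name: demo
              image: nginx:latest
              resources:
                requests:
                  cpu: "4"            # The scheduler skips nodes with fewer than 4 allocatable CPUs.
                  memory: 8Gi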
      +

      Related Operations

      You can log in to the CCE console and refer to the following sections to perform operations on node pools:

      + +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0083.html b/docs/cce/umn/cce_10_0083.html new file mode 100644 index 00000000..ab9defe2 --- /dev/null +++ b/docs/cce/umn/cce_10_0083.html @@ -0,0 +1,106 @@ + + +

      Managing Workload Scaling Policies

      +

      Scenario

      After an HPA policy is created, you can update, clone, edit, and delete the policy, as well as edit the YAML file.

      +
      +

      Checking an HPA Policy

      You can view the rules, status, and events of an HPA policy and handle exceptions based on the error information displayed.

      +
      1. Log in to the CCE console and access the cluster console.
      2. In the navigation pane, choose Workload Scaling. On the HPA Policies tab page, click the expand arrow next to the target HPA policy.
      3. In the expanded area, you can view the Rules, Status, and Events tab pages. If the policy is abnormal, locate and rectify the fault based on the error information.

        You can also view the created HPA policy on the workload details page.

        +
        1. Log in to the CCE console and access the cluster console.
        2. In the navigation pane, choose Workloads. Click the workload name to view its details.
        3. On the workload details page, switch to the Auto Scaling tab page to view the HPA policies. You can also view the scaling policies you configured in Workload Scaling.
        +
        + +
        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Table 1 Event types and names

        Event Type

        +

        Event Name

        +

        Description

        +

        Normal

        +

        SuccessfulRescale

        +

        The scaling is performed successfully.

        +

        Abnormal

        +

        InvalidTargetRange

        +

        Invalid target range.

        +

        InvalidSelector

        +

        Invalid selector.

        +

        FailedGetObjectMetric

        +

        Objects fail to be obtained.

        +

        FailedGetPodsMetric

        +

        Pods fail to be obtained.

        +

        FailedGetResourceMetric

        +

        Resources fail to be obtained.

        +

        FailedGetExternalMetric

        +

        External metrics fail to be obtained.

        +

        InvalidMetricSourceType

        +

        Invalid metric source type.

        +

        FailedConvertHPA

        +

        HPA conversion failed.

        +

        FailedGetScale

        +

        The scale fails to be obtained.

        +

        FailedComputeMetricsReplicas

        +

        Failed to calculate metric-defined replicas.

        +

        FailedGetScaleWindow

        +

        Failed to obtain ScaleWindow.

        +

        FailedRescale

        +

        Failed to scale the service.

        +
        +
        +

      +
      +

      Updating an HPA Policy

      An HPA policy is used as an example.

      +
      1. Log in to the CCE console and access the cluster console.
      2. In the navigation pane, choose Workload Scaling. Click Update in the Operation column of the target policy.
      3. On the Update HPA Policy page displayed, set the policy parameters listed in Table 1.
      4. Click Update.
      +
      +

      Editing the YAML File (HPA Policy)

      1. Log in to the CCE console and access the cluster console.
      2. In the navigation pane, choose Workload Scaling. Click More > Edit YAML in the Operation column of the target HPA policy.
      3. In the Edit YAML dialog box displayed, edit or download the YAML file. (A sample HPA manifest is shown after these steps.)
      4. Click the close button in the upper right corner.
      +
      +
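      For reference, the following is a minimal sketch of the kind of HorizontalPodAutoscaler manifest you may see in this dialog box. It uses standard autoscaling/v2 syntax (older clusters may require autoscaling/v2beta2), and the workload name and thresholds are placeholders.

      apiVersion: autoscaling/v2
      kind: HorizontalPodAutoscaler
      metadata:
        name: nginx-hpa
        namespace: default
      spec:
        scaleTargetRef:               # Workload to be scaled.
          apiVersion: apps/v1
          kind: Deployment
          name: nginx
        minReplicas: 2
        maxReplicas: 10
        metrics:
        - type: Resource
          resource:
            name: cpu
            target:
              type: Utilization
              averageUtilization: 70  # Scale out when average CPU usage exceeds 70%.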

      Deleting an HPA Policy

      1. Log in to the CCE console and access the cluster console.
      2. In the navigation pane, choose Workload Scaling. Click Delete in the Operation column of the target policy.
      3. In the dialog box displayed, click Yes.
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0091.html b/docs/cce/umn/cce_10_0091.html new file mode 100644 index 00000000..2e7c452c --- /dev/null +++ b/docs/cce/umn/cce_10_0091.html @@ -0,0 +1,25 @@ + + +

      Clusters

      +
      + + diff --git a/docs/cce/umn/cce_01_0094.html b/docs/cce/umn/cce_10_0094.html similarity index 62% rename from docs/cce/umn/cce_01_0094.html rename to docs/cce/umn/cce_10_0094.html index 5a79eda9..eb548c61 100644 --- a/docs/cce/umn/cce_01_0094.html +++ b/docs/cce/umn/cce_10_0094.html @@ -1,21 +1,21 @@ - + -

      Overview

      -

      Why We Need Ingresses

      A Service is generally used to forward access requests based on TCP and UDP and provide layer-4 load balancing for clusters. However, in actual scenarios, if there is a large number of HTTP/HTTPS access requests on the application layer, the Service cannot meet the forwarding requirements. Therefore, the Kubernetes cluster provides an HTTP-based access mode, that is, ingress.

      -

      An ingress is an independent resource in the Kubernetes cluster and defines rules for forwarding external access traffic. As shown in Figure 1, you can customize forwarding rules based on domain names and URLs to implement fine-grained distribution of access traffic.

      -
      Figure 1 Ingress diagram
      -

      The following describes the ingress-related definitions:

      -
      • Ingress object: a set of access rules that forward requests to specified Services based on domain names or URLs. It can be added, deleted, modified, and queried by calling APIs.
      • Ingress Controller: an executor for request forwarding. It monitors the changes of resource objects such as ingresses, Services, endpoints, secrets (mainly TLS certificates and keys), nodes, and ConfigMaps in real time, parses rules defined by ingresses, and forwards requests to the corresponding backend Services.
      +

      Ingress Overview

      +

      Why We Need Ingresses

      A Service is generally used to forward access requests based on TCP and UDP and provide layer-4 load balancing for clusters. However, in actual scenarios, if there is a large number of HTTP/HTTPS access requests on the application layer, the Service cannot meet the forwarding requirements. Therefore, the Kubernetes cluster provides an HTTP-based access mode, that is, ingress.

      +

      An ingress is an independent resource in the Kubernetes cluster and defines rules for forwarding external access traffic. As shown in Figure 1, you can customize forwarding rules based on domain names and URLs to implement fine-grained distribution of access traffic.

      +
      Figure 1 Ingress diagram
      +

      The following describes the ingress-related definitions:

      +
      • Ingress object: a set of access rules that forward requests to specified Services based on domain names or URLs. It can be added, deleted, modified, and queried by calling APIs.
      • Ingress Controller: an executor for request forwarding. It monitors the changes of resource objects such as ingresses, Services, endpoints, secrets (mainly TLS certificates and keys), nodes, and ConfigMaps in real time, parses rules defined by ingresses, and forwards requests to the corresponding backend Services.
      -

      Working Principle of ELB Ingress Controller

      ELB Ingress Controller developed by CCE implements layer-7 network access for the internet and intranet (in the same VPC) based on ELB and distributes access traffic to the corresponding Services using different URLs.

      -

      ELB Ingress Controller is deployed on the master node and bound to the load balancer in the VPC where the cluster resides. Different domain names, ports, and forwarding policies can be configured for the same load balancer (with the same IP address). Figure 2 shows the working principle of ELB Ingress Controller.

      -
      1. A user creates an ingress object and configures a traffic access rule in the ingress, including the load balancer, URL, SSL, and backend service port.
      2. When Ingress Controller detects that the ingress object changes, it reconfigures the listener and backend server route on the ELB side according to the traffic access rule.
      3. When a user accesses a workload, the traffic is forwarded to the corresponding backend service port based on the forwarding policy configured on ELB, and then forwarded to each associated workload through the Service.
      -
      Figure 2 Working principle of ELB Ingress Controller
      +

      Working Principle of ELB Ingress Controller

      ELB Ingress Controller developed by CCE implements layer-7 network access for the internet and intranet (in the same VPC) based on ELB and distributes access traffic to the corresponding Services using different URLs.

      +

      ELB Ingress Controller is deployed on the master node and bound to the load balancer in the VPC where the cluster resides. Different domain names, ports, and forwarding policies can be configured for the same load balancer (with the same IP address). Figure 2 shows the working principle of ELB Ingress Controller.

      +
      1. A user creates an ingress object and configures a traffic access rule in the ingress, including the load balancer, URL, SSL, and backend service port.
      2. When Ingress Controller detects that the ingress object changes, it reconfigures the listener and backend server route on the ELB side according to the traffic access rule.
      3. When a user accesses a workload, the traffic is forwarded to the corresponding backend service port based on the forwarding policy configured on ELB, and then forwarded to each associated workload through the Service.
      +
      Figure 2 Working principle of ELB Ingress Controller
      diff --git a/docs/cce/umn/cce_10_0105.html b/docs/cce/umn/cce_10_0105.html new file mode 100644 index 00000000..9941eeba --- /dev/null +++ b/docs/cce/umn/cce_10_0105.html @@ -0,0 +1,197 @@ + + +

      Setting Container Lifecycle Parameters

      +

      Scenario

      CCE provides callback functions for the lifecycle management of containerized applications. For example, if you want a container to perform a certain operation before stopping, you can register a hook function.

      +

      CCE provides the following lifecycle callback functions:

      +
      • Startup Command: executed to start a container. For details, see Startup Commands.
      • Post-Start: executed immediately after a container is started. For details, see Post-Start Processing.
      • Pre-Stop: executed before a container is stopped. The pre-stop processing function helps you ensure that the services running on the pods can be completed in advance in the case of pod upgrade or deletion. For details, see Pre-Stop Processing.
      +
      +

      Startup Commands

      By default, a container runs the default command specified in the image when it starts. To run a specific command or overwrite the default image settings, perform the following configurations:

      +

      A Docker image has metadata that stores image information. If lifecycle commands and arguments are not set, CCE runs the default commands and arguments, that is, Docker instructions ENTRYPOINT and CMD, provided during image creation.

      +

      If the commands and arguments used to run a container are set during application creation, they overwrite the default ENTRYPOINT and CMD defined during image build. The rules are as follows:

      + +
      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      Table 1 Commands and arguments used to run a container

      Image ENTRYPOINT

      +

      Image CMD

      +

      Command to Run a Container

      +

      Parameters to Run a Container

      +

      Command Executed

      +

      [touch]

      +

      [/root/test]

      +

      Not set

      +

      Not set

      +

      [touch /root/test]

      +

      [touch]

      +

      [/root/test]

      +

      [mkdir]

      +

      Not set

      +

      [mkdir]

      +

      [touch]

      +

      [/root/test]

      +

      Not set

      +

      [/opt/test]

      +

      [touch /opt/test]

      +

      [touch]

      +

      [/root/test]

      +

      [mkdir]

      +

      [/opt/test]

      +

      [mkdir /opt/test]

      +
      +
      +
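      The following sketch shows how command and args in a pod spec map to the table above: command replaces the image ENTRYPOINT and args replaces the image CMD (the image and commands are placeholders).

      apiVersion: v1
      kind: Pod
      metadata:
        name: lifecycle-demo
      spec:
        containers:
        - name: demo
          image: nginx:latest
          command: ["/bin/sh", "-c"]                 # Overrides the image ENTRYPOINT.
          args: ["touch /tmp/ready && sleep 3600"]   # Overrides the image CMD.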
      1. Log in to the CCE console. When creating a workload, configure container information and select Lifecycle.
      2. Enter a command and arguments on the Startup Command tab page.

        +

        + + + + + + + + + + +
        Table 2 Container startup command

        Configuration Item

        +

        Procedure

        +

        Command

        +

        Enter an executable command, for example, /run/server.

        +

        If there are multiple commands, separate them with spaces. If a command itself contains a space, enclose it in quotation marks ("").

        +
        NOTE:

        In the case of multiple commands, you are advised to use /bin/sh or another shell to run them, and pass the actual commands as arguments to the shell.

        +
        +

        Args

        +

        Enter the argument that controls the container running command, for example, --port=8080.

        +

        If there are multiple arguments, separate them in different lines.

        +
        +
        +

      +
      +

      Post-Start Processing

      1. Log in to the CCE console. When creating a workload, configure container information and select Lifecycle.
      2. Set the post-start processing parameters on the Post-Start tab page.

        +

        + + + + + + + + + + +
        Table 3 Post-start processing parameters

        Parameter

        +

        Description

        +

        CLI

        +

        Set commands to be executed in the container for post-start processing. The command format is Command Args[1] Args[2].... Command is a system command or a user-defined executable program. If no path is specified, an executable program in the default path will be selected. If multiple commands need to be executed, write the commands into a script for execution. Commands that are executed in the background or asynchronously are not supported.

        +

        Example command:

        +
        exec: 
        +  command: 
        +  - /install.sh 
        +  - install_agent
        +

        In this example, the install.sh script is executed with the install_agent argument after the container is created successfully.

        +

        HTTP request

        +

        Send an HTTP request for post-start processing. The related parameters are described as follows:

        +
        • Path: (optional) request URL.
        • Port: (mandatory) request port.
        • Host: (optional) IP address of the request. The default value is the IP address of the node where the container resides.
        +
        +
        +

      +
      +
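      The HTTP request option in Table 3 corresponds to an httpGet lifecycle handler in the pod spec. A minimal sketch is shown below; the path and port are placeholders, and the host field is omitted so the default described in Table 3 applies.

      spec:
        containers:
        - name: demo
          image: nginx:latest
          lifecycle:
            postStart:
              httpGet:
                path: /startup-hook     # Optional request path (placeholder).
                port: 8080              # Mandatory request port (placeholder).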

      Pre-Stop Processing

      1. Log in to the CCE console. When creating a workload, configure container information and select Lifecycle.
      2. Set the pre-start processing parameters on the Pre-Stop tab page.

        +

        + + + + + + + + + + +
        Table 4 Pre-stop processing parameters

        Parameter

        +

        Description

        +

        CLI

        +

        Set commands to be executed in the container for pre-stop processing. The command format is Command Args[1] Args[2].... Command is a system command or a user-defined executable program. If no path is specified, an executable program in the default path will be selected. If multiple commands need to be executed, write the commands into a script for execution.

        +

        Example command:

        +
        exec: 
        +  command: 
        +  - /uninstall.sh 
        +  - uninstall_agent
        +

        In this example, the uninstall.sh script is executed with the uninstall_agent argument before the container stops running.

        +

        HTTP request

        +

        Send an HTTP request for pre-stop processing. The related parameters are described as follows:

        +
        • Path: (optional) request URL.
        • Port: (mandatory) request port.
        • Host: (optional) IP address of the request. The default value is the IP address of the node where the container resides.
        +
        +
        +

      +
      +

      Example YAML

      This section uses Nginx as an example to describe how to set the container lifecycle.

      +

      In the following configuration file, postStart is defined to run install.sh using /bin/bash, and preStop is defined to run uninstall.sh.

      +
      apiVersion: apps/v1
      +kind: Deployment
      +metadata:
      +  name: nginx
      +spec:
      +  replicas: 1
      +  selector:
      +    matchLabels:
      +      app: nginx
      +  template:
      +    metadata:
      +      labels:
      +        app: nginx
      +    spec:
      +      containers:
      +      - image: nginx 
      +        command:
      +        - sleep                             #Startup command
      +        - '3600'
      +        imagePullPolicy: Always
      +        lifecycle:
      +          postStart:
      +            exec:
      +              command:
      +              - /bin/bash
      +              - install.sh                  #Post-start command
      +          preStop:
      +            exec:
      +              command:
      +              - /bin/bash
      +              - uninstall.sh                 #Pre-stop command
      +        name: nginx
      +      imagePullSecrets:
      +      - name: default-secret
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0107.html b/docs/cce/umn/cce_10_0107.html new file mode 100644 index 00000000..c4a99c05 --- /dev/null +++ b/docs/cce/umn/cce_10_0107.html @@ -0,0 +1,49 @@ + + +

      Connecting to a Cluster Using kubectl

      +

      Scenario

      This section uses a CCE cluster as an example to describe how to connect to a CCE cluster using kubectl.

      +
      +

      Permission Description

      When you access a cluster using kubectl, CCE uses the kubeconfig.json file generated on the cluster for authentication. This file contains user information, based on which CCE determines which Kubernetes resources can be accessed by kubectl. The permissions recorded in a kubeconfig.json file vary from user to user.

      +

      For details about user permissions, see Cluster Permissions (IAM-based) and Namespace Permissions (Kubernetes RBAC-based).

      +
      +

      Using kubectl

      To connect to a Kubernetes cluster from a PC, you can use kubectl, a Kubernetes command line tool. You can log in to the CCE console, click the name of the cluster to be connected, and view the access address and kubectl connection procedure on the cluster details page.

      +
      CCE allows you to access a cluster through a VPC network or a public network.
      • Intra-VPC access: The client that accesses the cluster must be in the same VPC as the cluster.
      • Public access: The client that accesses the cluster must be able to access public networks, and the cluster must have an EIP bound.

        To bind a public IP (EIP) to the cluster, go to the cluster details page and click Bind next to EIP in the Connection Information pane. In a cluster with an EIP bound, kube-apiserver will be exposed to public networks and may be attacked. You are advised to configure Advanced Anti-DDoS (AAD) for the EIP of the node where kube-apiserver resides.

        +
        +
      +
      +

      Download kubectl and the configuration file. Copy the file to your client, and configure kubectl. After the configuration is complete, you can access your Kubernetes clusters. Procedure:

      +
      1. Download kubectl.

        On the Kubernetes release page, click the corresponding link based on the cluster version, click Client Binaries, and download the corresponding platform software package. Alternatively, you can install kubectl with curl following the guide in Install Tools.
        Figure 1 Downloading kubectl
        +
        +

      2. Obtain the kubectl configuration file (kubeconfig).

        On the Connection Information pane on the cluster details page, click Learn more next to kubectl. On the window displayed, download the configuration file.

        +
        • The kubectl configuration file kubeconfig.json is used for cluster authentication. If the file is leaked, your clusters may be attacked.
        • By default, two-way authentication is disabled for domain names in the current cluster. You can run the kubectl config use-context externalTLSVerify command to enable two-way authentication. For details, see Two-Way Authentication for Domain Names. For a cluster that has been bound to an EIP, if the authentication fails (x509: certificate is valid) when two-way authentication is used, you need to bind the EIP again and download kubeconfig.json again.
        • The Kubernetes permissions assigned by the configuration file downloaded by IAM users are the same as those assigned to the IAM users on the CCE console.
        • If the KUBECONFIG environment variable is configured in the Linux OS, kubectl preferentially loads the KUBECONFIG environment variable instead of $home/.kube/config.
        +
        +

      3. Configure kubectl.

        Install and configure kubectl (A Linux OS is used as an example).
        1. Copy the kubectl downloaded in 1 and the configuration file downloaded in 2 to the /home directory on your client.
        2. Log in to your client and configure kubectl. If you have installed kubectl, skip this step.
          cd /home
          +chmod +x kubectl
          +mv -f kubectl /usr/local/bin
          +
        3. Log in to your client and configure the kubeconfig file.
          cd /home
          +mkdir -p $HOME/.kube
          +mv -f kubeconfig.json $HOME/.kube/config
          +
        4. Switch the kubectl access mode based on service scenarios.
          • Run this command to enable intra-VPC access:
            kubectl config use-context internal
            +
          • Run this command to enable public access (EIP required):
            kubectl config use-context external
            +
          • Run this command to enable public access and two-way authentication (EIP required):
            kubectl config use-context externalTLSVerify
            +

            For details about the cluster two-way authentication, see Two-Way Authentication for Domain Names.

            +
          +
        +
        +

      +
      +
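After switching contexts, you can verify that kubectl can reach the cluster. A minimal check with standard kubectl commands (the output depends on your cluster):

kubectl config get-contexts
kubectl cluster-info
kubectl get nodes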

      Two-Way Authentication for Domain Names

      Currently, CCE supports two-way authentication for domain names.

      +
      • Two-way authentication is disabled for domain names by default. You can run the kubectl config use-context externalTLSVerify command to switch to the externalTLSVerify context to enable it.
• When an EIP is bound to or unbound from a cluster, or a custom domain name is configured or updated, the latest cluster access addresses (including the EIP bound to the cluster and all custom domain names configured for the cluster) will be added to the cluster server certificate.
      • Asynchronous cluster synchronization takes about 5 to 10 minutes. You can view the synchronization result in Synchronize Certificate in Operation Records.
      • For a cluster that has been bound to an EIP, if the authentication fails (x509: certificate is valid) when two-way authentication is used, you need to bind the EIP again and download kubeconfig.json again.
      • If the domain name two-way authentication is not supported, kubeconfig.json contains the "insecure-skip-tls-verify": true field, as shown in Figure 2. To use two-way authentication, you can download the kubeconfig.json file again and enable two-way authentication for the domain names.
        Figure 2 Two-way authentication disabled for domain names
        +
      +
      +

      Common Issue (Error from server Forbidden)

      When you use kubectl to create or query Kubernetes resources, the following output is returned:

      +

# kubectl get deploy
Error from server (Forbidden): deployments.apps is forbidden: User "0c97ac3cb280f4d91fa7c0096739e1f8" cannot list resource "deployments" in API group "apps" in the namespace "default"

      +

      The cause is that the user does not have the permissions to operate the Kubernetes resources. For details about how to assign permissions, see Namespace Permissions (Kubernetes RBAC-based).
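Before assigning permissions, you can confirm whether the current kubeconfig user holds a specific permission with the standard kubectl auth can-i command, for example:

kubectl auth can-i list deployments --namespace default

The command prints yes or no depending on the RBAC rules bound to the user.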

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0110.html b/docs/cce/umn/cce_10_0110.html new file mode 100644 index 00000000..d4afbeb3 --- /dev/null +++ b/docs/cce/umn/cce_10_0110.html @@ -0,0 +1,14 @@ + + +

      Monitoring and Alarm

      +

      +
      + + diff --git a/docs/cce/umn/cce_10_0112.html b/docs/cce/umn/cce_10_0112.html new file mode 100644 index 00000000..3634d501 --- /dev/null +++ b/docs/cce/umn/cce_10_0112.html @@ -0,0 +1,108 @@ + + +

      Setting Health Check for a Container

      +

      Scenario

      Health check regularly checks the health status of containers during container running. If the health check function is not configured, a pod cannot detect application exceptions or automatically restart the application to restore it. This will result in a situation where the pod status is normal but the application in the pod is abnormal.

      +

      Kubernetes provides the following health check probes:

      +
      • Liveness probe (livenessProbe): checks whether a container is still alive. It is similar to the ps command that checks whether a process exists. If the liveness check of a container fails, the cluster restarts the container. If the liveness check is successful, no operation is executed.
• Readiness probe (readinessProbe): checks whether a container is ready to process user requests. If the container is detected as unready, service traffic will not be directed to it. Some applications take a long time to start up before they can provide services, for example, because they need to load disk data or wait for an external module to start. In such cases the application process is running but cannot serve requests yet, which is exactly what this probe is designed to detect. If the container readiness check fails, the cluster masks all requests sent to the container. If the readiness check is successful, the container can be accessed.
• Startup probe (startupProbe): checks whether a container application has started. If such a probe is configured, liveness and readiness checks are disabled until it succeeds, ensuring that those probes do not interfere with application startup. This is useful for slow-starting containers, which would otherwise be killed by kubelet before they are up and running.
      +
      +

      Check Method

      • HTTP request

        This health check mode is applicable to containers that provide HTTP/HTTPS services. The cluster periodically initiates an HTTP/HTTPS GET request to such containers. If the return code of the HTTP/HTTPS response is within 200–399, the probe is successful. Otherwise, the probe fails. In this health check mode, you must specify a container listening port and an HTTP/HTTPS request path.

        +

For example, for a container that provides HTTP services, if the HTTP check path is /health-check, the port is 80, and the host address is optional (it defaults to the container IP address, 172.16.0.186 in this example), the resulting request is GET http://172.16.0.186:80/health-check. The cluster periodically initiates this request to the container. You can also add one or more headers to the HTTP request, for example, a header named Custom-Header with the value example.

        +
      • TCP port

        For a container that provides TCP communication services, the cluster periodically establishes a TCP connection to the container. If the connection is successful, the probe is successful. Otherwise, the probe fails. In this health check mode, you must specify a container listening port.

        +

For example, if you have an Nginx container whose service port is 80 and you specify TCP port 80 as the container listening port, the cluster will periodically initiate a TCP connection to port 80 of the container. If the connection is successful, the probe is successful. Otherwise, the probe fails.

        +
      • CLI

CLI is an efficient health check method. When using the CLI, you must specify an executable command in a container. The cluster periodically runs the command in the container. If the command exits with status code 0, the health check is successful. Otherwise, the health check fails.

        +

        The CLI mode can be used to replace the HTTP request-based and TCP port-based health check.

        +
• For a TCP port, you can write a program script to connect to a container port. If the connection is successful, the script returns 0. Otherwise, the script returns -1.
        • For an HTTP request, you can write a program script to run the wget command for a container.

          wget http://127.0.0.1:80/health-check

          +

Check the return code of the response. If the return code is within 200–399, the script returns 0. Otherwise, the script returns -1.

          +
          • Put the program to be executed in the container image so that the program can be executed.
• If the command to be executed is a shell script, do not specify the script directly as the command. Instead, add a script parser. For example, if the script is /data/scripts/health_check.sh, specify sh /data/scripts/health_check.sh as the command. This is because the cluster does not run commands in a terminal environment inside the container.
          +
          +
        +
      • gRPC Check
gRPC checks can configure startup, liveness, and readiness probes for your gRPC application without exposing an HTTP endpoint or requiring an executable in the container. Kubernetes connects to your workload via gRPC and queries its status (see the sketch after this list).
        • The gRPC check is supported only in CCE clusters of v1.25 or later.
        • To use gRPC for check, your application must support the gRPC health checking protocol.
        • Similar to HTTP and TCP probes, if the port is incorrect or the application does not support the health checking protocol, the check fails.
        +
        +
        +
      +
      +
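The following is a minimal sketch of a gRPC liveness probe, adapted from the upstream Kubernetes example. The etcd image and port 2379 are only placeholders for an application that implements the gRPC health checking protocol, and the grpc probe field requires a cluster version that supports it (v1.25 or later in CCE, as noted above).

apiVersion: v1
kind: Pod
metadata:
  name: etcd-with-grpc
spec:
  containers:
  - name: etcd
    image: registry.k8s.io/etcd:3.5.1-0
    command: ["/usr/local/bin/etcd", "--data-dir", "/var/lib/etcd", "--listen-client-urls", "http://0.0.0.0:2379", "--advertise-client-urls", "http://127.0.0.1:2379", "--log-level", "debug"]
    ports:
    - containerPort: 2379       # gRPC health checking service listens on this port
    livenessProbe:
      grpc:
        port: 2379              # Port queried through the gRPC health checking protocol
      initialDelaySeconds: 10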

      Common Parameters

      +
      + + + + + + + + + + + + + + + + + + + +
      Table 1 Common parameter description

      Parameter

      +

      Description

      +

      Period (periodSeconds)

      +

      Indicates the probe detection period, in seconds.

      +

      For example, if this parameter is set to 30, the detection is performed every 30 seconds.

      +

      Delay (initialDelaySeconds)

      +

      Check delay time in seconds. Set this parameter according to the normal startup time of services.

      +

      For example, if this parameter is set to 30, the health check will be started 30 seconds after the container is started. The time is reserved for containerized services to start.

      +

      Timeout (timeoutSeconds)

      +

      Number of seconds after which the probe times out. Unit: second.

      +

      For example, if this parameter is set to 10, the timeout wait time for performing a health check is 10s. If the wait time elapses, the health check is regarded as a failure. If the parameter is left blank or set to 0, the default timeout time is 1s.

      +

      Success Threshold (successThreshold)

      +

Minimum number of consecutive successes for the probe to be considered successful again after having failed. For example, if this parameter is set to 1, a single successful check after a failure is enough for the workload to be considered normal again.

      +

      The default value is 1, which is also the minimum value.

      +

      The value of this parameter is fixed to 1 in Liveness Probe and Startup Probe.

      +

      Failure Threshold (failureThreshold)

      +

Number of consecutive check failures after which the probe gives up, that is, the number of retries when the detection fails.

      +

Giving up on a liveness probe restarts the container. Giving up on a readiness probe marks the pod as Unready.

      +

      The default value is 3. The minimum value is 1.

      +
      +
      +
      +

      YAML Example

      apiVersion: v1
      +kind: Pod
      +metadata:
      +  labels:
      +    test: liveness
      +  name: liveness-http
      +spec:
      +  containers:
      +  - name: liveness
      +    image: nginx:alpine
      +    livenessProbe:
      +      httpGet:
      +        path: /healthz
      +        port: 80
      +        httpHeaders:
      +        - name: Custom-Header
      +          value: Awesome
      +      initialDelaySeconds: 3
      +      periodSeconds: 3
      +    readinessProbe:
      +      exec:
      +        command:
      +          - cat
      +          - /tmp/healthy
      +      initialDelaySeconds: 5
      +      periodSeconds: 5
      +    startupProbe:
      +      httpGet:
      +        path: /healthz
      +        port: 80
      +      failureThreshold: 30
      +      periodSeconds: 10
      +
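To try the example, save it to a file (the file name below is arbitrary) and create the pod. The Events section of kubectl describe shows the probe results and any restarts they trigger:

kubectl apply -f liveness-http.yaml
kubectl describe pod liveness-http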
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0113.html b/docs/cce/umn/cce_10_0113.html new file mode 100644 index 00000000..c8a6d5fe --- /dev/null +++ b/docs/cce/umn/cce_10_0113.html @@ -0,0 +1,108 @@ + + +

      Setting an Environment Variable

      +

      Scenario

      An environment variable is a variable whose value can affect the way a running container will behave. You can modify environment variables even after workloads are deployed, increasing flexibility in workload configuration.

      +

      The function of setting environment variables on CCE is the same as that of specifying ENV in a Dockerfile.

      +

      After a container is started, do not modify configurations in the container. If configurations in the container are modified (for example, passwords, certificates, and environment variables of a containerized application are added to the container), the configurations will be lost after the container restarts and container services will become abnormal. An example scenario of container restart is pod rescheduling due to node anomalies.

      +

      Configurations must be imported to a container as arguments. Otherwise, configurations will be lost after the container restarts.

      +
      +

      Environment variables can be set in the following modes:

      +
      • Custom
      • Added from ConfigMap: Import all keys in a ConfigMap as environment variables.
• Added from ConfigMap key: Import a key in a ConfigMap as the value of an environment variable. For example, if you import configmap_value of configmap_key in a ConfigMap as the value of environment variable key1, an environment variable named key1 with the value configmap_value exists in the container.
      • Added from secret: Import all keys in a secret as environment variables.
      • Added from secret key: Import the value of a key in a secret as the value of an environment variable. For example, if you import secret_value of secret_key in secret secret-example as the value of environment variable key2, an environment variable named key2 with its value secret_value exists in the container.
      • Variable value/reference: Use the field defined by a pod as the value of the environment variable, for example, the pod name.
      • Resource Reference: Use the field defined by a container as the value of the environment variable, for example, the CPU limit of the container.
      +
      +

      Adding Environment Variables

      1. Log in to the CCE console. When creating a workload, select Environment Variables under Container Settings.
      2. Set environment variables.

        +

      +
      +

      YAML Example

      apiVersion: apps/v1
      +kind: Deployment
      +metadata:
      +  name: env-example
      +  namespace: default
      +spec:
      +  replicas: 1
      +  selector:
      +    matchLabels:
      +      app: env-example
      +  template:
      +    metadata:
      +      labels:
      +        app: env-example
      +    spec:
      +      containers:
      +        - name: container-1
      +          image: nginx:alpine
      +          imagePullPolicy: Always
      +          resources:
      +            requests:
      +              cpu: 250m
      +              memory: 512Mi
      +            limits:
      +              cpu: 250m
      +              memory: 512Mi
      +          env:
      +            - name: key                     # Custom
      +              value: value
      +            - name: key1                    # Added from ConfigMap key
      +              valueFrom:
      +                configMapKeyRef:
      +                  name: configmap-example
      +                  key: key1
      +            - name: key2                    # Added from secret key
      +              valueFrom:
      +                secretKeyRef:
      +                  name: secret-example
      +                  key: key2
      +            - name: key3                    # Variable reference, which uses the field defined by a pod as the value of the environment variable.
      +              valueFrom:
      +                fieldRef:
      +                  apiVersion: v1
      +                  fieldPath: metadata.name
      +            - name: key4                    # Resource reference, which uses the field defined by a container as the value of the environment variable.
      +              valueFrom:
      +                resourceFieldRef:
+                  containerName: container-1
      +                  resource: limits.cpu
      +                  divisor: 1
      +          envFrom:
      +            - configMapRef:                 # Added from ConfigMap
      +                name: configmap-example
      +            - secretRef:                    # Added from secret
      +                name: secret-example
      +      imagePullSecrets:
      +        - name: default-secret
      +
      +
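The Deployment above references configmap-example and secret-example. A minimal sketch of creating them with kubectl, assuming the ConfigMap also holds key1 and the secret also holds key2 as referenced in the env section:

kubectl create configmap configmap-example --from-literal=configmap_key=configmap_value --from-literal=key1=configmap_value
kubectl create secret generic secret-example --from-literal=secret_key=secret_value --from-literal=key2=secret_value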

      Viewing Environment Variables

      If the contents of configmap-example and secret-example are as follows:

      +
      $ kubectl get configmap configmap-example -oyaml
      +apiVersion: v1
      +data:
      +  configmap_key: configmap_value
      +kind: ConfigMap
      +...
      +
      +$ kubectl get secret secret-example -oyaml
      +apiVersion: v1
      +data:
      +  secret_key: c2VjcmV0X3ZhbHVl              # c2VjcmV0X3ZhbHVl is the value of secret_value in Base64 mode.
      +kind: Secret
      +...
      +

      The environment variables in the pod are as follows:

      +
      $ kubectl get pod
      +NAME                           READY   STATUS    RESTARTS   AGE
      +env-example-695b759569-lx9jp   1/1     Running   0          17m
      +
      +$ kubectl exec env-example-695b759569-lx9jp  -- printenv
+key=value                             # Custom environment variable
+key1=configmap_value                  # Added from ConfigMap key
+key2=secret_value                     # Added from secret key
+key3=env-example-695b759569-lx9jp     # metadata.name defined by the pod
+key4=1                                # limits.cpu defined by container-1. The value is rounded up, in unit of cores.
+configmap_key=configmap_value         # Added from ConfigMap. The key in the original ConfigMap is directly imported.
+secret_key=secret_value               # Added from secret. The key in the original secret is directly imported.
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0120.html b/docs/cce/umn/cce_10_0120.html new file mode 100644 index 00000000..6af29399 --- /dev/null +++ b/docs/cce/umn/cce_10_0120.html @@ -0,0 +1,62 @@ + + +

      Performing Replace/Rolling Upgrade

      +

      Scenario

      You can upgrade your clusters to a newer Kubernetes version on the CCE console.

      +

Before the upgrade, learn about the target versions to which each CCE cluster can be upgraded, the supported upgrade modes, and the upgrade impacts. For details, see Upgrade Overview and Before You Start.

      +
      +

      Precautions

      • If the coredns add-on needs to be upgraded during the cluster upgrade, ensure that the number of nodes is greater than or equal to the number of coredns instances and all coredns instances are running. Otherwise, the upgrade will fail. Before upgrading a cluster of v1.13, you need to upgrade the coredns add-on to the latest version available for the cluster.
      • When a cluster of v1.11 or earlier is upgraded to v1.13, the impacts on the cluster are as follows:
        • All cluster nodes will be restarted as their OSs are upgraded, which affects application running.
        • The cluster signature certificate mechanism is changed. As a result, the original cluster certificate becomes invalid. You need to obtain the certificate or kubeconfig file again after the cluster is upgraded.
        +
      • During the upgrade from one release of v1.13 to a later release of v1.13, applications in the cluster are interrupted for a short period of time only during the upgrade of network components.
      • During the upgrade from Kubernetes 1.9 to 1.11, the kube-dns of the cluster will be uninstalled and replaced with CoreDNS, which may cause loss of the cascading DNS configuration in the kube-dns or temporary interruption of the DNS service. Back up the DNS address configured in the kube-dns so you can configure the domain name in the CoreDNS again when domain name resolution is abnormal.
      +
      +

      Procedure

      1. Log in to the CCE console and click the cluster name to access the cluster.
      2. In the navigation pane, choose Cluster Upgrade. You can view the new version available for upgrade on the right. Click Upgrade.

        • If your cluster version is up-to-date, the Upgrade button is grayed out.
        • If your cluster status is abnormal or there are abnormal add-ons, the Upgrade button is dimmed. Perform a check by referring to Before You Start.
        +
        +

      3. In the displayed Pre-upgrade Check dialog box, click Check Now.
4. The pre-upgrade check starts. While the pre-upgrade check is in progress, the cluster status changes to Pre-checking, and new nodes and applications cannot be deployed in the cluster. Existing nodes and applications are not affected. It takes 3 to 5 minutes to complete the pre-upgrade check.
      5. When the status of the pre-upgrade check is Completed, click Upgrade.
      6. On the cluster upgrade page, review or configure basic information by referring to Table 1.

        +

        + + + + + + + + + + + + + + + + + + + +
        Table 1 Basic information

        Parameter

        +

        Description

        +

        Cluster Name

        +

        Review the name of the cluster to be upgraded.

        +

        Current Version

        +

        Review the version of the cluster to be upgraded.

        +

        Target Version

        +

        Review the target version after the upgrade.

        +

        Node Upgrade Policy

        +

        Replace (replace upgrade): Worker nodes will be reset. Their OSs will be reinstalled, and data on the system and data disks will be cleared. Exercise caution when performing this operation.

        +
        NOTE:
        • The lifecycle management function of the nodes and workloads in the cluster is unavailable.
        • APIs cannot be called temporarily.
        • Running workloads will be interrupted because nodes are reset during the upgrade.
        • Data in the system and data disks on the worker nodes will be cleared. Back up important data before resetting the nodes.
        • Data disks without LVM mounted to worker nodes need to be mounted again after the upgrade, and data on the disks will not be lost during the upgrade.
        • The EVS disk quota must be greater than 0.
        • The container IP addresses change, but the communication between containers is not affected.
        • Custom labels on the worker nodes will be cleared.
        • It takes about 12 minutes to complete the cluster upgrade.
        +
        +

        Login Mode

        +

        Key Pair

        +

        Select the key pair used to log in to the node. You can select a shared key.

        +

        A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create Key Pair.

        +
        +
        +

      7. Click Next. In the dialog box displayed, click OK.
      8. Upgrade add-ons. If an add-on needs to be upgraded, a red dot is displayed. Click the Upgrade button in the lower left corner of the add-on card view. After the upgrade is complete, click Upgrade in the lower right corner of the page.

        • Master nodes will be upgraded first, and then the worker nodes will be upgraded concurrently. If there are a large number of worker nodes, they will be upgraded in different batches.
        • Select a proper time window for the upgrade to reduce impacts on services.
        • Clicking OK will start the upgrade immediately, and the upgrade cannot be canceled. Do not shut down or restart nodes during the upgrade.
        +
        +

      9. In the displayed Upgrade dialog box, read the information and click OK. Note that the cluster cannot be rolled back after the upgrade.
10. Return to the cluster list. The cluster status is Upgrading. Wait until the upgrade is complete.

        After the upgrade is successful, you can view the cluster status and version on the cluster list or cluster details page.

        +

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0127.html b/docs/cce/umn/cce_10_0127.html new file mode 100644 index 00000000..aeeace22 --- /dev/null +++ b/docs/cce/umn/cce_10_0127.html @@ -0,0 +1,21 @@ + + +

      storage-driver (System Resource Add-On, Discarded)

      +

      Introduction

      storage-driver functions as a standard Kubernetes FlexVolume plug-in to allow containers to use EVS, SFS, OBS, and SFS Turbo storage resources. By installing and upgrading storage-driver, you can quickly install and update cloud storage capabilities.

      +

      storage-driver is a system resource add-on. It is installed by default when a cluster of Kubernetes v1.13 or earlier is created.

      +
      +

      Notes and Constraints

      • For clusters created in CCE, Kubernetes v1.15.11 is a transitional version in which the FlexVolume plug-in (storage-driver) is compatible with the CSI plug-in (everest). Clusters of v1.17 and later versions do not support FlexVolume anymore. You need to use the everest add-on.
      • The FlexVolume plug-in will be maintained by Kubernetes developers, but new functionality will only be added to CSI. You are advised not to create storage that connects to the FlexVolume plug-in (storage-driver) in CCE anymore. Otherwise, the storage resources may not function normally.
      • This add-on can be installed only in clusters of v1.13 or earlier. By default, the everest add-on is installed when clusters of v1.15 or later are created.

        In a cluster of v1.13 or earlier, when an upgrade or bug fix is available for storage functionalities, you only need to install or upgrade the storage-driver add-on. Upgrading the cluster or creating a cluster is not required.

        +
        +
      +
      +

      Installing the Add-on

      This add-on has been installed by default. If it is uninstalled due to some reasons, you can reinstall it by performing the following steps:

      +

      If storage-driver is not installed in a cluster, perform the following steps to install it:

      +
      1. Log in to the CCE console, click the cluster name, and access the cluster console. Choose Add-ons in the navigation pane, locate storage-driver on the right, and click Install.
      2. Click Install to install the add-on. Note that the storage-driver has no configurable parameters and can be directly installed.
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0129.html b/docs/cce/umn/cce_10_0129.html new file mode 100644 index 00000000..5f1cac2e --- /dev/null +++ b/docs/cce/umn/cce_10_0129.html @@ -0,0 +1,188 @@ + + +

      coredns (System Resource Add-On, Mandatory)

      +

      Introduction

      The coredns add-on is a DNS server that provides domain name resolution services for Kubernetes clusters. coredns chains plug-ins to provide additional features.

      +

coredns is open-source software and a part of CNCF. It provides a means for cloud services to discover each other in cloud-native deployments. Each of the plug-ins chained by coredns provides a particular DNS function. You can integrate coredns with only the plug-ins you need to make it fast, efficient, and flexible. When used in a Kubernetes cluster, coredns can automatically discover services in the cluster and provide domain name resolution for them. By working with a DNS server, coredns can also resolve external domain names for workloads in the cluster.

      +

      coredns is a system resource add-on. It is installed by default when a cluster of Kubernetes v1.11 or later is created.

      +

Kubernetes v1.11 and later use CoreDNS as the official default DNS server for all clusters.

      +

      CoreDNS official website: https://coredns.io/

      +

      Open source community: https://github.com/coredns/coredns

      +

      For details, see DNS.

      +
      +
      +

      Notes and Constraints

To keep coredns running properly and to upgrade it successfully, ensure that the number of available nodes is greater than or equal to the number of coredns instances and that all coredns instances are running. Otherwise, the upgrade will fail.

      +
      +

      Installing the Add-on

      This add-on has been installed by default. If it is uninstalled due to some reasons, you can reinstall it by performing the following steps:

      +
      1. Log in to the CCE console, click the cluster name, and access the cluster console. Choose Add-ons in the navigation pane, locate coredns on the right, and click Install.
      2. On the Install Add-on page, select the add-on specifications and set related parameters.

        +

        + + + + + + + + + + + + + + + + +
        Table 1 coredns add-on parameters

        Parameter

        +

        Description

        +

        Add-on Specifications

        +

        Concurrent domain name resolution ability. Select add-on specifications that best fit your needs.

        +

        If you select Custom qps, the domain name resolution QPS provided by CoreDNS is positively correlated with the CPU consumption. Adjust the number of pods and container CPU/memory quotas as required.

        +

        Pods

        +

        Number of pods that will be created to match the selected add-on specifications.

        +

        Containers

        +

        CPU and memory quotas of the container allowed for the selected add-on specifications.

        +

        Parameters

        +
        • parameterSyncStrategy: indicates whether to configure consistency check when an add-on is upgraded.
          • ensureConsistent: indicates that the configuration consistency check is enabled. If the configuration recorded in the cluster is inconsistent with the actual configuration, the add-on cannot be upgraded.
          • force: indicates that the configuration consistency check is ignored during an upgrade. Ensure that the current effective configuration is the same as the original configuration. After the add-on is upgraded, restore the value of parameterSyncStrategy to ensureConsistent and enable the configuration consistency check again.
          +
• stub_domains: DNS servers for user-defined domain names. The format is a key-value pair, where the key is a DNS domain name suffix and the value is one or more DNS server IP addresses.
        • upstream_nameservers: IP address of the upstream DNS server.
        • servers: The servers configuration is available since coredns 1.23.1. You can customize the servers configuration. For details, see dns-custom-nameservers. plugins indicates the configuration of each component in coredns (https://coredns.io/manual/plugins/). You are advised to retain the default configurations in common scenarios to prevent CoreDNS from being unavailable due to configuration errors. Each plugin component contains name, parameters (optional), and configBlock (optional). The format of the generated Corefile is as follows:

          $name $parameters {

          +

          $configBlock

          +

          }

          +

          Table 2 describes common plugins.

          +
        +

        Example:

        +
        {
        +     "servers": [
        +		   {
        +			"plugins": [
        +				{
        +					"name": "bind",
        +					"parameters": "{$POD_IP}"
        +				},
        +				{
        +					"name": "cache",
        +					"parameters": 30
        +				},
        +				{
        +					"name": "errors"
        +				},
        +				{
        +					"name": "health",
        +					"parameters": "{$POD_IP}:8080"
        +				},
        +				{
        +					"configBlock": "pods insecure\nfallthrough in-addr.arpa ip6.arpa",
        +					"name": "kubernetes",
        +					"parameters": "cluster.local in-addr.arpa ip6.arpa"
        +				},
        +				{
        +					"name": "loadbalance",
        +					"parameters": "round_robin"
        +				},
        +				{
        +					"name": "prometheus",
        +					"parameters": "{$POD_IP}:9153"
        +				},
        +				{
        +					"configBlock": "policy random",
        +					"name": "forward",
        +					"parameters": ". /etc/resolv.conf"
        +				},
        +				{
        +					"name": "reload"
        +				},
        +				{
        +					"name": "log"
        +				}
        +			],
        +			"port": 5353,
        +			"zones": [
        +				{
        +					"zone": "."
        +				}
        +			]
        +		}
        +	],
        +	"stub_domains": {
        +		"acme.local": [
        +			"1.2.3.4",
        +			"6.7.8.9"
        +		]
        +	},
        +	"upstream_nameservers": ["8.8.8.8", "8.8.4.4"]
        +}
        +
        +
        + +
        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Table 2 Default plugin configuration of the active zone of coredns

        plugin Name

        +

        Description

        +

        bind

        +

        Host IP address listened by coredns. You are advised to retain the default value {$POD_IP}.

        +

        cache

        +

        DNS cache is enabled.

        +

        errors

        +

        Errors are logged to stdout.

        +

        health

        +

        Health check configuration. The current listening IP address is {$POD_IP}:8080. Retain the default value. Otherwise, the coredns health check fails and coredns restarts repeatedly.

        +

        kubernetes

        +

        CoreDNS Kubernetes plug-in, which provides the service parsing capability in a cluster.

        +

        loadbalance

        +

        Round-robin DNS load balancer that randomizes the order of A, AAAA, and MX records in the answer.

        +

        prometheus

        +

        Port for obtaining coredns metrics. The default zone listening IP address is {$POD_IP}:9153. Retain the default value. Otherwise, CloudScope cannot collect coredns metrics.

        +

        forward

        +

        Any queries that are not within the cluster domain of Kubernetes will be forwarded to predefined resolvers (/etc/resolv.conf).

        +

        reload

        +

        The changed Corefile can be automatically reloaded. After editing the ConfigMap, wait for two minutes for the modification to take effect.

        +
        +
        +

      3. After the preceding configurations are complete, click Install.
      +
      +

      How Does Domain Name Resolution Work in Kubernetes?

DNS policies can be set on a per-pod basis. Currently, Kubernetes supports four types of DNS policies: Default, ClusterFirst, ClusterFirstWithHostNet, and None. For details, see https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/. These policies are specified in the dnsPolicy field of the pod spec.

      +
      +
      • Default: Pods inherit the name resolution configuration from the node that the pods run on. The custom upstream DNS server and the stub domain cannot be used together with this policy.
      • ClusterFirst: Any DNS query that does not match the configured cluster domain suffix, such as www.kubernetes.io, is forwarded to the upstream name server inherited from the node. Cluster administrators may have extra stub domains and upstream DNS servers configured.
• ClusterFirstWithHostNet: For pods running with hostNetwork, set the DNS policy to ClusterFirstWithHostNet.
• None: It allows a pod to ignore DNS settings from the Kubernetes environment. All DNS settings are supposed to be provided through the dnsConfig field in the pod spec (see the example below).
      +
      • Clusters of Kubernetes v1.10 and later support Default, ClusterFirst, ClusterFirstWithHostNet, and None. Clusters earlier than Kubernetes v1.10 support only Default, ClusterFirst, and ClusterFirstWithHostNet.
      • Default is not the default DNS policy. If dnsPolicy is not explicitly specified, ClusterFirst is used.
      +
      +
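For example, a minimal sketch of a pod that uses the None policy and supplies its own DNS settings through dnsConfig (the name server, search domains, and option values are placeholders):

apiVersion: v1
kind: Pod
metadata:
  name: dns-example
spec:
  containers:
  - name: test
    image: nginx:alpine
  dnsPolicy: "None"        # Ignore DNS settings from the Kubernetes environment
  dnsConfig:
    nameservers:
    - 1.2.3.4              # Placeholder upstream DNS server
    searches:
    - ns1.svc.cluster.local
    - my.dns.search.suffix
    options:
    - name: ndots
      value: "2"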

      Routing

      +

      Without stub domain configurations: Any query that does not match the configured cluster domain suffix, such as www.kubernetes.io, is forwarded to the upstream DNS server inherited from the node.

      +

      With stub domain configurations: If stub domains and upstream DNS servers are configured, DNS queries are routed according to the following flow:

      +
      1. The query is first sent to the DNS caching layer in coredns.
      2. From the caching layer, the suffix of the request is examined and then the request is forwarded to the corresponding DNS:
        • Names with the cluster suffix, for example, .cluster.local: The request is sent to coredns.
        +
        • Names with the stub domain suffix, for example, .acme.local: The request is sent to the configured custom DNS resolver that listens, for example, on 1.2.3.4.
        • Names that do not match the suffix (for example, widget.com): The request is forwarded to the upstream DNS.
        +
      +
      Figure 1 Routing
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0130.html b/docs/cce/umn/cce_10_0130.html new file mode 100644 index 00000000..4484e9f5 --- /dev/null +++ b/docs/cce/umn/cce_10_0130.html @@ -0,0 +1,33 @@ + + +

      Configuring a Container

      +
      + + diff --git a/docs/cce/umn/cce_10_0132.html b/docs/cce/umn/cce_10_0132.html new file mode 100644 index 00000000..6e6ab49e --- /dev/null +++ b/docs/cce/umn/cce_10_0132.html @@ -0,0 +1,525 @@ + + +

      npd

      +

      Introduction

      node-problem-detector (npd for short) is an add-on that monitors abnormal events of cluster nodes and connects to a third-party monitoring platform. It is a daemon running on each node. It collects node issues from different daemons and reports them to the API server. The npd add-on can run as a DaemonSet or a daemon.

      +

      For more information, see node-problem-detector.

      +
      +

      Notes and Constraints

      • When using this add-on, do not format or partition node disks.
      • Each npd process occupies 30 mCPU and 100 MB memory.
      +
      +

      Permission Description

      To monitor kernel logs, the npd add-on needs to read the host /dev/kmsg. Therefore, the privileged mode must be enabled. For details, see privileged.

      +

      In addition, CCE mitigates risks according to the least privilege principle. Only the following privileges are available for npd running:

      +
      • cap_dac_read_search: permission to access /run/log/journal.
      • cap_sys_admin: permission to access /dev/kmsg.
      +
      +

      Installing the Add-on

      1. Log in to the CCE console, click the cluster name, and access the cluster console. Choose Add-ons in the navigation pane, locate npd on the right, and click Install.
      2. On the Install Add-on page, select the add-on specifications and set related parameters.

        • Pods: Set the number of pods based on service requirements.
        • Containers: Select a proper container quota based on service requirements.
        +

      3. Set the parameters according to the following table and click Install.

        Only 1.16.0 and later versions support the configurations.

        +

        npc.enable: indicates whether to enable Node-problem-controller.

        +

      +
      +

      npd Check Items

      Check items are supported only in 1.16.0 and later versions.

      +
      +

      Check items cover events and statuses.

      +
      • Event-related

        For event-related check items, when a problem occurs, npd reports an event to the API server. The event type can be Normal (normal event) or Warning (abnormal event).

        + +
        + + + + + + + + + + + + + + + + + + + + + +
        Table 1 Event-related check items

        Check Item

        +

        Function

        +

        Description

        +

        OOMKilling

        +

        Check whether OOM events occur and are reported.

        +

        Warning event

        +

        TaskHung

        +

        Check whether taskHung events occur and are reported.

        +

        Warning event

        +

        KernelOops

        +

        Check kernel nil pointer panic errors.

        +

        Warning event

        +

        ConntrackFull

        +

        Check whether the conntrack table is full.

        +

        Warning event

        +

        Interval: 30 seconds

        +

        Threshold: 80%

        +
        +
        +
      • Status-related

        For status-related check items, when a problem occurs, npd reports an event to the API server and changes the node status synchronously. This function can be used together with Node-problem-controller fault isolation to isolate nodes.

        +

        If the check period is not specified in the following check items, the default period is 30 seconds.

        + +
        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Table 2 Application and OS check items

        Check Item

        +

        Function

        +

        Description

        +

        FrequentKubeletRestart

        +

        Check whether kubelet restarts frequently by listening to journald logs.

        +
        • Interval: 5 minutes
        • Backtracking: 10 minutes
        • Threshold: 10 times

If the monitored component restarts 10 times within the backtracking period, it is considered to be restarting frequently and a fault alarm is generated.

          +
        • Listening object: logs in the /run/log/journal directory
        +
        NOTE:

        The Ubuntu OS does not support the preceding check items due to incompatible log formats.

        +
        +

        FrequentDockerRestart

        +

        Check whether Docker restarts frequently by listening to journald logs.

        +

        FrequentContainerdRestart

        +

        Check whether containerd restarts frequently by listening to journald logs.

        +

        CRIProblem

        +

        Check the CRI component status.

        +

        Check object: Docker or containerd

        +

        KUBELETProblem

        +

        Check the kubelet status.

        +

        None

        +

        NTPProblem

        +

        Check the NTP and Chrony service status.

        +

Check whether the node clock is offset.

        +

        Threshold of the clock offset: 8000 ms

        +

        PIDProblem

        +

        Check whether PIDs are sufficient.

        +
        • Threshold: 90%
        • Usage: nr_threads in /proc/loadavg
        • Maximum value: smaller value between /proc/sys/kernel/pid_max and /proc/sys/kernel/threads-max.
        +

        FDProblem

        +

        Check whether file handles are sufficient.

        +
        • Threshold: 90%
        • Usage: the first value in /proc/sys/fs/file-nr
        • Maximum value: the third value in /proc/sys/fs/file-nr
        +

        MemoryProblem

        +

        Check whether the overall node memory is sufficient.

        +
        • Threshold: 90%
        • Usage: MemTotal-MemAvailable in /proc/meminfo
        • Maximum value: MemTotal in /proc/meminfo
        +

        ResolvConfFileProblem

        +

        Check whether the ResolvConf file is lost.

        +

        Check whether the ResolvConf file is normal.

        +

        Exception definition: No upstream domain name resolution server (nameserver) is included.

        +

        Object: /etc/resolv.conf

        +

        ProcessD

        +

        Check whether there is a process D on the node.

        +

        Source:

        +
        • /proc/{PID}/stat
• Alternatively, you can run ps aux.
        +

Exception scenario: ProcessD ignores the resident D-state processes (heartbeat and update) on which the SDI driver on the BMS node depends.

        +

        ProcessZ

        +

        Check whether the node has processes in Z state.

        +

        ScheduledEvent

        +

        Check whether host plan events exist on the node.

        +

        Typical scenario: The host is faulty, for example, the fan is damaged or the disk has bad sectors. As a result, cold and live migration is triggered for VMs.

        +

        Source:

        +
        • http://169.254.169.254/meta-data/latest/events/scheduled
        +

        This check item is an Alpha feature and is disabled by default.

        +
        +
        + +
        + + + + + + + + + + + + + +
        Table 3 Network connection check items

        Check Item

        +

        Function

        +

        Description

        +

        CNIProblem

        +

        Check whether the CNI component is running properly.

        +

        None

        +

        KUBEPROXYProblem

        +

        Check whether kube-proxy is running properly.

        +

        None

        +
        +
        + +
        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Table 4 Storage check items

        Check Item

        +

        Function

        +

        Description

        +

        ReadonlyFilesystem

        +

        Check whether the Remount root filesystem read-only error occurs in the system kernel by listening to the kernel logs.

        +

        Typical scenario: A user detaches a data disk from a node by mistake on the ECS, and applications continuously write data to the mount point of the data disk. As a result, an I/O error occurs in the kernel and the disk is reattached as a read-only disk.

        +

        Listening object: /dev/kmsg

        +

        Matching rule: Remounting filesystem read-only

        +

        DiskReadonly

        +

        Check whether the system disk, Docker disk, and kubelet disk are read-only.

        +

        Detection paths:

        +
        • /mnt/paas/kubernetes/kubelet/
        • /var/lib/docker/
        • /var/lib/containerd/
        • /var/paas/sys/log/cceaddon-npd/
        +

        The temporary file npd-disk-write-ping is generated in the detection path.

        +

        Currently, additional data disks are not supported.

        +

        DiskProblem

        +

        Check the usage of the system disk, Docker disk, and kubelet disk.

        +

        +
        • Threshold: 80%
        • Source:
          df -h
          +
        +

        Currently, additional data disks are not supported.

        +

        EmptyDirVolumeGroupStatusError

        +

        Check whether the ephemeral volume group on the node is normal.

        +

        Impact: The pod that depends on the storage pool cannot write data to the temporary volume. The temporary volume is remounted as a read-only file system by the kernel due to an I/O error.

        +

        Typical scenario: When creating a node, a user configures two data disks as a temporary volume storage pool. The user deletes some data disks by mistake. As a result, the storage pool becomes abnormal.

        +
        • Detection period: 60s
        • Source:
          vgs -o vg_name, vg_attr
          +
        • Principle: Check whether the VG (storage pool) is in the P state. If yes, some PVs (data disks) are lost.
        • Joint scheduling: The scheduler can automatically identify an abnormal node and prevent pods that depend on the storage pool from being scheduled to the node.
        • Exception scenario: The npd add-on cannot detect the loss of all PVs (data disks), resulting in the loss of VGs (storage pools). In this case, kubelet automatically isolates the node, detects the loss of VGs (storage pools), and updates the corresponding resources in nodestatus.allocatable to 0. This prevents pods that depend on the storage pool from being scheduled to the node. The damage of a single PV cannot be detected. In this case, the ReadonlyFilesystem detection is abnormal.
        +

        LocalPvVolumeGroupStatusError

        +

        Check the PV group on the node.

        +

        Impact: Pods that depend on the storage pool cannot write data to the persistent volume. The persistent volume is remounted as a read-only file system by the kernel due to an I/O error.

        +

        Typical scenario: When creating a node, a user configures two data disks as a persistent volume storage pool. Some data disks are deleted by mistake.

        +

        MountPointProblem

        +

        Check the mount point on the node.

        +

        Exception definition: You cannot access the mount point by running the cd command.

        +

Typical scenario: A network file system (such as obsfs or s3fs) is mounted to a node. When the connection becomes abnormal due to network issues or exceptions on the remote NFS server, all processes that access the mount point hang. For example, during a cluster upgrade, kubelet is restarted and scans all mount points. If an abnormal mount point is detected, the upgrade fails.

        +

        Alternatively, you can run the following command:

        +
        for dir in `df -h | grep -v "Mounted on" | awk "{print \\$NF}"`;do cd $dir; done && echo "ok"
        +

        DiskHung

        +

        Check whether I/O faults occur on the disk of the node.

        +

        Definition of I/O faults: The system does not respond to disk I/O requests, and some processes are in the D state.

        +

        Typical Scenario: Disks cannot respond due to abnormal OS hard disk drivers or severe faults on the underlying network.

        +
        • Check object: all data disks
        • Source:

          /proc/diskstat

          +
          Alternatively, you can run the following command:
          iostat -xmt 1
          +
          +
        • Threshold:
• Average usage: ioutil >= 0.99
• Average I/O queue length: avgqu-sz >= 1
• Average I/O transfer volume: iops (w/s) + ioth (wMB/s) <= 1
          +
          NOTE:

In some OSs, the disk I/O statistics do not change during an I/O fault. In this case, the CPU I/O time usage is checked instead: iowait > 0.8.

          +
          +
        +

        DiskSlow

        +

        Check whether slow I/O occurs on the disk of the node.

        +

        Definition of slow I/O: The average response time exceeds the threshold.

        +

        Typical scenario: EVS disks have slow I/Os due to network fluctuation.

        +
        • Check object: all data disks
        • Source:

          /proc/diskstat

          +
          Alternatively, you can run the following command:
          iostat -xmt 1
          +
          +
        • Threshold:

Average I/O latency: await >= 5000 ms

          +
        +
        NOTE:

If I/O requests are not responded to, the await statistics are not updated. In this case, this check item cannot detect the fault.

        +
        +
        +
        +

        The kubelet component has the following default check items, which have bugs or defects. You can fix them by upgrading the cluster or using npd.

        + +
        + + + + + + + + + + + + + + + + + +
        Table 5 Default kubelet check items

        Check Item

        +

        Function

        +

        Description

        +

        PIDPressure

        +

        Check whether PIDs are sufficient.

        +
        • Interval: 10 seconds
        • Threshold: 90%
        • Defect: In community version 1.23.1 and earlier versions, this check item becomes invalid when over 65535 PIDs are used. For details, see issue 107107. In community version 1.24 and earlier versions, thread-max is not considered in this check item.
        +

        MemoryPressure

        +

        Check whether the allocable memory for the containers is sufficient.

        +
        • Interval: 10 seconds
        • Threshold: Max. 100 MiB
        • Allocable = Total memory of a node – Reserved memory of a node
        • Defect: This check item checks only the memory consumed by containers, and does not consider that consumed by other elements on the node.
        +

        DiskPressure

        +

        Check the disk usage and inodes usage of the kubelet and Docker disks.

        +

        Interval: 10 seconds

        +

        Threshold: 90%

        +
        +
        +
      +
      +

      Node-problem-controller Fault Isolation

      Fault isolation is supported only by add-ons of 1.16.0 and later versions.

      +

When installing the npd add-on, set npc.enable to true to deploy two Node-problem-controller (NPC) instances. NPC can also be deployed as a single instance, but a single instance does not provide high availability.

      +

By default, if multiple nodes become faulty, NPC adds taints to only one node. You can set npc.maxTaintedNode to increase the threshold. If NPC is not running when a fault is rectified, the taints remain. You need to manually clear them or start NPC.

      +
      +
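A minimal sketch of checking which nodes carry taints and clearing one manually (the taint key below is a placeholder; use the key actually shown on your node):

kubectl get nodes -o custom-columns=NAME:.metadata.name,TAINTS:.spec.taints
kubectl taint node <node-name> <taint-key>:NoSchedule-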

      The open source NPD plug-in provides fault detection but not fault isolation. CCE enhances the node-problem-controller (NPC) based on the open source NPD. This component is implemented based on the Kubernetes node controller. For faults reported by NPD, NPC automatically adds taints to nodes for node fault isolation.

      +

You can modify the add-on parameter npc.customConditionToTaint according to the following table to configure fault isolation rules.

      + +
      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      Table 6 Parameters

      Parameter

      +

      Description

      +

      Default

      +

      npc.enable

      +

      Whether to enable NPC

      +

      true

      +

npc.customConditionToTaint

      +

      Fault isolation rules

      +

      See Table 7.

      +

      npc.customConditionToTaint[i]

      +

      Fault isolation rule items

      +

      N/A

      +

      npc.customConditionToTaint[i].

      +

      condition.status

      +

      Fault status

      +

      true

      +

      npc.customConditionToTaint[i].

      +

      condition.type

      +

      Fault type

      +

      N/A

      +

      npc.customConditionToTaint[i].

      +

      enable

      +

      Whether to enable the fault isolation rule.

      +

      false

      +

      npc.customConditionToTaint[i].

      +

taint.effect

      +

      Fault isolation effect

      +

      NoSchedule, PreferNoSchedule, or NoExecute

      +

      NoSchedule

      +

      Value options: NoSchedule, PreferNoSchedule, and NoExecute

      +

npc.maxTaintedNode

      +

      Number of nodes in a cluster that can be tainted by NPC

      +

      The int format and percentage format are supported.

      +

      1

      +

      Values:

      +
      • The value is in int format and ranges from 1 to infinity.
• In percentage format, the value ranges from 1% to 100%. The value multiplied by the number of cluster nodes must be at least 1.
      +

npc.affinity

      +

      Node affinity of the controller

      +

      N/A

      +
      +
      + +
      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      Table 7 Fault isolation rule configuration

      Fault

      +

      Fault Details

      +

      Taint

      +

      DiskReadonly

      +

      Disk read-only

      +

      NoSchedule: No new pods allowed.

      +

      DiskProblem

      +

      The disk space is insufficient, and key logical disks are detached.

      +

      NoSchedule: No new pods allowed.

      +

      FrequentKubeletRestart

      +

      kubelet restarts frequently.

      +

      NoSchedule: No new pods allowed.

      +

      FrequentDockerRestart

      +

      Docker restarts frequently.

      +

      NoSchedule: No new pods allowed.

      +

      FrequentContainerdRestart

      +

      containerd restarts frequently.

      +

      NoSchedule: No new pods allowed.

      +

      KUBEPROXYProblem

      +

      kube-proxy is abnormal.

      +

      NoSchedule: No new pods allowed.

      +

      PIDProblem

      +

      Insufficient PIDs

      +

      NoSchedule: No new pods allowed.

      +

      FDProblem

      +

      Insufficient file handles

      +

      NoSchedule: No new pods allowed.

      +

      MemoryProblem

      +

      Insufficient node memory

      +

      NoSchedule: No new pods allowed.

      +
      +
      +
      +
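The following is a minimal sketch of how these parameters could be combined when configuring the add-on. The structure mirrors the parameter paths in Table 6; the fault type, threshold, and effect values are illustrative only and are not the add-on defaults.

npc:
  enable: true                    # Deploy NPC together with NPD
  maxTaintedNode: 10%             # Taint at most 10% of cluster nodes (int or percentage)
  customConditionToTaint:         # Fault isolation rules
  - enable: true                  # Enable this rule
    condition:
      type: DiskReadonly          # Fault type reported by NPD (see Table 7)
      status: "true"              # Isolate the node when this condition is true
    taint:
      effect: NoSchedule          # NoSchedule, PreferNoSchedule, or NoExecute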

      Collecting Prometheus Metrics

      The NPD daemon pod exposes Prometheus metric data on port 19901. By default, the NPD pod is added with the annotation metrics.alpha.kubernetes.io/custom-endpoints: '[{"api":"prometheus","path":"/metrics","port":"19901","names":""}]'. You can build a Prometheus collector to identify and obtain NPD metrics from http://{{NpdPodIP}}:{{NpdPodPort}}/metrics.

      +

      If the npd add-on version is earlier than 1.16.5, the exposed port of Prometheus metrics is 20257.

      +
      +
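For reference, a Prometheus scrape job along the following lines could collect the NPD metrics. This is only a sketch: the kube-system namespace and the app=npd pod label used to select the NPD pods are assumptions and may differ in your cluster; the port matches the default described above.

scrape_configs:
- job_name: npd
  kubernetes_sd_configs:
  - role: pod                      # Discover pods through the Kubernetes API
    namespaces:
      names:
      - kube-system                # Namespace where the NPD daemon pods run (assumption)
  relabel_configs:
  - source_labels: [__meta_kubernetes_pod_label_app]
    regex: npd                     # Keep only the NPD pods (label value is an assumption)
    action: keep
  - source_labels: [__meta_kubernetes_pod_ip]
    replacement: ${1}:19901        # Scrape the NPD metrics port
    target_label: __address__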

      Currently, the metric data includes problem_counter and problem_gauge, as shown below.

      +
      # HELP problem_counter Number of times a specific type of problem have occurred.
      +# TYPE problem_counter counter
      +problem_counter{reason="DockerHung"} 0
      +problem_counter{reason="DockerStart"} 0
      +problem_counter{reason="EmptyDirVolumeGroupStatusError"} 0
      +...
      +# HELP problem_gauge Whether a specific type of problem is affecting the node or not.
      +# TYPE problem_gauge gauge
      +problem_gauge{reason="CNIIsDown",type="CNIProblem"} 0
      +problem_gauge{reason="CNIIsUp",type="CNIProblem"} 0
      +problem_gauge{reason="CRIIsDown",type="CRIProblem"} 0
      +problem_gauge{reason="CRIIsUp",type="CRIProblem"} 0
      +..
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0139.html b/docs/cce/umn/cce_10_0139.html new file mode 100644 index 00000000..cf5ed78e --- /dev/null +++ b/docs/cce/umn/cce_10_0139.html @@ -0,0 +1,186 @@ + + +

      Common kubectl Commands

      +

      Getting Started

      get

      +

      The get command displays one or many resources of a cluster.

      +

      This command prints a table of the most important information about all resources, including cluster nodes, running pods, Deployments, and Services.

      +

      A cluster can have multiple namespaces. If no namespace is specified, this command will run with the --namespace=default flag.

      +
      +

      Examples:

      +

      To list all pods with detailed information:

      +
      kubectl get po -o wide
      +

      To display pods in all namespaces:

      +
      kubectl get po --all-namespaces
      +

      To list labels of pods in all namespaces:

      +
      kubectl get po --show-labels
      +

To list all namespaces in the cluster:

      +
      kubectl get namespace
      +

To query resources from a different API server, run this command with the -s flag to specify the server address. To list a specific type of resources, add the resource type to this command, for example, kubectl get svc, kubectl get nodes, and kubectl get deploy.

      +
      +

      To list a pod with a specified name in YAML output format:

      +
      kubectl get po <podname> -o yaml
      +

      To list a pod with a specified name in JSON output format:

      +
      kubectl get po <podname> -o json
      +
      kubectl get po rc-nginx-2-btv4j -o=custom-columns=LABELS:.metadata.labels.app
      +

LABELS is a user-defined column title, and -o=custom-columns accepts a comma-separated list of <column title>:<JSON path> pairs. .metadata.labels.app is the JSON path of the field whose value is displayed in the column.

      +
      +

      create

      +

      The create command creates a cluster resource from a file or input.

      +

      If there is already a resource descriptor (a YAML or JSON file), you can create the resource from the file by running the following command:

      +
      kubectl create -f filename
      +

      expose

      +

      The expose command exposes a resource as a new Kubernetes service. Possible resources include a pod, Service, and Deployment.

      +
      kubectl expose deployment deployname --port=81 --type=NodePort --target-port=80 --name=service-name
      +

      In the preceding command, --port indicates the port exposed by the Service, --type indicates the Service type, and --target-port indicates the port of the pod backing the Service. Visiting ClusterIP:Port allows you to access the applications in the cluster.

      +
      +

      run

      +

      Examples:

      +

      To run a particular image in the cluster:

      +
      kubectl run deployname --image=nginx:latest
      +

      To run a particular image using a specified command:

      +
kubectl run deployname --image=busybox --command -- ping baidu.com
      +

      set

      +

      The set command configures object resources.

      +

      Example:

      +

To update the image of the container containername in the Deployment deployname to version 1.0:

      +
      kubectl set image deploy deployname containername=containername:1.0
      +

      edit

      +

      The edit command edits a resource from the default editor.

      +

      Examples:

      +

      To update a pod:

      +
      kubectl edit po po-nginx-btv4j
      +

      The example command yields the same effect as the following command:

      +
      kubectl get po po-nginx-btv4j -o yaml >> /tmp/nginx-tmp.yaml
      +vim /tmp/nginx-tmp.yaml
      +/*do some changes here */
      +kubectl replace -f /tmp/nginx-tmp.yaml
      +

      explain

      +

The explain command shows the documentation of resources and their fields.

      +

      Example:

      +

      To get documentation of pods:

      +
      kubectl explain pod
      +

      delete

      +

      The delete command deletes resources by resource name or label.

      +

      Example:

      +

      To delete a pod with minimal delay:

      +
      kubectl delete po podname --now 
      +
      kubectl delete -f nginx.yaml
      +kubectl delete deployment deployname
      +
      +

      Deployment Commands

      rolling-update*

      +

      rolling-update is a very important command. It updates a running service with zero downtime. Pods are incrementally replaced by new ones. One pod is updated at a time. The old pod is deleted only after the new pod is up. New pods must be distinct from old pods by name, version, and label. Otherwise, an error message will be reported.

      +
      kubectl rolling-update poname -f newfilename
+kubectl rolling-update poname --image=image:v2
      +

If any problem occurs during the rolling update, run the command with the --rollback flag to abort the rolling update and revert to the previous pod.

      +
kubectl rolling-update poname --rollback
      +

      rollout

      +

      The rollout command manages the rollout of a resource.

      +

      Examples:

      +

      To check the rollout status of a particular deployment:

      +
      kubectl rollout status deployment/deployname
      +

      To view the rollout history of a particular deployment:

      +
      kubectl rollout history deployment/deployname
      +

To roll back a deployment to its previous revision (the default when no revision is specified):

      +
      kubectl rollout undo deployment/test-nginx
      +

      scale

      +

      The scale command sets a new size for a resource by adjusting the number of resource replicas.

      +
      kubectl scale deployment deployname --replicas=newnumber
      +

      autoscale

      +

The autoscale command automatically chooses and sets the number of pods. It specifies the range for the number of pod replicas maintained by a controller. If there are too many pods, the controller terminates the extra pods; if there are too few, it starts more pods.

      +
      kubectl autoscale deployment deployname --min=minnumber --max=maxnumber
      +
      +

      Cluster Management Commands

      cordon, drain, uncordon*

      +

      If a node to be upgraded is running many pods or is already down, perform the following steps to prepare the node for maintenance:

      +
      1. Run the cordon command to mark a node as unschedulable. This means that new pods will not be scheduled onto the node.

        kubectl cordon nodename
        +

        Note: In CCE, nodename indicates the private network IP address of a node.

        +

      2. Run the drain command to smoothly migrate the running pods from the node to another node.

kubectl drain nodename --ignore-daemonsets --delete-emptydir-data
        +

--delete-emptydir-data allows pods that use emptyDir volumes to be deleted (in older kubectl versions, the equivalent flag is --delete-local-data).

        +

      3. Perform maintenance operations on the node, such as upgrading the kernel and upgrading Docker.
      4. After node maintenance is completed, run the uncordon command to mark the node as schedulable.

        kubectl uncordon nodename
        +

      +

      cluster-info

      +

      To display the add-ons running in the cluster:

      +
      kubectl cluster-info
      +

      To dump current cluster information to stdout:

      +
      kubectl cluster-info dump
      +

      top*

      +

The top command displays resource (CPU/memory) usage. It requires a metrics source, such as metrics-server or Heapster, to be correctly configured and running in the cluster.

      +
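Examples (assuming the metrics source above is available; the namespace is illustrative):

kubectl top node
kubectl top pod -n kube-system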

      taint*

      +

      The taint command updates the taints on one or more nodes.

      +
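For example, to add a taint to a node and then remove it (the key and value are illustrative):

kubectl taint nodes nodename key1=value1:NoSchedule
kubectl taint nodes nodename key1:NoSchedule-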

      certificate*

      +

      The certificate command modifies the certificate resources.

      +
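For example, to approve or deny a pending certificate signing request (the CSR name is illustrative):

kubectl certificate approve csr-name
kubectl certificate deny csr-name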
      +

      Fault Diagnosis and Debugging Commands

      describe

      +

      The describe command is similar to the get command. The difference is that the describe command shows details of a specific resource or group of resources, whereas the get command lists one or more resources in a cluster. The describe command does not support the -o flag. For resources of the same type, resource details are printed out in the same format.

      +

To query a list of resources, the get command is usually sufficient. To check the status of a specific resource, for example, whether a pod is in the Running state, run the describe command to show more detailed status information.

      +
      kubectl describe po <podname>
      +
      +

      logs

      +

      The logs command prints logs for a container in a pod or specified resource to stdout. To display logs in the tail -f mode, run this command with the -f flag.

      +
      kubectl logs -f podname
      +

      exec

      +

      The kubectl exec command is similar to the Docker exec command and executes a command in a container. If there are multiple containers in a pod, use the -c flag to choose a container.

      +
      kubectl exec -it podname bash
      +kubectl exec -it podname -c containername bash
      +

      port-forward*

      +

      The port-forward command forwards one or more local ports to a pod.

      +

      Example:

      +

To listen on port 5000 locally, forwarding data to/from port 6000 in the pod:

      +
      kubectl port-forward podname 5000:6000
      +

      proxy*

      +

      The proxy command creates a proxy server between localhost and the Kubernetes API server.

      +

      Example:

      +

To expose the Kubernetes API over HTTP on port 8001, accepting requests from any host:

      +
kubectl proxy --accept-hosts='.*' --port=8001 --address='0.0.0.0'
      +

      cp

      +

      The cp command copies files and directories to and from containers.

      +
kubectl cp filename podname:/tmp/newfilename
      +

      auth*

      +

      The auth command inspects authorization.

      +
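For example, to check whether the current user is allowed to create Deployments in the default namespace:

kubectl auth can-i create deployments --namespace default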

      attach*

      +

The attach command is similar to the logs -f command and attaches to a process that is already running inside an existing container. To exit, press Ctrl+C. If a pod contains multiple containers, use the -c flag followed by the container name after the pod name to specify the container whose output you want to view.

      +
      kubectl attach podname -c containername
      +
      +

      Advanced Commands

      replace

      +

      The replace command updates or replaces an existing resource by attributes including the number of replicas, labels, image versions, and ports. You can directly modify the original YAML file and then run the replace command.

      +
      kubectl replace -f filename
      +

      Resource names cannot be updated.

      +
      +

      apply*

      +

The apply command provides stricter control over resource updating than the patch and edit commands. It applies a configuration to a resource and lets you maintain a set of configuration files in source control. Whenever a configuration file is updated, it is pushed to the server and kubectl apply applies the latest configuration to the resource. Kubernetes compares the new configuration file with the previous one and updates only the changed fields instead of the whole object; fields not contained in the file specified with -f remain unchanged. Unlike the replace command, which overwrites the entire resource specification, the apply command updates the original resource in place and, similar to a Git commit, adds an annotation to the resource to record the currently applied configuration.

      +
kubectl apply -f filename
      +

      patch

      +

      If you want to modify attributes of a running container without first deleting the container or using the replace command, the patch command is to the rescue. The patch command updates field(s) of a resource using strategic merge patch, a JSON merge patch, or a JSON patch. For example, to change a pod label from app=nginx1 to app=nginx2 while the pod is running, use the following command:

      +
      kubectl patch pod podname -p '{"metadata":{"labels":{"app":"nginx2"}}}'
      +

convert*

      +

      The convert command converts configuration files between different API versions.

      +
      +

      Configuration Commands

      label

      +

The label command updates labels on a resource.

      +
      kubectl label pods my-pod new-label=newlabel
      +

      annotate

      +

The annotate command updates annotations on a resource.

      +
      kubectl annotate pods my-pod icon-url=http://......
      +

      completion

      +

The completion command outputs shell completion code for the specified shell.

      +
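For example, to load kubectl completion for bash in the current shell:

source <(kubectl completion bash)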
      +

      Other Commands

      api-versions

      +

      The api-versions command prints the supported API versions.

      +
      kubectl api-versions
      +

      api-resources

      +

      The api-resources command prints the supported API resources.

      +
      kubectl api-resources
      +

      config*

      +

      The config command modifies kubeconfig files. An example use case of this command is to configure authentication information in API calls.

      +
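Examples (the context name is illustrative):

kubectl config view
kubectl config use-context my-context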

      help

      +

      The help command gets all command references.

      +

      version

      +

      The version command prints the client and server version information for the current context.

      +
      kubectl version
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_01_0140.html b/docs/cce/umn/cce_10_0140.html similarity index 50% rename from docs/cce/umn/cce_01_0140.html rename to docs/cce/umn/cce_10_0140.html index 40bd22f4..db323a0b 100644 --- a/docs/cce/umn/cce_01_0140.html +++ b/docs/cce/umn/cce_10_0140.html @@ -1,19 +1,19 @@ - +

      Using kubectl to Run a Cluster

      diff --git a/docs/cce/umn/cce_10_0141.html b/docs/cce/umn/cce_10_0141.html new file mode 100644 index 00000000..71492b27 --- /dev/null +++ b/docs/cce/umn/cce_10_0141.html @@ -0,0 +1,32 @@ + + +

      gpu-beta

      +

      Introduction

      gpu-beta is a device management add-on that supports GPUs in containers. If GPU nodes are used in the cluster, the gpu-beta add-on must be installed.

      +
      +

      Notes and Constraints

      • The driver to be downloaded must be a .run file.
      • Only NVIDIA Tesla drivers are supported, not GRID drivers.
      • When installing or reinstalling the add-on, ensure that the driver download address is correct and accessible. CCE does not verify the address validity.
• The gpu-beta add-on only enables you to download the driver and execute the installation script. The add-on status only indicates how the add-on itself is running, not whether the driver is successfully installed.
      +
      +

      Installing the Add-on

      1. Log in to the CCE console and access the cluster console. Choose Add-ons in the navigation pane, locate gpu-beta on the right, and click Install.
      2. Configure the driver link.

        • If the download link is a public network address, for example, https://us.download.nvidia.com/tesla/470.103.01/NVIDIA-Linux-x86_64-470.103.01.run, bind an EIP to each GPU node. For details about how to obtain the driver link, see Obtaining the Driver Link from Public Network.
        • If the download link is an OBS URL, you do not need to bind an EIP to GPU nodes.
        • Ensure that the NVIDIA driver version matches the GPU node.
        • After the driver version is changed, restart the node for the change to take effect.
        +
        +

      3. Click Install.
      +
      +

      Verifying the Add-on

      After the add-on is installed, run the nvidia-smi command on the GPU node and the container that schedules GPU resources to verify the availability of the GPU device and driver.

      +
      GPU node:
      cd /opt/cloud/cce/nvidia/bin && ./nvidia-smi
      +
      +

      Container:

      +
      cd /usr/local/nvidia/bin && ./nvidia-smi
      +

      If GPU information is returned, the device is available and the add-on is successfully installed.

      +

      +
      +

      Obtaining the Driver Link from Public Network

      1. Log in to the CCE console.
      2. Click Create Node and select the GPU node to be created in the Specifications area. The GPU card model of the node is displayed in the lower part of the page.
3. Visit https://www.nvidia.com/Download/Find.aspx?lang=en.
4. Select the driver information on the NVIDIA Driver Downloads page, as shown in Figure 1. Operating System must be Linux 64-bit.

        Figure 1 Setting parameters
        +

5. After confirming the driver information, click SEARCH. A page is displayed, showing the driver information, as shown in Figure 2. Click DOWNLOAD.

        Figure 2 Driver information
        +

6. Obtain the driver link in either of the following ways:

        • Method 1: As shown in Figure 3, find url=/tesla/470.103.01/NVIDIA-Linux-x86_64-470.103.01.run in the browser address box. Then, supplement it to obtain the driver link https://us.download.nvidia.com/tesla/470.103.01/NVIDIA-Linux-x86_64-470.103.01.run. By using this method, you must bind an EIP to each GPU node.
        • Method 2: As shown in Figure 3, click AGREE & DOWNLOAD to download the driver. Then, upload the driver to OBS and record the OBS URL. By using this method, you do not need to bind an EIP to GPU nodes.
          Figure 3 Obtaining the link
          +
        +

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0142.html b/docs/cce/umn/cce_10_0142.html new file mode 100644 index 00000000..988a1f98 --- /dev/null +++ b/docs/cce/umn/cce_10_0142.html @@ -0,0 +1,136 @@ + + +

      NodePort

      +

      Scenario

      A Service is exposed on each node's IP address at a static port (NodePort). A ClusterIP Service, to which the NodePort Service will route, is automatically created. By requesting <NodeIP>:<NodePort>, you can access a NodePort Service from outside the cluster.

      +
      Figure 1 NodePort access
      +
      +

      Notes and Constraints

      • By default, a NodePort Service is accessed within a VPC. If you need to use an EIP to access a NodePort Service through public networks, bind an EIP to the node in the cluster in advance.
• After a Service is created, if the affinity setting is switched from the cluster level to the node level, the connection tracking table will not be cleared. You are advised not to modify the Service affinity setting after the Service is created. If you need to modify it, create a Service again.
      • CCE Turbo clusters support only cluster-level service affinity.
      • In VPC network mode, when container A is published through a NodePort service and the service affinity is set to the node level (that is, externalTrafficPolicy is set to local), container B deployed on the same node cannot access container A through the node IP address and NodePort service.
      • When a NodePort service is created in a cluster of v1.21.7 or later, the port on the node is not displayed using netstat by default. If the cluster forwarding mode is iptables, run the iptables -t nat -L command to view the port. If the cluster forwarding mode is ipvs, run the ipvsadm -nL command to view the port.
      +
      +

      Creating a NodePort Service

      1. Log in to the CCE console and click the cluster name to access the cluster.
      2. Choose Networking in the navigation pane and click Create Service in the upper right corner.
      3. Set intra-cluster access parameters.

        • Service Name: Specify a Service name, which can be the same as the workload name.
        • Service Type: Select NodePort.
        • Namespace: Namespace to which the workload belongs.
        • Service Affinity: For details, see externalTrafficPolicy (Service Affinity).
          • Cluster level: The IP addresses and access ports of all nodes in a cluster can access the workload associated with the Service. Service access will cause performance loss due to route redirection, and the source IP address of the client cannot be obtained.
          • Node level: Only the IP address and access port of the node where the workload is located can access the workload associated with the Service. Service access will not cause performance loss due to route redirection, and the source IP address of the client can be obtained.
          +
        • Selector: Add a label and click Add. A Service selects a pod based on the added label. You can also click Reference Workload Label to reference the label of an existing workload. In the dialog box that is displayed, select a workload and click OK.
        • Port Settings
          • Protocol: protocol used by the Service.
          • Service Port: port used by the Service. The port number ranges from 1 to 65535.
          • Container Port: port on which the workload listens. For example, Nginx uses port 80 by default.
          • Node Port: You are advised to select Auto. You can also specify a port. The default port ranges from 30000 to 32767.
          +
        +

      4. Click OK.
      +
      +

      Using kubectl

You can run kubectl commands to set the access type. This section uses an Nginx workload as an example to describe how to create a NodePort Service using kubectl.

      +
      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Create and edit the nginx-deployment.yaml and nginx-nodeport-svc.yaml files.

        The file names are user-defined. nginx-deployment.yaml and nginx-nodeport-svc.yaml are merely example file names.

        +

        vi nginx-deployment.yaml

        +
        apiVersion: apps/v1
        +kind: Deployment
        +metadata:
        +  name: nginx
        +spec:
        +  replicas: 1
        +  selector:
        +    matchLabels:
        +      app: nginx
        +  template:
        +    metadata:
        +      labels:
        +        app: nginx
        +    spec:
        +      containers:
        +      - image: nginx:latest
        +        name: nginx
        +      imagePullSecrets:
        +      - name: default-secret
        +

        vi nginx-nodeport-svc.yaml

        +
        apiVersion: v1
        +kind: Service
        +metadata:
        +  labels:
        +    app: nginx
        +  name: nginx-nodeport
        +spec:
        +  ports:
        +  - name: service
        +    nodePort: 30000     # Node port. The value ranges from 30000 to 32767.
        +    port: 8080          # Port for accessing a Service.
        +    protocol: TCP       # Protocol used for accessing a Service. The value can be TCP or UDP.
        +    targetPort: 80      # Port used by a Service to access the target container. This port is closely related to the applications running in a container. In this example, the Nginx image uses port 80 by default.
        +  selector:             # Label selector. A Service selects a pod based on the label and forwards the requests for accessing the Service to the pod. In this example, select the pod with the app:nginx label.
        +    app: nginx
        +  type: NodePort        # Service type. NodePort indicates that the Service is accessed through a node port.
        +

      3. Create a workload.

        kubectl create -f nginx-deployment.yaml

        +

        If information similar to the following is displayed, the workload has been created.

        +
        deployment "nginx" created
        +

        kubectl get po

        +

        If information similar to the following is displayed, the workload is running.

        +
        NAME                     READY     STATUS             RESTARTS   AGE
        +nginx-2601814895-qhxqv   1/1       Running            0          9s
        +

      4. Create a Service.

        kubectl create -f nginx-nodeport-svc.yaml

        +

        If information similar to the following is displayed, the Service is being created.

        +
        service "nginx-nodeport" created
        +

        kubectl get svc

        +

        If information similar to the following is displayed, the Service has been created.

        +
        # kubectl get svc
        +NAME             TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE
        +kubernetes       ClusterIP   10.247.0.1     <none>        443/TCP          4d8h
        +nginx-nodeport   NodePort    10.247.30.40   <none>        8080:30000/TCP   18s
        +

      5. Access the Service.

        By default, a NodePort Service can be accessed by using Any node IP address:Node port.

        +

The Service can be accessed from a node in another cluster in the same VPC or from another pod in the cluster. If a public IP address is bound to the node, you can also use the public IP address to access the Service. For example, create a container in the cluster and access the Service from it by using Node IP address:Node port.

        +
        # kubectl get node -owide
        +NAME           STATUS   ROLES    AGE    INTERNAL-IP    EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION                CONTAINER-RUNTIME
        +10.100.0.136   Ready    <none>   152m   10.100.0.136   <none>        CentOS Linux 7 (Core)   3.10.0-1160.25.1.el7.x86_64   docker://18.9.0
        +10.100.0.5     Ready    <none>   152m   10.100.0.5     <none>        CentOS Linux 7 (Core)   3.10.0-1160.25.1.el7.x86_64   docker://18.9.0
        +# kubectl run -i --tty --image nginx:alpine test --rm /bin/sh
        +If you do not see a command prompt, try pressing Enter.
        +/ # curl 10.100.0.136:30000
        +<!DOCTYPE html>
        +<html>
        +<head>
        +<title>Welcome to nginx!</title>
        +<style>
        +    body {
        +        width: 35em;
        +        margin: 0 auto;
        +        font-family: Tahoma, Verdana, Arial, sans-serif;
        +    }
        +</style>
        +</head>
        +<body>
        +<h1>Welcome to nginx!</h1>
        +<p>If you see this page, the nginx web server is successfully installed and
        +working. Further configuration is required.</p>
        +
        +<p>For online documentation and support please refer to
        +<a href="http://nginx.org/">nginx.org</a>.<br/>
        +Commercial support is available at
        +<a href="http://nginx.com/">nginx.com</a>.</p>
        +
        +<p><em>Thank you for using nginx.</em></p>
        +</body>
        +</html>
        +/ # 
        +

      +
      +

      externalTrafficPolicy (Service Affinity)

      For a NodePort Service, requests are first sent to the node port, then the Service, and finally the pod backing the Service. The backing pod may be not located in the node receiving the requests. By default, the backend workload can be accessed from any node IP address and service port. If the pod is not on the node that receives the request, the request will be redirected to the node where the pod is located, which may cause performance loss.

      +

      externalTrafficPolicy is a configuration parameter of the Service.

      +
      apiVersion: v1
      +kind: Service
      +metadata:
      +  name: nginx-nodeport
      +spec:
      +  externalTrafficPolicy: local
      +  ports:
      +  - name: service
      +    nodePort: 30000
      +    port: 80
      +    protocol: TCP
      +    targetPort: 80
      +  selector:
      +    app: nginx
      +  type: NodePort
      +

If externalTrafficPolicy is set to local, requests sent to Node IP address:node port are forwarded only to pods on that node. If the node has no matching pod, the requests fail.

      +

The other value of externalTrafficPolicy is cluster (the default), which indicates that requests can be forwarded to pods on any node in the cluster.

      +

      You can set this parameter when creating a Service of the NodePort type on the CCE console.

      +

      +

      The values of externalTrafficPolicy are as follows:

      +
      • cluster: The IP addresses and access ports of all nodes in a cluster can access the workload associated with the Service. Service access will cause performance loss due to route redirection, and the source IP address of the client cannot be obtained.
      • local: Only the IP address and access port of the node where the workload is located can access the workload associated with the Service. Service access will not cause performance loss due to route redirection, and the source IP address of the client can be obtained.
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0146.html b/docs/cce/umn/cce_10_0146.html new file mode 100644 index 00000000..e18d05cd --- /dev/null +++ b/docs/cce/umn/cce_10_0146.html @@ -0,0 +1,117 @@ + + +

      Deploying an Application from a Chart

      +

      On the CCE console, you can upload a Helm chart package, deploy it, and manage the deployed pods.

      +

      Notes and Constraints

      • The number of charts that can be uploaded by a single user is limited. The value displayed on the console of each region is the allowed quantity.
• Each version of a chart counts toward the chart quota.
      • Users with chart operation permissions can perform multiple operations on clusters. Therefore, exercise caution when assigning users the chart lifecycle management permissions, including uploading charts and creating, deleting, and updating chart releases.
      +
      +

      Chart Specifications

      The Redis workload is used as an example to illustrate the chart specifications.

      +
      • Naming Requirement

        A chart package is named in the format of {name}-{version}.tgz, where {version} indicates the version number in the format of Major version number.Minor version number.Revision number, for example, redis-0.4.2.tgz.

        +

        The chart name {name} can contain a maximum of 64 characters.

        +

        The version number must comply with the semantic versioning rules.

        +
• The major and minor version numbers are mandatory, and the revision number is optional.
        • The major and minor version numbers and revision number must be integers, greater than or equal to 0, and less than or equal to 99.
        +
        +
      • Directory Structure

        The directory structure of a chart is as follows:

        +
        redis/
        +  templates/
        +  values.yaml
        +  README.md
        +  Chart.yaml
        +  .helmignore
        +
        As listed in Table 1, the parameters marked with * are mandatory. +
        Table 1 Parameters in the directory structure of a chart

        Parameter

        +

        Description

        +

        * templates

        +

        Stores all templates.

        +

        * values.yaml

        +

        Describes configuration parameters required by templates.

        +
        NOTICE:

        Make sure that the image address set in the values.yaml file is the same as the image address in the container image repository. Otherwise, an exception occurs when you create a workload, and the system displays a message indicating that the image fails to be pulled.

        +

To obtain the image address, perform the following operations: Log in to the CCE console. In the navigation pane, choose Image Repository to access the SWR console. Choose My Images > Private Images and click the name of the uploaded image. On the Image Tags tab page, obtain the image address from the pull command. You can click the copy icon in the Image Pull Command column to copy the command.

        +
        +

        README.md

        +

        A markdown file, including:

        +
        • The workload or services provided by the chart.
        • Prerequisites for running the chart.
        • Configurations in the values.yaml file.
        • Information about chart installation and configuration.
        +

        * Chart.yaml

        +

        Basic information about the chart.

        +

Note: Helm v3 bumps the apiVersion in Chart.yaml from v1 to v2. A minimal Chart.yaml sketch is provided after this section.

        +

        .helmignore

        +

Files or data that do not need to be read as templates during workload installation.

        +
        +
        +
        +
      +
      +
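As a minimal sketch, a Chart.yaml that follows the naming and versioning rules above might look as follows; the chart name, description, and versions are illustrative:

apiVersion: v2              # Chart API version (v2 for Helm v3, v1 for Helm v2)
name: redis                 # Chart name, at most 64 characters
description: An example Redis chart
type: application
version: 0.4.2              # Chart version, following the semantic versioning rules above
appVersion: "6.0"           # Version of the packaged application (optional)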

      Uploading a Chart

      1. Log in to the CCE console, click the cluster name, and access the cluster console. Choose Charts in the navigation pane and click Upload Chart in the upper right corner.
      2. Click Select File, select the chart to be uploaded, and click Upload.

        When you upload a chart, the naming rule of the OBS bucket is changed from cce-charts-{region}-{domain_name} to cce-charts-{region}-{domain_id}. In the old naming rule, the system converts the domain_name value into a Base64 string and uses the first 63 characters. If you cannot find the chart in the OBS bucket with the new name, search for the bucket with the old name.

        +
        +

      +
      +

      Creating a Release

      1. Log in to the CCE console, click the cluster name, and access the cluster console. In the navigation pane, choose Charts.
      2. On the My Charts tab page, click Install of the target chart.
      3. Set workload installation parameters by referring to Table 2.

        +

        Table 2 Installation parameters

        Parameter

        +

        Description

        +

        Instance

        +

        Unique name of the chart release.

        +

        Namespace

        +

        Namespace to which the workload will be deployed.

        +

        Select Version

        +

        Version of a chart.

        +

        Configuration File

        +

        You can import and replace the values.yaml file or directly edit the chart parameters online.

        +
        NOTE:

        An imported values.yaml file must comply with YAML specifications, that is, KEY:VALUE format. The fields in the file are not restricted.

        +

The keys in the imported values.yaml must be the same as those in the selected chart package. Otherwise, the imported values.yaml does not take effect. That is, keys cannot be changed.

        +
        +
        1. Click Select File.
        2. Select the corresponding values.yaml file and click Open.
        +
        +
        +

      4. Click Install.

        On the Releases tab page, you can view the installation status of the release.

        +

      +
      +

      Upgrading a Chart-based Workload

      1. Log in to the CCE console, click the cluster name, and access the cluster console. Choose Charts in the navigation pane and click the Releases tab.
      2. Click Upgrade in the row where the desired workload resides and set the parameters for the workload.
      3. Select a chart version for Chart Version.
      4. Follow the prompts to modify the chart parameters. Click Upgrade, and then click Submit.
      5. Click Back to Release List. If the chart status changes to Upgrade successful, the workload is successfully upgraded.
      +
      +

      Rolling Back a Chart-based Workload

      1. Log in to the CCE console, click the cluster name, and access the cluster console. Choose Charts in the navigation pane and click the Releases tab.
      2. Click More > Roll Back for the workload to be rolled back, select the workload version, and click Roll back to this version.

        In the workload list, if the status is Rollback successful, the workload is rolled back successfully.

        +

      +
      +

      Uninstalling a Chart-based Workload

      1. Log in to the CCE console, click the cluster name, and access the cluster console. Choose Charts in the navigation pane and click the Releases tab.
      2. Click More > Uninstall next to the release to be uninstalled, and click Yes. Exercise caution when performing this operation because releases cannot be restored after being uninstalled.
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0150.html b/docs/cce/umn/cce_10_0150.html new file mode 100644 index 00000000..e9bc8369 --- /dev/null +++ b/docs/cce/umn/cce_10_0150.html @@ -0,0 +1,133 @@ + + +

      Creating a Job

      +

      Scenario

Jobs are short-lived tasks that run to completion. A job starts immediately after being deployed and is considered complete when its pod exits normally (exit 0).

      +

A job is a resource object used to control batch tasks. It is different from a long-running workload (such as a Deployment or StatefulSet).

      +

A job starts and terminates at specific times, while a long-running workload runs continuously unless it is terminated. The pods managed by a job automatically exit after the job is successfully completed, based on user configurations. The success criterion depends on the spec.completions policy:

      +
      • One-off jobs: A single pod runs once until successful termination.
      • Jobs with a fixed success count: N pods run until successful termination.
• Queue jobs: A job is considered complete when the application confirms overall success.
      +
      +

      Prerequisites

      Resources have been created. For details, see Creating a Node. If clusters and nodes are available, you need not create them again.

      +
      +

      Using the CCE Console

      1. Log in to the CCE console.
      2. Click the cluster name to go to the cluster console, choose Workloads in the navigation pane, and click the Create Workload in the upper right corner.
      3. Set basic information about the workload.

        Basic Info
        • Workload Type: Select Job. For details about workload types, see Overview.
        • Workload Name: Enter the name of the workload.
        • Namespace: Select the namespace of the workload. The default value is default. You can also click Create Namespace to create one. For details, see Creating a Namespace.
        • Pods: Enter the number of pods.
        • Container Runtime: A CCE cluster uses runC by default, whereas a CCE Turbo cluster supports both runC and Kata. For details about the differences between runC and Kata, see Kata Containers and Common Containers.
        +
        +
        Container Settings
        • Container Information
          Multiple containers can be configured in a pod. You can click Add Container on the right to configure multiple containers for the pod. +
          +
        • Image Access Credential: Select the credential used for accessing the image repository. The default value is default-secret. You can use default-secret to access images in SWR. For details about default-secret, see default-secret.
        • GPU graphics card: All is selected by default. The workload instance will be scheduled to the node with the specified GPU graphics card type.
        +
        +
        Advanced Settings
        • Labels and Annotations: See Pod Labels and Annotations.
        • Job Settings:
          • Parallel Pods: Maximum number of pods that can run in parallel during job execution. The value cannot be greater than the total number of pods in the job.
          • Timeout (s): Once a job reaches this time, the job status becomes failed and all pods in this job will be deleted. If you leave this parameter blank, the job will never time out.
          +
        +
        +

      4. Click Create Workload in the lower right corner.
      +
      +

      Using kubectl

      A job has the following configuration parameters:

      +
      • spec.template: has the same schema as a pod.
      • RestartPolicy: can only be set to Never or OnFailure.
      • For a single-pod job, the job ends after the pod runs successfully by default.
      • .spec.completions: indicates the number of pods that need to run successfully to end a job. The default value is 1.
      • .spec.parallelism: indicates the number of pods that run concurrently. The default value is 1.
      • spec.backoffLimit: indicates the maximum number of retries performed if a pod fails. When the limit is reached, the pod will not try again.
      • .spec.activeDeadlineSeconds: indicates the running time of pods. Once the time is reached, all pods of the job are terminated. The priority of .spec.activeDeadlineSeconds is higher than that of .spec.backoffLimit. That is, if a job reaches the .spec.activeDeadlineSeconds, the spec.backoffLimit is ignored.
      +

Based on the .spec.completions and .spec.parallelism settings, jobs are classified into the following types.

      + +
      Table 1 Job types

      Job Type

      +

      Description

      +

      Example

      +

      One-off jobs

      +

      A single pod runs once until successful termination.

      +

      Database migration

      +

      Jobs with a fixed completion count

      +

Pods run one after another until the specified completion count is reached.

      +

      Work queue processing pod

      +

      Parallel jobs with a fixed completion count

      +

      Multiple pods run until reaching the specified completions count.

      +

      Multiple pods for processing work queues concurrently

      +

      Parallel jobs

      +

      One or more pods run until successful termination.

      +

      Multiple pods for processing work queues concurrently

      +
      +
      +

      The following is an example job, which calculates π till the 2000th digit and prints the output.

      +
      apiVersion: batch/v1
      +kind: Job
      +metadata:
      +  name: myjob
      +spec:
+  completions: 50        # 50 pods need to run successfully to finish the job. In this example, π is printed 50 times.
+  parallelism: 5        # 5 pods are run in parallel.
+  backoffLimit: 5        # The maximum number of retries is 5.
      +  template:
      +    spec:
      +      containers:
      +      - name: pi
      +        image: perl
      +        command: ["perl",  "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      +      restartPolicy: Never
      +

      Description

      +
      • apiVersion: batch/v1 indicates the version of the current job.
      • kind: Job indicates that the current resource is a job.
      • restartPolicy: Never indicates the current restart policy. For jobs, this parameter can only be set to Never or OnFailure. For other controllers (for example, Deployments), you can set this parameter to Always.
      +

      Run the job.

      +
      1. Start the job.

        [root@k8s-master k8s]# kubectl apply -f myjob.yaml
        +job.batch/myjob created
        +

      2. View the job details.

        kubectl get job

        +
        [root@k8s-master k8s]# kubectl get job
        +NAME    COMPLETIONS   DURATION   AGE
        +myjob   50/50         23s        3m45s
        +

        If the value of COMPLETIONS is 50/50, the job is successfully executed.

        +

      3. Query the pod status.

        kubectl get pod

        +
        [root@k8s-master k8s]# kubectl get pod
        +NAME          READY   STATUS      RESTARTS   AGE
        +myjob-29qlw   0/1     Completed   0          4m5s
        +...
        +

        If the status is Completed, the job is complete.

        +

      4. View the pod logs.

        kubectl logs

        +
        # kubectl logs myjob-29qlw
        +3.1415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679821480865132823066470938446095505822317253594081284811174502841027019385211055596446229489549303819644288109756659334461284756482337867831652712019091456485669234603486104543266482133936072602491412737245870066063155881748815209209628292540917153643678925903600113305305488204665213841469519415116094330572703657595919530921861173819326117931051185480744623799627495673518857527248912279381830119491298336733624406566430860213949463952247371907021798609437027705392171762931767523846748184676694051320005681271452635608277857713427577896091736371787214684409012249534301465495853710507922796892589235420199561121290219608640344181598136297747713099605187072113499999983729780499510597317328160963185950244594553469083026425223082533446850352619311881710100031378387528865875332083814206171776691473035982534904287554687311595628638823537875937519577818577805321712268066130019278766111959092164201989380952572010654858632788659361533818279682303019520353018529689957736225994138912497217752834791315155748572424541506959508295331168617278558890750983817546374649393192550604009277016711390098488240128583616035637076601047101819429555961989467678374494482553797747268471040475346462080466842590694912933136770289891521047521620569660240580381501935112533824300355876402474964732639141992726042699227967823547816360093417216412199245863150302861829745557067498385054945885869269956909272107975093029553211653449872027559602364806654991198818347977535663698074265425278625518184175746728909777727938000816470600161452491921732172147723501414419735685481613611573525521334757418494684385233239073941433345477624168625189835694855620992192221842725502542568876717904946016534668049886272327917860857843838279679766814541009538837863609506800642251252051173929848960841284886269456042419652850222106611863067442786220391949450471237137869609563643719172874677646575739624138908658326459958133904780275901
        +

      +
      +

      Related Operations

      After a one-off job is created, you can perform operations listed in Table 2.

      + +
      Table 2 Other operations

      Operation

      +

      Description

      +

      Editing a YAML file

      +

      Click More > Edit YAML next to the job name to edit the YAML file corresponding to the current job.

      +

      Deleting a job

      +
      1. Select the job to be deleted and click Delete in the Operation column.
      2. Click Yes.

        Deleted jobs cannot be restored. Exercise caution when deleting a job.

        +
      +
      +
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0151.html b/docs/cce/umn/cce_10_0151.html new file mode 100644 index 00000000..f599f983 --- /dev/null +++ b/docs/cce/umn/cce_10_0151.html @@ -0,0 +1,108 @@ + + +

      Creating a Cron Job

      +

      Scenario

      A cron job runs on a repeating schedule. You can perform time synchronization for all active nodes at a fixed time point.

      +
A cron job runs periodically at the specified time. It is similar to Linux crontab. A cron job has the following characteristics:
      • Runs only once at the specified time.
      • Runs periodically at the specified time.
      +
      +

      The typical usage of a cron job is as follows:

      +
      • Schedules jobs at the specified time.
      • Creates jobs to run periodically, for example, database backup and email sending.
      +
      +

      Prerequisites

      Resources have been created. For details, see Creating a Node.

      +
      +

      Using the CCE Console

      1. Log in to the CCE console.
      2. Click the cluster name to go to the cluster console, choose Workloads in the navigation pane, and click the Create Workload in the upper right corner.
      3. Set basic information about the workload.

        Basic Info
        • Workload Type: Select Cron Job. For details about workload types, see Overview.
        • Workload Name: Enter the name of the workload.
        • Namespace: Select the namespace of the workload. The default value is default. You can also click Create Namespace to create one. For details, see Creating a Namespace.
        • Container Runtime: A CCE cluster uses runC by default, whereas a CCE Turbo cluster supports both runC and Kata. For details about the differences between runC and Kata, see Kata Containers and Common Containers.
        +
        +
        Container Settings
        • Container Information
          Multiple containers can be configured in a pod. You can click Add Container on the right to configure multiple containers for the pod. +
          +
        • Image Access Credential: Select the credential used for accessing the image repository. The default value is default-secret. You can use default-secret to access images in SWR. For details about default-secret, see default-secret.
        • GPU graphics card: All is selected by default. The workload instance will be scheduled to the node with the specified GPU graphics card type.
        +
        +

        Schedule

        +
        • Concurrency Policy: The following three modes are supported:
          • Forbid: A new job cannot be created before the previous job is completed.
          • Allow: The cron job allows concurrently running jobs, which preempt cluster resources.
          • Replace: A new job replaces the previous job when it is time to create a job but the previous job is not completed.
          +
        • Policy Settings: specifies when a new cron job is executed. Policy settings in YAML are implemented using cron expressions.
• A cron job is executed at a fixed interval. The unit can be minute, hour, day, or month. For example, if a cron job is executed every 30 minutes, the cron expression is */30 * * * *, and the execution times start from 0 within the unit range, for example, 00:00:00, 00:30:00, 01:00:00, and so on.
          • The cron job is executed at a fixed time (by month). For example, if a cron job is executed at 00:00 on the first day of each month, the cron expression is 0 0 1 */1 *, and the execution time is ****-01-01 00:00:00, ****-02-01 00:00:00, and ....
          • The cron job is executed at a fixed time (by week). For example, if a cron job is executed at 00:00 every Monday, the cron expression is 0 0 * * 1, and the execution time is ****-**-01 00:00:00 on Monday, ****-**-08 00:00:00 on Monday, and ....
          • For details about how to use cron expressions, see cron.
          +
          • If a cron job is executed at a fixed time (by month) and the number of days in a month does not exist, the cron job will not be executed in this month. For example, if the number of days is set to 30 but February does not have the 30th day, the cron job skips this month and continues on March 30.
• Due to how cron expressions are defined, a fixed period is not a strict interval. The range of the time unit is divided into periods starting from 0; for example, for the minute unit the range is 0 to 59. If the range cannot be divided evenly, the last period is cut short. Therefore, an accurate period can be represented only when the period evenly divides the range of its time unit.

For example, take the hour unit. Because 2, 3, 4, 6, 8, and 12 divide 24 evenly, these periods can be represented accurately. If another period is used, the last period is reset at the beginning of a new day. For example, if the cron expression is 0 */12 * * *, the job runs at 00:00:00 and 12:00:00 every day. If the cron expression is 0 */13 * * *, the job runs at 00:00:00 and 13:00:00 every day; at 00:00 of the next day it runs again even though 13 hours have not passed.

            +
          +
          +
• Job Records: You can set how many successfully finished jobs and failed jobs are retained. Setting a limit to 0 means that no jobs are kept after they finish.
        +
        Advanced Settings +
        +

      4. Click Create Workload in the lower right corner.
      +
      +

      Using kubectl

      A cron job has the following configuration parameters:

      +
      • .spec.schedule: takes a Cron format string, for example, 0 * * * * or @hourly, as schedule time of jobs to be created and executed.
• .spec.jobTemplate: specifies the jobs to be run, and has the same schema as a job described in Creating a Job Using kubectl.
      • .spec.startingDeadlineSeconds: specifies the deadline for starting a job.
      • .spec.concurrencyPolicy: specifies how to treat concurrent executions of a job created by the Cron job. The following options are supported:
        • Allow (default value): allows concurrently running jobs.
• Forbid: forbids concurrent runs, skipping the next run if the previous one has not finished yet.
        • Replace: cancels the currently running job and replaces it with a new one.
        +
      +

      The following is an example cron job, which is saved in the cronjob.yaml file.

      +
      apiVersion: batch/v1beta1
      +kind: CronJob
      +metadata:
      +  name: hello
      +spec:
      +  schedule: "*/1 * * * *"
      +  jobTemplate:
      +    spec:
      +      template:
      +        spec:
      +          containers:
      +          - name: hello
      +            image: busybox
      +            args:
      +            - /bin/sh
      +            - -c
      +            - date; echo Hello from the Kubernetes cluster
      +          restartPolicy: OnFailure
      +

      Run the job.

      +
      1. Create a cron job.

        kubectl create -f cronjob.yaml

        +

        Information similar to the following is displayed:

        +
        cronjob.batch/hello created
        +

      2. Query the running status of the cron job:

        kubectl get cronjob

        +
        NAME      SCHEDULE      SUSPEND   ACTIVE    LAST SCHEDULE   AGE
        +hello     */1 * * * *   False     0         <none>          9s
        +

        kubectl get jobs

        +
        NAME               COMPLETIONS   DURATION   AGE
        +hello-1597387980   1/1           27s        45s
        +

        kubectl get pod

        +
        NAME                           READY     STATUS      RESTARTS   AGE
        +hello-1597387980-tjv8f         0/1       Completed   0          114s
        +hello-1597388040-lckg9         0/1       Completed   0          39s
        +

        kubectl logs hello-1597387980-tjv8f

        +
        Fri Aug 14 06:56:31 UTC 2020
        +Hello from the Kubernetes cluster
        +

        kubectl delete cronjob hello

        +
        cronjob.batch "hello" deleted
        +

        When a cron job is deleted, the related jobs and pods are deleted too.

        +
        +

      +
      +

      Related Operations

      After a cron job is created, you can perform operations listed in Table 1.

      + +
      Table 1 Other operations

      Operation

      +

      Description

      +

      Editing a YAML file

      +

      Click More > Edit YAML next to the cron job name to edit the YAML file of the current job.

      +

      Stopping a cron job

      +
      1. Select the job to be stopped and click Stop in the Operation column.
      2. Click Yes.
      +

      Deleting a cron job

      +
      1. Select the cron job to be deleted and click More > Delete in the Operation column.
      2. Click Yes.

        Deleted jobs cannot be restored. Therefore, exercise caution when deleting a job.

        +
      +
      +
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0152.html b/docs/cce/umn/cce_10_0152.html new file mode 100644 index 00000000..faea51a6 --- /dev/null +++ b/docs/cce/umn/cce_10_0152.html @@ -0,0 +1,107 @@ + + +

      Creating a ConfigMap

      +

      Scenario

      A ConfigMap is a type of resource that stores configuration information required by a workload. Its content is user-defined. After creating ConfigMaps, you can use them as files or environment variables in a containerized workload.

      +

      ConfigMaps allow you to decouple configuration files from container images to enhance the portability of workloads.

      +

      Benefits of ConfigMaps:

      +
      • Manage configurations of different environments and services.
      • Deploy workloads in different environments. Multiple versions are supported for configuration files so that you can update and roll back workloads easily.
      • Quickly import configurations in the form of files to containers.
      +
      +

      Procedure

      1. Log in to the CCE console and access the cluster console.
      2. Choose ConfigMaps and Secrets in the navigation pane and click Create ConfigMap in the upper right corner.
      3. Set parameters.

        +

        + + + + + + + + + + + + + + + + + + + +
        Table 1 Parameters for creating a ConfigMap

        Parameter

        +

        Description

        +

        Name

        +

        Name of a ConfigMap, which must be unique in a namespace.

        +

        Namespace

        +

        Namespace to which the ConfigMap belongs. If you do not specify this parameter, the default namespace is used.

        +

        Description

        +

        Description of the ConfigMap.

        +

        Data

        +

        Data of a ConfigMap, in the key-value pair format.

        +

        Click the add button to add data. The value can be in string, JSON, or YAML format.

        +

        Label

        +

        Label of the ConfigMap. Enter a key-value pair and click Add.

        +
        +
        +

      4. After the configuration is complete, click OK.

        The new ConfigMap is displayed in the ConfigMap list.

        +

      +
      +

      ConfigMap Requirements

      A ConfigMap resource file must be in YAML format, and the file size cannot exceed 2 MB.

      +
      The file name is configmap.yaml and the following shows a configuration example.
      apiVersion: v1
      +kind: ConfigMap
      +metadata:
      +  name: test-configmap
      +data:
      +  data-1: value-1
      +  data-2: value-2
      +
      +
      +

      Creating a ConfigMap Using kubectl

      1. Configure the kubectl command to connect an ECS to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Create and edit the cce-configmap.yaml file.

        vi cce-configmap.yaml

        +
        apiVersion: v1
        +kind: ConfigMap
        +metadata:
        +  name: cce-configmap
        +data:
        +  SPECIAL_LEVEL: Hello
        +  SPECIAL_TYPE: CCE
        +

      3. Run the following commands to create a ConfigMap.

        kubectl create -f cce-configmap.yaml

        +

        kubectl get cm

        +
        NAME               DATA            AGE
        +cce-configmap      2               3h
        +cce-configmap1     3               7m
        +

      +
      +
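      As noted in the scenario, a ConfigMap can be consumed by a workload as environment variables or files. The following is a minimal sketch that imports all key-value pairs of cce-configmap as environment variables; the pod name configmap-demo and the nginx image are illustrative only:

      apiVersion: v1
      kind: Pod
      metadata:
        name: configmap-demo
      spec:
        containers:
        - name: container-1
          image: nginx:latest
          envFrom:               # Import every key of the ConfigMap as an environment variable.
          - configMapRef:
              name: cce-configmap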

      Related Operations

      After creating a ConfigMap, you can update or delete it as described in Table 2. +
      + + + + + + + + + + + + + +
      Table 2 Related operations

      Operation

      +

      Description

      +

      Editing a YAML file

      +

      Click Edit YAML in the row where the target ConfigMap resides to edit its YAML file.

      +

      Updating a ConfigMap

      +
      1. Select the name of the ConfigMap to be updated and click Update.
      2. Modify the ConfigMap data. For more information, see Table 1.
      3. Click OK.
      +

      Deleting a ConfigMap

      +

      Select the ConfigMap you want to delete and click Delete.

      +

      Follow the prompts to delete the ConfigMap.

      +
      +
      +
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0153.html b/docs/cce/umn/cce_10_0153.html new file mode 100644 index 00000000..23205a40 --- /dev/null +++ b/docs/cce/umn/cce_10_0153.html @@ -0,0 +1,128 @@ + + +

      Creating a Secret

      +

      Scenario

      A secret is a type of resource that holds sensitive data, such as authentication and key information. Its content is user-defined. After creating secrets, you can use them as files or environment variables in a containerized workload.

      +
      +

      Procedure

      1. Log in to the CCE console and access the cluster console.
      2. Choose ConfigMaps and Secrets in the navigation pane, click the Secrets tab, and click Create Secret in the upper right corner.
      3. Set parameters.

        +

        + + + + + + + + + + + + + + + + + + + + + + +
        Table 1 Parameters for creating a secret

        Parameter

        +

        Description

        +

        Name

        +

        Name of the secret you create, which must be unique.

        +

        Namespace

        +

        Namespace to which the secret belongs. If you do not specify this parameter, the default namespace is used.

        +

        Description

        +

        Description of a secret.

        +

        Type

        +

        Type of the secret you create.

        +
        • Opaque: common secret.
        • kubernetes.io/dockerconfigjson: a secret that stores the authentication information required for pulling images from a private repository.
        • IngressTLS: a secret that stores the certificate required by ingresses (layer-7 load balancing Services).
        • Other: another type of secret, which is specified manually.
        +

        Secret Data

        +

        Data of the secret, which can be used in containers as environment variables or files.

        +
        • If Secret Type is Opaque, click the add button. In the dialog box displayed, enter a key-value pair and select Auto Base64 Encoding.
        • If the secret type is kubernetes.io/dockerconfigjson, enter the account and password of the private image repository.
        • If the secret type is IngressTLS, upload the certificate file and private key file.
          NOTE:
          • A certificate is a self-signed or CA-signed credential used for identity authentication.
          • A certificate request is a request for a signature with a private key.
          +
          +
        +

        Secret Label

        +

        Label of the secret. Enter a key-value pair and click Add.

        +
        +
        +

      4. After the configuration is complete, click OK.

        The new secret is displayed in the secret list.

        +

      +
      +

      Secret Resource File Configuration

      This section describes configuration examples of secret resource description files.

      +

      For example, you can retrieve the username and password for a workload through a secret.

      +
      • YAML format

        The secret.yaml file is defined as shown below. The values must be Base64-encoded. For details about the encoding method, see Base64 Encoding.

        +
        apiVersion: v1
        +kind: Secret
        +metadata:
        +  name: mysecret           #Secret name
        +  namespace: default       #Namespace. The default value is default.
        +data:
        +  username: ******  #The value must be Base64-encoded.
        +  password: ******  #The value must be encoded using Base64.
        +type: Opaque     #You are advised not to change this parameter value.
        +
      +
      +

      Creating a Secret Using kubectl

      1. Configure the kubectl command to connect an ECS to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Encode the secret values using Base64 (see the echo command below), and then create and edit the cce-secret.yaml file.

        # echo -n "content to be encoded" | base64
        +******
        +

        vi cce-secret.yaml

        +
        apiVersion: v1
        +kind: Secret
        +metadata:
        +  name: mysecret
        +type: Opaque
        +data:
        +  username: ******
        +  password: ******
        +

      3. Create a secret.

        kubectl create -f cce-secret.yaml

        +

        You can query the secret after creation.

        +

        kubectl get secret

        +

      +
      +
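      After creation, the secret can be referenced in a workload as environment variables or mounted as files. The following is a minimal sketch; the pod name secret-demo and the nginx image are illustrative only:

      apiVersion: v1
      kind: Pod
      metadata:
        name: secret-demo
      spec:
        containers:
        - name: container-1
          image: nginx:latest
          env:                           # Inject a single key of the secret as an environment variable.
          - name: USERNAME
            valueFrom:
              secretKeyRef:
                name: mysecret
                key: username
          volumeMounts:                  # Mount the whole secret as files under /etc/secret.
          - name: secret-volume
            mountPath: /etc/secret
            readOnly: true
        volumes:
        - name: secret-volume
          secret:
            secretName: mysecret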

      Related Operations

      After creating a secret, you can update or delete it as described in Table 2.

      The secret list contains system secret resources that can be queried only. The system secret resources cannot be updated or deleted.

      +
      + +
      + + + + + + + + + + + + + + + + +
      Table 2 Related Operations

      Operation

      +

      Description

      +

      Editing a YAML file

      +

      Click Edit YAML in the row where the target secret resides to edit its YAML file.

      +

      Updating a secret

      +
      1. Select the name of the secret to be updated and click Update.
      2. Modify the secret data. For more information, see Table 1.
      3. Click OK.
      +

      Deleting a secret

      +

      Select the secret you want to delete and click Delete.

      +

      Follow the prompts to delete the secret.

      +

      Deleting secrets in batches

      +
      1. Select the secrets to be deleted.
      2. Click Delete above the secret list.
      3. Follow the prompts to delete the secrets.
      +
      +
      +
      +
      +

      Base64 Encoding

      To Base64-encode a string, run the echo -n content to be encoded | base64 command. The following is an example:

      +
      root@ubuntu:~# echo -n "content to be encoded" | base64
      +******
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0154.html b/docs/cce/umn/cce_10_0154.html new file mode 100644 index 00000000..81b1e509 --- /dev/null +++ b/docs/cce/umn/cce_10_0154.html @@ -0,0 +1,98 @@ + + +

      autoscaler

      +

      Introduction

      Autoscaler is an important Kubernetes controller. It supports microservice scaling and is key to serverless design.

      +

      When the CPU or memory usage of a microservice is too high, horizontal pod autoscaling is triggered to add pods to reduce the load. These pods can be automatically removed when the load drops, allowing the microservice to run as efficiently as possible.

      +

      CCE simplifies the creation, upgrade, and manual scaling of Kubernetes clusters, in which traffic loads change over time. To balance resource usage and workload performance of nodes, Kubernetes introduces the autoscaler add-on to automatically resize a cluster based on the resource usage required for workloads deployed in the cluster. For details, see Creating a Node Scaling Policy.

      +

      Open source community: https://github.com/kubernetes/autoscaler

      +
      +

      How the Add-on Works

      autoscaler controls auto scale-out and scale-in.

      +
      • Auto scale-out
        You can choose either of the following methods:
        • If pods in a cluster cannot be scheduled due to insufficient worker nodes, cluster scaling is triggered to add nodes. The nodes to be added have the same specification as configured for the node pool to which the nodes belong.
          Auto scale-out will be performed when:
          • Node resources are insufficient.
          • No node affinity policy is set in the pod scheduling configuration. That is, if a node has been configured as an affinity node for the pods, no node will be automatically added when the pods cannot be scheduled. For details about how to configure the node affinity policy, see Scheduling Policy (Affinity/Anti-affinity).
          +
          +
        • When the cluster meets the node scaling policy, cluster scale-out is also triggered. For details, see Creating a Node Scaling Policy.
        +

        The add-on follows the "No Less, No More" policy. For example, if three cores are required for creating a pod and the system supports four-core and eight-core nodes, autoscaler will preferentially create a four-core node.

        +
        +
        +
      • Auto scale-in

        When a cluster node is idle for a period of time (10 minutes by default), cluster scale-in is triggered, and the node is automatically deleted. However, a node cannot be deleted from a cluster if the following pods exist:

        +
        • Pods that do not meet specific requirements set in PodDisruptionBudget
        • Pods that cannot be scheduled to other nodes due to constraints such as affinity and anti-affinity policies
        • Pods that have the cluster-autoscaler.kubernetes.io/safe-to-evict: 'false' annotation (an example of this annotation is shown after this list)
        • Pods (except those created by kube-system DaemonSet) that exist in the kube-system namespace on the node
        • Pods that are not created by a controller (Deployment/ReplicaSet/Job/StatefulSet)
        +
      +
      +
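      As referenced in the scale-in list above, you can explicitly protect a pod from node scale-in by adding the safe-to-evict annotation to its pod template. A minimal sketch; the Deployment name critical-app and the nginx image are illustrative only:

      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: critical-app
      spec:
        replicas: 1
        selector:
          matchLabels:
            app: critical-app
        template:
          metadata:
            labels:
              app: critical-app
            annotations:
              cluster-autoscaler.kubernetes.io/safe-to-evict: "false"   # autoscaler will not delete the node running this pod.
          spec:
            containers:
            - name: container-1
              image: nginx:latest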

      Notes and Constraints

      • Only clusters of v1.9.10-r2 and later support autoscaler.
      • Ensure that there are sufficient resources for installing the add-on.
      • The default node pool does not support auto scaling. For details, see Description of DefaultPool.
      +
      +

      Installing the Add-on

      1. Log in to the CCE console, click the cluster name, and access the cluster console. Choose Add-ons in the navigation pane, locate autoscaler on the right, and click Install.
      2. Configure add-on installation parameters.

        +

        + + + + + + + +
        Table 1 Specifications configuration

        Parameter

        +

        Description

        +

        Add-on Specifications

        +

        The add-on can be deployed in the following specifications:

        +
        NOTE:

        When the autoscaler add-on is deployed in HA or customized mode, anti-affinity policies exist between add-on instances and the add-on instances are deployed on different nodes. Therefore, the number of available nodes in the cluster must be greater than or equal to the number of add-on instances to ensure high availability of the add-on.

        +
        +
        • Single: The add-on is deployed with only one pod.
        • HA50: The add-on is deployed with two pods, serving a cluster with 50 nodes and ensuring high availability.
        • HA200: The add-on is deployed with two pods, serving a cluster with 200 nodes and ensuring high availability. Each pod uses more resources than those of the HA50 specification.
        • Custom: You can customize the number of pods and specifications as required.
        +
        +
        + +
        + + + + + + + + + + + + + + + + +
        Table 2 Parameter configuration

        Parameter

        +

        Description

        +

        Scaling

        +

        You can select the following options as required:

        +
        • Nodes are automatically added (from the node pool) when pods in the cluster cannot be scheduled.

          That is, when a pod is in the Pending state, automatic scale-out is performed. If a node has been configured as an affinity node for the pods, no node will be automatically added when the pods cannot be scheduled. Generally, an HPA policy works with such scaling. For details, see Using HPA and CA for Auto Scaling of Workloads and Nodes.

          +

          If this parameter is not selected, scaling can be performed only through node scaling policies.

          +
        • Auto node scale-in
          • Node Idle Time (min): Time for which a node should be unneeded before it is eligible for scale-down. Default value: 10 minutes.
          • Scale-in Threshold: If the percentage of both requested CPU and memory on a node is below this threshold, auto scale-down will be triggered to delete the node from the cluster. The default value is 0.5, which means 50%.
          • Stabilization Window (s)

            How long after a scale-out that a scale-in evaluation resumes. Default value: 10 minutes.

            +
            NOTE:

            If both auto scale-out and scale-in exist in a cluster, you are advised to set How long after a scale-out that a scale-in evaluation resumes to 0 minutes. This prevents node scale-in from being blocked by continuous scale-out of some node pools or by retries upon a scale-out failure, which would otherwise waste node resources.

            +
            +

            How long after the node deletion that a scale-in evaluation resumes. Default value: 10 minutes.

            +

            How long after a scale-in failure that a scale-in evaluation resumes. Default value: 3 minutes. For details about the impact and relationship between the scale-in cooling intervals configured in the node pool and autoscaler, see Description of the Scale-In Cool-Down Period.

            +
          • Max. Nodes for Batch Deletion: Maximum number of empty nodes that can be deleted at the same time. Default value: 10.
            This feature applies only to idle nodes. Idle nodes can be concurrently scaled in. Nodes that are not idle can only be scaled in one by one.
            NOTE:

            During node scale-in, a node is considered idle if none of its pods need to be evicted (for example, it runs only DaemonSet pods). Otherwise, the node is not idle.

            +
            +
            +
          • Check Interval: Interval for checking again a node that could not be removed before. Default value: 5 minutes.
          +
        +

        Total Nodes

        +

        Maximum number of nodes that can be managed by the cluster, within which cluster scale-out is performed.

        +

        Total CPUs

        +

        Maximum sum of CPU cores of all nodes in a cluster, within which cluster scale-out is performed.

        +

        Total Memory (GB)

        +

        Maximum sum of memory of all nodes in a cluster, within which cluster scale-out is performed.

        +
        +
        +

      3. When the configuration is complete, click Install.
      +
      +

      Description of the Scale-In Cool-Down Period

      Scale-in cooling intervals can be configured in the node pool settings and the autoscaler add-on settings.

      +

      Scale-in cooling interval configured in a node pool

      +

      This interval indicates the period during which nodes added to the current node pool after a scale-out operation cannot be deleted. This interval takes effect at the node pool level.

      +

      Scale-in cooling interval configured in the autoscaler add-on

      +

      The interval after a scale-out indicates the period during which the entire cluster cannot be scaled in after the autoscaler add-on triggers scale-out (due to the unschedulable pods, metrics, and scaling policies). This interval takes effect at the cluster level.

      +

      The interval after a node is deleted indicates the period during which the cluster cannot be scaled in after the autoscaler add-on triggers scale-in. This interval takes effect at the cluster level.

      +

      The interval after a failed scale-in indicates the period during which the cluster cannot be scaled in after the autoscaler add-on triggers scale-in. This interval takes effect at the cluster level.

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0163.html b/docs/cce/umn/cce_10_0163.html new file mode 100644 index 00000000..2074629d --- /dev/null +++ b/docs/cce/umn/cce_10_0163.html @@ -0,0 +1,73 @@ + + +

      Setting Container Specifications

      +

      Scenario

      CCE allows you to set resource limits for added containers during workload creation. You can apply for and limit the CPU and memory quotas used by each pod in a workload.

      +
      +

      Meanings

      For CPU and Memory, the meanings of Request and Limit are as follows:
      • Request: The scheduler uses this value to select a node with sufficient allocatable resources for the workload.
      • Limit: The maximum amount of resources the workload is allowed to use at runtime.
      +
      +

      When creating a workload, you are advised to set the upper and lower limits of CPU and memory resources. If the upper and lower resource limits are not set for a workload, a resource leak of this workload will make resources unavailable for other workloads deployed on the same node. In addition, workloads that do not have upper and lower resource limits cannot be accurately monitored.

      +
      +
      +
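      In a YAML manifest, Request and Limit correspond to the resources field of each container. A minimal sketch; the pod name resource-demo, the nginx image, and the values are illustrative only:

      apiVersion: v1
      kind: Pod
      metadata:
        name: resource-demo
      spec:
        containers:
        - name: container-1
          image: nginx:latest
          resources:
            requests:            # Used by the scheduler to select a node with enough allocatable resources.
              cpu: 250m
              memory: 512Mi
            limits:              # Hard caps enforced at runtime.
              cpu: 500m
              memory: 1Gi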

      Configuration Description

      In actual production services, the recommended ratio of Request to Limit is about 1:1.5. For some sensitive services, the recommended ratio is 1:1. If the Request is too small and the Limit is too large, node resources are overcommitted. During service peaks, the memory or CPU of a node may be used up. As a result, the node is unavailable.

      +
      • CPU quotas: +
        + + + + + + + + + + +
        Table 1 Description of CPU quotas

        Parameter

        +

        Description

        +

        CPU request

        +

        Minimum number of CPU cores required by a container. Resources are scheduled for the container based on this value. The container can be scheduled to a node only when the node's total available CPU is greater than or equal to the container's CPU request.

        +

        CPU limit

        +

        Maximum number of CPU cores available for a container.

        +
        +
        +

        Recommended configuration

        +

        Actual available CPU of a node ≥ Sum of CPU limits of all containers on the current node ≥ Sum of CPU requests of all containers on the current node. You can view the actual available CPUs of a node on the CCE console (Resource Management > Nodes > Allocatable).

        +
      +
      • Memory quotas: +
        + + + + + + + + + + +
        Table 2 Description of memory quotas

        Parameter

        +

        Description

        +

        Memory request

        +

        Minimum amount of memory required by a container. Resources are scheduled for the container based on this value. The container can be scheduled to a node only when the node's total available memory is greater than or equal to the container's memory request.

        +

        Memory Limit

        +

        Maximum amount of memory available for a container. When the memory usage exceeds the configured memory limit, the container may be restarted, which affects the normal use of the workload.

        +
        +
        +

        Recommended configuration

        +

        Actual available memory of a node ≥ Sum of memory limits of all containers on the current node ≥ Sum of memory requests of all containers on the current node. You can view the actual available memory of a node on the CCE console (Resource Management > Nodes > Allocatable).

        +
      +

      The allocatable resources are calculated based on the resource request value (Request), which indicates the upper limit of resources that can be requested by pods on this node, but does not indicate the actual available resources of the node. The calculation formula is as follows:

      +
      • Allocatable CPU = Total CPU – Requested CPU of all pods – Reserved CPU for other resources
      • Allocatable memory = Total memory – Requested memory of all pods – Reserved memory for other resources
      +
      +
      +

      Example

      Assume that a cluster contains a node with 4 cores and 8 GB. A workload containing two pods has been deployed on the cluster. The resources of the two pods (pods 1 and 2) are as follows: {CPU request, CPU limit, memory request, memory limit} = {1 core, 2 cores, 2 GB, 2 GB}.

      +

      The CPU and memory usage of the node is as follows:

      +
      • Allocatable CPU = 4 cores - (1 core requested by pod 1 + 1 core requested by pod 2) = 2 cores
      • Allocatable memory = 8 GB - (2 GB requested by pod 1 + 2 GB requested by pod 2) = 4 GB
      +

      Therefore, the remaining 2 cores and 4 GB can be used by the next new pod.

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0164.html b/docs/cce/umn/cce_10_0164.html new file mode 100644 index 00000000..de65027f --- /dev/null +++ b/docs/cce/umn/cce_10_0164.html @@ -0,0 +1,23 @@ + + +

      Permissions Management

      +
      + + diff --git a/docs/cce/umn/cce_10_0175.html b/docs/cce/umn/cce_10_0175.html new file mode 100644 index 00000000..08b1c73c --- /dev/null +++ b/docs/cce/umn/cce_10_0175.html @@ -0,0 +1,17 @@ + + +

      Obtaining a Cluster Certificate

      +

      Scenario

      This section describes how to obtain the cluster certificate from the console and use it to access Kubernetes clusters.

      +
      +

      Procedure

      1. Log in to the CCE console and access the cluster console.
      2. Choose Cluster Information from the navigation pane and click Download next to Authentication Mode in the Connection Information area.
      3. In the Download X.509 Certificate dialog box displayed, select the certificate expiration time and download the X.509 certificate of the cluster as prompted.

        Figure 1 Downloading a certificate
        +
        • The downloaded certificate contains three files: client.key, client.crt, and ca.crt. Keep these files secure.
        • Certificates are not required for mutual access between containers in a cluster.
        +
        +

      +
      +
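      The downloaded files can be used, for example, to authenticate kubectl against the cluster API server. The following is a minimal sketch; <cluster-API-server-address> is a placeholder for your cluster's API server endpoint:

      kubectl --server=https://<cluster-API-server-address> \
              --certificate-authority=./ca.crt \
              --client-certificate=./client.crt \
              --client-key=./client.key \
              get nodes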
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0178.html b/docs/cce/umn/cce_10_0178.html new file mode 100644 index 00000000..b3fda086 --- /dev/null +++ b/docs/cce/umn/cce_10_0178.html @@ -0,0 +1,218 @@ + + +

      Formula for Calculating the Reserved Resources of a Node

      +

      Some node resources are needed to run the Kubernetes system components that make the node part of your cluster. Therefore, the total amount of node resources and the amount of resources allocatable to workloads in Kubernetes are different. The larger the node specifications, the more containers are deployed on the node, so Kubernetes needs to reserve more resources.

      +

      To ensure node stability, a certain amount of CCE node resources will be reserved for Kubernetes components (such as kubelet, kube-proxy, and docker) based on the node specifications.

      +

      CCE calculates the resources that can be allocated to user nodes as follows:

      +

      Allocatable resources = Total amount - Reserved amount - Eviction threshold

      +

      The memory eviction threshold is fixed at 100 MB.

      +

      When the memory consumed by all pods on a node increases, the following behaviors may occur:

      +
      1. If the memory used by pods is greater than or equal to the allocatable amount on the node, kubelet starts evicting pods.
      2. If memory usage keeps growing and approaches the total amount minus the reserved amount (that is, the allocatable amount plus the eviction threshold), the OS OOM killer is triggered.
      +

      Rules for Reserving Node Memory

      You can use the following formula to calculate how much memory you should reserve for running containers on a node:

      +

      Total reserved amount = Reserved memory for system components + Reserved memory for kubelet to manage pods

      + +
      + + + + + + + + + + + + + + + + +
      Table 1 Reservation rules for system components

      Total Memory (TM)

      +

      Reserved Memory for System Components

      +

      TM ≤ 8 GB

      +

      0 MB

      +

      8 GB < TM ≤ 16 GB

      +

      [(TM – 8 GB) x 1024 x 10%] MB

      +

      16 GB < TM ≤ 128 GB

      +

      [8 GB x 1024 x 10% + (TM – 16 GB) x 1024 x 6%] MB

      +

      TM > 128 GB

      +

      (8 GB x 1024 x 10% + 112 GB x 1024 x 6% + (TM – 128 GB) x 1024 x 2%) MB

      +
      +
      + +
      + + + + + + + + + + + + + + + + + + + + + + + + + +
      Table 2 Reservation rules for kubelet

      Total Memory (TM)

      +

      Number of Pods

      +

      Reserved Memory for kubelet

      +

      TM ≤ 2 GB

      +

      -

      +

      TM x 25%

      +

      TM > 2 GB

      +

      0 < Max. pods on a node ≤ 16

      +

      700 MB

      +

      16 < Max. pods on a node ≤ 32

      +

      [700 + (Max. pods on a node – 16) x 18.75] MB

      +

      32 < Max. pods on a node ≤ 64

      +

      [1024 + (Max. pods on a node – 32) x 6.25] MB

      +

      64 < Max. pods on a node ≤ 128

      +

      [1230 + (Max. pods on a node – 64) x 7.80] MB

      +

      Max. pods on a node > 128

      +

      [1740 + (Max. pods on a node – 128) x 11.20] MB

      +
      +
      +

      For a small-capacity node, adjust the maximum number of pods based on site requirements. Alternatively, when creating a node on the CCE console, you can adjust the maximum number of pods for the node based on the node specifications.

      +
      +
      +
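      For example, based on Table 1 and Table 2, the total reserved memory on a hypothetical node with 32 GB of memory and a maximum of 40 pods would be approximately:

      Reserved memory for system components = 8 GB x 1024 x 10% + (32 GB – 16 GB) x 1024 x 6% = 819.2 MB + 983.04 MB ≈ 1802 MB
      Reserved memory for kubelet (32 < 40 ≤ 64 pods) = [700 + 324 + (40 – 32) x 6.25] MB ≈ [1024 + 50] MB = 1074 MB
      Total reserved memory ≈ 1802 MB + 1074 MB ≈ 2876 MB (about 2.8 GB)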

      Rules for Reserving Node Memory (v2)

      For clusters of v1.21.4-r0, v1.23.3-r0, or later, the node memory reservation model is optimized to V2 and can be dynamically adjusted using the node pool parameters kube-reserved-mem and system-reserved-mem. For details, see Managing a Node Pool.

      +

      The total reserved node memory of the V2 model is equal to the sum of that reserved for the OS and that reserved for CCE to manage pods.

      +

      Reserved memory includes basic and floating parts. For the OS, the floating memory depends on the node specifications. For CCE, the floating memory depends on the number of pods on a node.

      + +
      + + + + + + + + + + + + + + + + + + + + + + + + +
      Table 3 Rules for reserving node memory (v2)

      Reserved for

      +

      Basic/Floating

      +

      Reservation

      +

      Used by

      +

      OS

      +

      Basic

      +

      400 MB (fixed)

      +

      OS service components such as sshd and systemd-journald.

      +

      Floating (depending on the node memory)

      +

      25 MB/GB

      +

      Kernel

      +

      CCE

      +

      Basic

      +

      500 MB (fixed)

      +

      Container engine and management components, such as kubelet and kube-proxy, when the node runs no workload pods

      +

      Floating (depending on the number of pods on the node)

      +

      Docker: 20 MB/pod

      +

      containerd: 5 MB/pod

      +

      Container engine components when the number of pods increases

      +
      NOTE:

      When the v2 model reserves memory for a node by default, the default maximum number of pods is estimated based on the memory. For details, see Default Maximum Number of Pods on a Node.

      +
      +
      +
      +
      +

      Rules for Reserving Node CPU

      +
      + + + + + + + + + + + + + + + + +
      Table 4 Node CPU reservation rules

      Total CPU Cores (Total)

      +

      Reserved CPU Cores

      +

      Total ≤ 1 core

      +

      Total x 6%

      +

      1 core < Total ≤ 2 cores

      +

      1 core x 6% + (Total – 1 core) x 1%

      +

      2 cores < Total ≤ 4 cores

      +

      1 core x 6% + 1 core x 1% + (Total – 2 cores) x 0.5%

      +

      Total > 4 cores

      +

      1 core x 6% + 1 core x 1% + 2 cores x 0.5% + (Total – 4 cores) x 0.25%

      +
      +
      +
      +
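      For example, based on Table 4, a hypothetical node with 8 CPU cores reserves approximately:

      Reserved CPU cores = 1 core x 6% + 1 core x 1% + 2 cores x 0.5% + (8 – 4) cores x 0.25% = 0.06 + 0.01 + 0.01 + 0.01 = 0.09 cores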

      Default Maximum Number of Pods on a Node

      +
      + + + + + + + + + + + + + + + + + + + +
      Table 5 Default maximum number of pods on a node

      Memory

      +

      Default Maximum Number of Pods

      +

      4 GB

      +

      20

      +

      8 GB

      +

      40

      +

      16 GB

      +

      60

      +

      32 GB

      +

      80

      +

      64 GB or above

      +

      110

      +
      +
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0180.html b/docs/cce/umn/cce_10_0180.html new file mode 100644 index 00000000..4714e058 --- /dev/null +++ b/docs/cce/umn/cce_10_0180.html @@ -0,0 +1,25 @@ + + +

      Node Overview

      +
      + + diff --git a/docs/cce/umn/cce_10_0182.html b/docs/cce/umn/cce_10_0182.html new file mode 100644 index 00000000..a1e81f00 --- /dev/null +++ b/docs/cce/umn/cce_10_0182.html @@ -0,0 +1,94 @@ + + +

      Monitoring Overview

      +

      CCE works with AOM to comprehensively monitor clusters. When a node is created, the ICAgent (the DaemonSet named icagent in the kube-system namespace of the cluster) of AOM is installed by default. The ICAgent collects monitoring data of underlying resources and workloads running on the cluster. It also collects monitoring data of custom metrics of the workload.

      +
      • Resource metrics

        Basic resource monitoring includes CPU, memory, and disk monitoring. For details, see Resource Metrics. You can view these metrics of clusters, nodes, and workloads on the CCE or AOM console.

        +
      • Custom metrics

        The ICAgent collects custom metrics of applications and uploads them to AOM. For details, see Custom Monitoring.

        +
      +

      Resource Metrics

      On the CCE console, you can view the following metrics.

      + +
      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      Table 1 Resource metrics

      Metric

      +

      Description

      +

      CPU Allocation Rate

      +

      Indicates the percentage of CPUs allocated to workloads.

      +

      Memory Allocation Rate

      +

      Indicates the percentage of memory allocated to workloads.

      +

      CPU Usage

      +

      Indicates the CPU usage.

      +

      Memory Usage

      +

      Indicates the memory usage.

      +

      Disk Usage

      +

      Indicates the disk usage.

      +

      Down

      +

      Indicates the speed at which data is downloaded to a node. The unit is KB/s.

      +

      Up

      +

      Indicates the speed at which data is uploaded from a node. The unit is KB/s.

      +

      Disk Read Rate

      +

      Indicates the data volume read from a disk per second. The unit is KB/s.

      +

      Disk Write Rate

      +

      Indicates the data volume written to a disk per second. The unit is KB/s.

      +
      +
      +

      On the AOM console, you can view host metrics and container metrics.

      +
      +

      Viewing Cluster Monitoring Data

      Click the cluster name and access the cluster console. In the navigation pane, choose Cluster Information. In the right pane, you can view the CPU and memory usage of all nodes (excluding master nodes) in the cluster in the last hour.

      +

      +

      Explanation of monitoring metrics:

      +
      • CPU allocation rate = Sum of the CPU quotas requested by pods in the cluster / Sum of the allocatable CPU quotas of all nodes (excluding master nodes) in the cluster
      • Memory allocation rate = Sum of the memory quotas requested by pods in the cluster / Sum of the allocatable memory quotas of all nodes (excluding master nodes) in the cluster
      • CPU usage: average CPU usage of all nodes (excluding master nodes) in the cluster
      • Memory usage: average memory usage of all nodes (excluding master nodes) in the cluster
      +

      Allocatable node resources (CPU or memory) = Total amount – Reserved amount – Eviction thresholds. For details, see Formula for Calculating the Reserved Resources of a Node.

      +
      +
      +

      CCE provides the status, availability zone (AZ), CPU usage, and memory usage of master nodes.

      +

      Viewing Monitoring Data of Worker Nodes

      In addition to viewing monitoring data of all nodes, you can also view monitoring data of a single node. Click the cluster name and access the cluster console. Choose Nodes in the navigation pane and click Monitor in the Operation column of the target node.

      +

      Monitoring data comes from AOM. You can view the monitoring data of a node, including the CPU, memory, disk, network, and GPU.

      +

      +
      +

      Viewing Workload Monitoring Data

      You can view monitoring data of a workload on the Monitoring tab page of the workload details page. Click the cluster name and access the cluster console. Choose Workloads in the navigation pane and click Monitor in the Operation column of the target workload.

      +

      Monitoring data comes from AOM. You can view the monitoring data of a workload, including the CPU, memory, network, and GPU, on the AOM console.

      +
      Explanation of monitoring metrics:
      • Workload CPU usage = Maximum CPU usage in each pod of the workload
      • Workload memory usage = Maximum memory usage in each pod of the workload
      +
      +

      You can also click View More to go to the AOM console and view monitoring data of the workload.

      +
      +

      Viewing Pod Monitoring Data

      You can view monitoring data of a pod on the Pods tab page of the workload details page.

      +
      Explanation of monitoring metrics:
      • Pod CPU usage = CPU cores used by the pod / Sum of the CPU limits of the pod's containers (if no limit is specified, the node's total CPU cores are used)
      • Pod memory usage = Physical memory used by the pod / Sum of the memory limits of the pod's containers (if no limit is specified, the node's total memory is used)
      +
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0183.html b/docs/cce/umn/cce_10_0183.html new file mode 100644 index 00000000..e0875304 --- /dev/null +++ b/docs/cce/umn/cce_10_0183.html @@ -0,0 +1,33 @@ + + +

      Nodes

      +
      + + diff --git a/docs/cce/umn/cce_10_0184.html b/docs/cce/umn/cce_10_0184.html new file mode 100644 index 00000000..03f8377b --- /dev/null +++ b/docs/cce/umn/cce_10_0184.html @@ -0,0 +1,20 @@ + + +

      Synchronizing Data with Cloud Servers

      +

      Scenario

      Each node in a cluster is a cloud server or physical machine. After a cluster node is created, you can change the cloud server name or specifications as required.

      +

      Some information about CCE nodes is maintained independently from the ECS console. After you change the name, EIP, or specifications of an ECS on the ECS console, you need to synchronize the ECS information to the corresponding node on the CCE console. After the synchronization, information on both consoles is consistent.

      +
      +

      Notes and Constraints

      • Data, including the VM status, ECS names, number of CPUs, size of memory, ECS specifications, and public IP addresses, can be synchronized.

        If an ECS name is specified as the Kubernetes node name, the change of the ECS name cannot be synchronized to the CCE console.

        +
      • Data, such as the OS and image ID, cannot be synchronized. (Such parameters cannot be modified on the ECS console.)
      +
      +

      Procedure

      1. Log in to the CCE console.
      2. Click the cluster name to access the cluster console. Choose Nodes in the navigation pane.
      3. Choose More > Sync Server Data next to the node.

        Figure 1 Synchronizing server data
        +

        After the synchronization is complete, the ECS data synchronization requested message is displayed in the upper right corner.

        +

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0185.html b/docs/cce/umn/cce_10_0185.html new file mode 100644 index 00000000..8eaf698f --- /dev/null +++ b/docs/cce/umn/cce_10_0185.html @@ -0,0 +1,55 @@ + + +

      Logging In to a Node

      +

      Notes and Constraints

      • If you use SSH to log in to a node (an ECS), ensure that the ECS already has an EIP (a public IP address).
      • You can log in only to a running ECS.
      • Only the user linux can log in to a Linux server.
      +
      +

      Login Modes

      You can log in to an ECS in either of the following modes:

      +
      • Management console (VNC)

        If an ECS has no EIP, log in to the ECS console and click Remote Login in the same row as the ECS.

        +

        For details, see Login Using VNC.

        +
      • SSH

        This mode applies only to ECSs running Linux. Usually, you can use a remote login tool, such as PuTTY, Xshell, and SecureCRT, to log in to your ECS. If none of the remote login tools can be used, log in to the ECS console and click Remote Login in the same row as the ECS to view the connection status and running status of the ECS.

        +
        • When you use the Windows OS to log in to a Linux node, set Auto-login username to linux.
        • The CCE console does not support node OS upgrade. Do not upgrade the node OS using the yum update command. Otherwise, the container networking components will be unavailable.
        +
        +
      +
      + +
      + + + + + + + + + + + + + + + + + +
      Table 1 Linux ECS login modes

      EIP Binding

      +

      On-Premises OS

      +

      Connection Method

      +

      Yes

      +

      Windows

      +

      Use a remote login tool, such as PuTTY or XShell.

      + +

      Yes

      +

      Linux

      +

      Run the ssh command from a terminal.

      + +

      Yes/No

      +

      Windows/Linux

      +

      Remote login using the management console: Login Using VNC

      +
      +
      +
      +
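      For example, if the node has an EIP bound, an SSH connection from a Linux or macOS terminal might look like the following; <node-EIP> is a placeholder, the username follows the login constraint described above, and authentication uses the credentials configured when the node was created:

      ssh linux@<node-EIP>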
      + +
      + diff --git a/docs/cce/umn/cce_10_0186.html b/docs/cce/umn/cce_10_0186.html new file mode 100644 index 00000000..d7eb7267 --- /dev/null +++ b/docs/cce/umn/cce_10_0186.html @@ -0,0 +1,22 @@ + + +

      Deleting a Node

      +

      Scenario

      When a node in a CCE cluster is deleted, services running on the node will also be deleted. Exercise caution when performing this operation.

      +
      +

      Notes and Constraints

      • After a CCE cluster is deleted, the ECS nodes in the cluster are also deleted.
      • For clusters of v1.17.11 or later, after a VM is deleted on the ECS console, the corresponding node in the CCE cluster is automatically deleted.

        +
        +
      +
      +

      Precautions

      • Deleting a node will lead to pod migration, which may affect services. Perform this operation during off-peak hours.
      • Unexpected risks may occur during the operation. Back up related data in advance.
      • During the operation, the backend will set the node to the unschedulable state.
      • Only worker nodes can be deleted.
      +
      +

      Procedure

      1. Log in to the CCE console and click the cluster name to access the cluster.
      2. In the navigation pane, choose Nodes. In the same row as the node you will delete, choose More > Delete.
      3. In the Delete Node dialog box, click Yes.

        • After the node is deleted, pods on it are automatically migrated to other available nodes.
        • If the disks and EIPs bound to the node are important resources, unbind them first. Otherwise, they will be deleted with the node.
        +
        +

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0187.html b/docs/cce/umn/cce_10_0187.html new file mode 100644 index 00000000..7393d70a --- /dev/null +++ b/docs/cce/umn/cce_10_0187.html @@ -0,0 +1,57 @@ + + +

      Permissions Overview

      +

      CCE permissions management allows you to assign permissions to IAM users and user groups under your tenant accounts. CCE combines the advantages of Identity and Access Management (IAM) and Kubernetes Role-based Access Control (RBAC) authorization to provide a variety of authorization methods, including IAM fine-grained authorization, IAM token authorization, cluster-scoped authorization, and namespace-wide authorization.

      +

      CCE allows you to manage permissions on clusters and related resources at a finer granularity, for example, to control the access of employees in different departments to cloud resources.

      +

      This section describes the CCE permissions management mechanism and related concepts. If your account has met your service requirements, you can skip the configurations in this chapter.

      +

      CCE Permissions Management

      CCE permissions are described as follows:
      • Cluster-level permissions: Cluster-level permissions management evolves out of the system policy authorization feature of IAM. IAM users in the same user group have the same permissions. On IAM, you can configure system policies to describe which IAM user groups can perform which operations on cluster resources. For example, you can allow user group A to create and delete cluster X, add a node, or install an add-on, while allowing user group B only to view information about cluster X.

        Cluster-level permissions involve CCE non-Kubernetes APIs and support fine-grained IAM policies.

        +
      • Namespace-level permissions: You can regulate users' or user groups' access to Kubernetes resources in a single namespace based on their Kubernetes RBAC roles. CCE has also been enhanced based on open-source capabilities. It supports RBAC authorization based on IAM user or user group, and RBAC authentication on access to APIs using IAM tokens.

        Namespace-level permissions involve CCE Kubernetes APIs and are enhanced based on the Kubernetes RBAC capabilities. Namespace-level permissions can be granted to IAM users or user groups for authentication and authorization, but are independent of fine-grained IAM policies.

        +

        Starting from version 1.11.7-r2, CCE clusters allow you to configure namespace permissions. Clusters earlier than v1.11.7-r2 have all namespace permissions by default.

        +
      +
      +

      In general, you configure CCE permissions in two scenarios. The first is creating and managing clusters and related resources, such as nodes. The second is creating and using Kubernetes resources in the cluster, such as workloads and Services.

      +
      Figure 1 Illustration on CCE permissions
      +

      These permissions allow you to manage resource users at a finer granularity.

      +
      +

      Cluster Permissions (IAM-based) and Namespace Permissions (Kubernetes RBAC-based)

      Users with different cluster permissions (assigned using IAM) have different namespace permissions (assigned using Kubernetes RBAC). Table 1 lists the namespace permissions of different users.

      + +
      + + + + + + + + + + + + + + + + +
      Table 1 Differences in namespace permissions

      User

      +

      Clusters of v1.13 and Later

      +

      User with the Tenant Administrator permissions

      +

      All namespace permissions

      +

      IAM user with the CCE Administrator role

      +

      All namespace permissions

      +

      IAM user with the CCE FullAccess or CCE ReadOnlyAccess role

      +

      Requires Kubernetes RBAC authorization.

      +

      IAM user with the Tenant Guest role

      +

      Requires Kubernetes RBAC authorization.

      +
      +
      +
      +

      kubectl Permissions

      You can use kubectl to access Kubernetes resources in a cluster.

      +

      When you access a cluster using kubectl, CCE uses the kubeconfig.json file generated on the cluster for authentication. This file contains user information, based on which CCE determines which Kubernetes resources can be accessed by kubectl. The permissions recorded in a kubeconfig.json file vary from user to user. The permissions that a user has are listed in Table 1.

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0188.html b/docs/cce/umn/cce_10_0188.html new file mode 100644 index 00000000..72c4cb06 --- /dev/null +++ b/docs/cce/umn/cce_10_0188.html @@ -0,0 +1,96 @@ + + +

      Cluster Permissions (IAM-based)

      +

      CCE cluster-level permissions are assigned based on IAM system policies and custom policies. You can use user groups to assign permissions to IAM users.

      +

      Cluster permissions are configured only for cluster-related resources (such as clusters and nodes). You must also configure namespace permissions to operate Kubernetes resources (such as workloads and Services).

      +
      +

      Prerequisites

      • A user with the Security Administrator role (for example, your account) has all IAM permissions except role switching. Only these users can view user groups and their permissions on the Permissions page on the CCE console.
      +
      +

      Configuration

      On the CCE console, when you choose Permissions > Cluster-Level Permissions to create a user group, you will be directed to the IAM console to complete the process. After the user group is created and its permissions are configured, you can view the information on the Cluster-Level Permissions tab page. This section describes the operations in IAM.

      +
      +

      Process Flow

      Figure 1 Process of assigning CCE permissions
      +

      +
      1. Create a user group and assign permissions to it.

        Create a user group on the IAM console, and assign CCE permissions, for example, the CCEReadOnlyAccess policy to the group.

        +

        CCE is deployed by region. On the IAM console, select Region-specific projects when assigning CCE permissions.

        +
        +
      2. Create a user and add it to a user group.

        Create a user on the IAM console and add the user to the group created in 1.

        +
      3. Log in and verify permissions.

        Log in to the management console as the user you created, and verify that the user has the assigned permissions.

        +
        • Log in to the management console, switch to the CCE console, and buy a cluster. If you fail to do so (assuming that only the CCEReadOnlyAccess permission is assigned), the permission control policy takes effect.
        • Switch to the console of any other service. If a message appears indicating that you do not have the required permissions to access the service, the CCEReadOnlyAccess policy takes effect.
        +
      +
      +

      System-defined Roles

      Roles are a type of coarse-grained authorization mechanism that defines service-level permissions based on user responsibilities. Only a limited number of service-level roles are available for authorization. Roles are not ideal for fine-grained authorization and least privilege access.

      +

      The preset system role for CCE in IAM is CCEAdministrator. When assigning this role to a user group, you must also select other roles and policies on which this role depends, such as Tenant Guest, Server Administrator, ELB Administrator, OBS Administrator, SFS Administrator, SWR Admin, and APM FullAccess.

      +
      +

      System-defined Policies

      The system policies preset for CCE in IAM are CCE FullAccess and CCE ReadOnlyAccess.

      +
      • CCE FullAccess: common operation permissions on CCE cluster resources, excluding the namespace-level permissions for the clusters (with Kubernetes RBAC enabled) and the privileged administrator operations, such as agency configuration and cluster certificate generation
      • CCE ReadOnlyAccess: permissions to view CCE cluster resources, excluding the namespace-level permissions of the clusters (with Kubernetes RBAC enabled)
      +

      The CCE Admin and CCE Viewer roles will be deprecated soon. You are advised to use CCE FullAccess and CCE ReadOnlyAccess.

      +
      +
      +

      Custom Policies

      Custom policies can be created as a supplement to the system-defined policies of CCE.

      +

      You can create custom policies in either of the following ways:

      +
      • Visual editor: Select cloud services, actions, resources, and request conditions. This does not require knowledge of policy syntax.
      • JSON: Edit JSON policies from scratch or based on an existing policy.
      +

      This section provides examples of common custom CCE policies.

      +
      +

      Example Custom Policies:

      +
      • Example 1: Creating a cluster named test
        {
        +    "Version": "1.1",
        +    "Statement": [
        +        {
        +            "Effect": "Allow",
        +            "Action": [
        +                "cce:cluster:create"
        +            ]
        +        }
        +    ]
        +}
        +
      • Example 2: Denying node deletion

        A policy with only "Deny" permissions must be used in conjunction with other policies to take effect. If the permissions assigned to a user contain both "Allow" and "Deny", the "Deny" permissions take precedence over the "Allow" permissions.

        +

        The following method can be used if you need to assign permissions of the CCEFullAccess policy to a user but you want to prevent the user from deleting nodes (cce:node:delete). Create a custom policy for denying node deletion, and attach both policies to the group to which the user belongs. Then, the user can perform all operations on CCE except deleting nodes. The following is an example of a deny policy:

        +
        {
        +    "Version": "1.1",
        +    "Statement": [
        +        {
        +            "Effect": "Deny",
        +            "Action": [
        +                "cce:node:delete"
        +            ]
        +        }
        +    ]
        +}
        +
      • Example 3: Defining permissions for multiple services in a policy

        A custom policy can contain the actions of multiple services that are of the global or project-level type. The following is an example policy containing actions of multiple services:

        +
        {
        +    "Version": "1.1",
        +    "Statement": [
        +        {
        +            "Action": [
        +                "ecs:cloudServers:resize",
        +                "ecs:cloudServers:delete",
        +                "ims:images:list",
        +                "ims:serverImages:create"
        +            ],
        +            "Effect": "Allow"
        +        }
        +    ]
        +}
        +
      +

      CCE Cluster Permissions and IAM RBAC

      CCE is compatible with IAM system roles for permissions management. You are advised to use fine-grained policies provided by IAM to simplify permissions management.

      +

      CCE supports the following roles:

      +
      • Basic IAM roles:
        • te_admin (Tenant Administrator): Users with this role can call all APIs of all services except IAM.
        • readonly (Tenant Guest): Users with this role can call APIs with the read-only permissions of all services except IAM.
        +
      • Custom CCE administrator role: CCE Administrator
      +
      • Tenant Administrator and Tenant Guest are special IAM system roles. After any system or custom policy is configured, Tenant Administrator and Tenant Guest take effect as system policies to achieve compatibility with IAM RBAC and ABAC scenarios.
      • If a user has the Tenant Administrator or CCE Administrator system role, the user has the cluster-admin permissions in Kubernetes RBAC and the permissions cannot be removed after the cluster is created.
        If the user is the cluster creator, the cluster-admin permissions in Kubernetes RBAC are granted to the user by default. The permissions can be manually removed after the cluster is created.
        • Method 1: On the CCE console, choose Permissions Management > Namespace-Level Permissions and delete the permission entry whose role is cluster-creator.
        • Method 2: Delete ClusterRoleBinding: cluster-creator through the API or kubectl.
        +
        +
      +
      +

      When RBAC and IAM policies co-exist, the backend authentication logic for open APIs or console operations on CCE is as follows:

      +

      +

      Certain CCE APIs involve namespace-level permissions or key operations and therefore, they require special permissions:

      +

      Using clusterCert to obtain the cluster kubeconfig: cceadm/teadmin

      +
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0189.html b/docs/cce/umn/cce_10_0189.html new file mode 100644 index 00000000..47eca800 --- /dev/null +++ b/docs/cce/umn/cce_10_0189.html @@ -0,0 +1,221 @@ + + +

      Namespace Permissions (Kubernetes RBAC-based)

      +

      Namespace Permissions (Kubernetes RBAC-based)

      You can regulate users' or user groups' access to Kubernetes resources in a single namespace based on their Kubernetes RBAC roles. The RBAC API declares four kinds of Kubernetes objects: Role, ClusterRole, RoleBinding, and ClusterRoleBinding, which are described as follows:

      +
      • Role: defines a set of rules for accessing Kubernetes resources in a namespace.
      • RoleBinding: defines the relationship between users and roles.
      • ClusterRole: defines a set of rules for accessing Kubernetes resources in a cluster (including all namespaces).
      • ClusterRoleBinding: defines the relationship between users and cluster roles.
      +

      Role and ClusterRole specify actions that can be performed on specific resources. RoleBinding and ClusterRoleBinding bind roles to specific users, user groups, or ServiceAccounts. Illustration:

      +
      Figure 1 Role binding
      +

      On the CCE console, you can assign permissions to a user or user group to access resources in one or multiple namespaces. By default, the CCE console provides the following ClusterRoles:

      +
      • view (read-only): read-only permission on most resources in all or selected namespaces.
      • edit (development): read and write permissions on most resources in all or selected namespaces. If this ClusterRole is configured for all namespaces, its capability is the same as the O&M permission.
      • admin (O&M): read and write permissions on most resources in all namespaces, and read-only permission on nodes, storage volumes, namespaces, and quota management.
      • cluster-admin (administrator): read and write permissions on all resources in all namespaces.
      +
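If you want to check exactly what these preset ClusterRoles allow, you can inspect them with standard kubectl commands once you have access to the cluster (a quick, optional check):

# List the preset ClusterRoles used by the CCE console
kubectl get clusterrole view edit admin cluster-admin
# Show the detailed rules of one of them
kubectl describe clusterrole view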
      +

      Cluster Permissions (IAM-based) and Namespace Permissions (Kubernetes RBAC-based)

      Users with different cluster permissions (assigned using IAM) have different namespace permissions (assigned using Kubernetes RBAC). Table 1 lists the namespace permissions of different users.

      + +
      + + + + + + + + + + + + + + + + +
      Table 1 Differences in namespace permissions

      User

      +

      Clusters of v1.13 and Later

      +

      User with the Tenant Administrator permissions

      +

      All namespace permissions

      +

      IAM user with the CCE Administrator role

      +

      All namespace permissions

      +

      IAM user with the CCE FullAccess or CCE ReadOnlyAccess role

      +

      Requires Kubernetes RBAC authorization.

      +

      IAM user with the Tenant Guest role

      +

      Requires Kubernetes RBAC authorization.

      +
      +
      +
      +

      Precautions

      • Kubernetes RBAC authorization can be used for clusters of v1.11.7-r2 and later. Ensure that you have deployed a supported cluster version. For details about upgrading a cluster, see Performing Replace/Rolling Upgrade.
• After you create a cluster of v1.11.7-r2 or later, CCE automatically assigns the cluster-admin permission to you, which means you have full control over all resources in all namespaces of the cluster. The ID of a federated user changes upon each login and logout, so the user that holds these permissions may be displayed as deleted. In this case, do not delete the permissions; otherwise, authentication will fail. You are advised to grant the cluster-admin permission to a user group on CCE and add federated users to that user group.
      • A user with the Security Administrator role has all IAM permissions except role switching. For example, an account in the admin user group has this role by default. Only these users can assign permissions on the Permissions page on the CCE console.
      +
      +

      Configuring Namespace Permissions (on the Console)

      You can regulate users' or user groups' access to Kubernetes resources in a single namespace based on their Kubernetes RBAC roles.

      +
      1. Log in to the CCE console. In the navigation pane, choose Permissions.
      2. Select a cluster for which you want to add permissions from the drop-down list on the right.
      3. Click Add Permissions in the upper right corner.
      4. Confirm the cluster name and select the namespace to assign permissions for. For example, select All namespaces, the target user or user group, and select the permissions.

        If you do not have IAM permissions, you cannot select users or user groups when configuring permissions for other users or user groups. In this case, you can enter a user ID or user group ID.

        +
        +

        Permissions can be customized as required. After selecting Custom for Permission Type, click Add Custom Role on the right of the Custom parameter. In the dialog box displayed, enter a name and select a rule. After the custom rule is created, you can select a value from the Custom drop-down list box.

        +

      5. Click OK.
      +
      +

      Using kubectl to Configure Namespace Permissions

      When you access a cluster using kubectl, CCE uses the kubeconfig.json file generated on the cluster for authentication. This file contains user information, based on which CCE determines which Kubernetes resources can be accessed by kubectl. The permissions recorded in a kubeconfig.json file vary from user to user. The permissions that a user has are listed in Cluster Permissions (IAM-based) and Namespace Permissions (Kubernetes RBAC-based).

      +
      +
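A convenient way to verify what the user recorded in the current kubeconfig.json file is allowed to do is kubectl auth can-i (standard kubectl, shown here only as a quick self-check):

# Check whether the current user can list pods in the default namespace
kubectl auth can-i list pods --namespace default
# List all actions the current user may perform in the namespace
kubectl auth can-i --list --namespace default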

      In addition to cluster-admin, admin, edit, and view, you can define Roles and RoleBindings to configure the permissions to add, delete, modify, and query resources, such as pods, Deployments, and Services, in the namespace.

      +

Creating a Role is simple: specify a namespace and then define rules. The rules in the following example allow GET and LIST operations on pods in the default namespace.

      +
      kind: Role
      +apiVersion: rbac.authorization.k8s.io/v1
      +metadata:
      +  namespace: default                          # Namespace
      +  name: role-example
      +rules:
      +- apiGroups: [""]
      +  resources: ["pods"]                         # The pod can be accessed.
      +  verbs: ["get", "list"]                      # The GET and LIST operations can be performed.
      +
      • apiGroups indicates the API group to which the resource belongs.
      • resources indicates the resources that can be operated. Pods, Deployments, ConfigMaps, and other Kubernetes resources are supported.
      • verbs indicates the operations that can be performed. get indicates querying a specific object, and list indicates listing all objects of a certain type. Other value options include create, update, and delete.
      +

      For details, see Using RBAC Authorization.

      +
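The same pattern applies to resources in named API groups. As an illustration only (the Role name is hypothetical), a Role that allows read and write access to Deployments in the default namespace might look like this:

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: default
  name: deployment-editor-example            # Hypothetical name
rules:
- apiGroups: ["apps"]                         # Deployments belong to the apps API group
  resources: ["deployments"]
  verbs: ["get", "list", "create", "update", "delete"]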

      After creating a Role, you can bind the Role to a specific user, which is called RoleBinding. The following is an example.

      +
      kind: RoleBinding
      +apiVersion: rbac.authorization.k8s.io/v1
      +metadata:
      +  name: RoleBinding-example
      +  namespace: default
      +  annotations:
      +    CCE.com/IAM: 'true'
      +roleRef:
      +  kind: Role
      +  name: role-example
      +  apiGroup: rbac.authorization.k8s.io
      +subjects:
      +- kind: User
      +  name: 0c97ac3cb280f4d91fa7c0096739e1f8 # User ID of the user-example
      +  apiGroup: rbac.authorization.k8s.io
      +

      The subjects section binds a Role with an IAM user so that the IAM user can obtain the permissions defined in the Role, as shown in the following figure.

      +
      Figure 2 A RoleBinding binds the Role to the user.
      +

      You can also specify a user group in the subjects section. In this case, all users in the user group obtain the permissions defined in the Role.

      +
      subjects:
      +- kind: Group
      +  name: 0c96fad22880f32a3f84c009862af6f7    # User group ID
      +  apiGroup: rbac.authorization.k8s.io
      +

      Use the IAM user user-example to connect to the cluster and obtain the pod information. The following is an example of the returned pod information.

      +
      # kubectl get pod
      +NAME                                   READY   STATUS    RESTARTS   AGE
      +deployment-389584-2-6f6bd4c574-2n9rk   1/1     Running   0          4d7h
      +deployment-389584-2-6f6bd4c574-7s5qw   1/1     Running   0          4d7h
      +deployment-3895841-746b97b455-86g77    1/1     Running   0          4d7h
      +deployment-3895841-746b97b455-twvpn    1/1     Running   0          4d7h
      +nginx-658dff48ff-7rkph                 1/1     Running   0          4d9h
      +nginx-658dff48ff-njdhj                 1/1     Running   0          4d9h
      +# kubectl get pod nginx-658dff48ff-7rkph
      +NAME                     READY   STATUS    RESTARTS   AGE
      +nginx-658dff48ff-7rkph   1/1     Running   0          4d9h
      +

Try querying Deployments and Services in the namespace. The output shows that user-example does not have the required permissions. Try querying the pods in the kube-system namespace. The output shows that user-example does not have the required permissions either. This indicates that the IAM user user-example has only the GET and LIST Pod permissions in the default namespace, as expected.

      +
      # kubectl get deploy
      +Error from server (Forbidden): deployments.apps is forbidden: User "0c97ac3cb280f4d91fa7c0096739e1f8" cannot list resource "deployments" in API group "apps" in the namespace "default"
      +# kubectl get svc
      +Error from server (Forbidden): services is forbidden: User "0c97ac3cb280f4d91fa7c0096739e1f8" cannot list resource "services" in API group "" in the namespace "default"
      +# kubectl get pod --namespace=kube-system
      +Error from server (Forbidden): pods is forbidden: User "0c97ac3cb280f4d91fa7c0096739e1f8" cannot list resource "pods" in API group "" in the namespace "kube-system"
      +
      +

      Example: Assigning All Cluster Permissions (cluster-admin)

      You can use the cluster-admin role to assign all permissions on a cluster. This role contains the permissions for cluster resources (such as PVs and StorageClasses).

      +
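Such a binding is normally created on the CCE console, but if you manage permissions with kubectl, an equivalent binding can be sketched as follows (the binding name and user group ID are placeholders; note that console-generated bindings also carry the CCE.com/IAM annotation shown below, which a manually created binding would not have unless you add it):

kubectl create clusterrolebinding example-cluster-admin-binding \
  --clusterrole=cluster-admin \
  --group=<user-group-id>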

      In the following example kubectl output, a ClusterRoleBinding has been created and binds the cluster-admin role to the user group cce-role-group.

      +
      # kubectl get clusterrolebinding
      +NAME                                                              ROLE                           AGE
      +clusterrole_cluster-admin_group0c96fad22880f32a3f84c009862af6f7   ClusterRole/cluster-admin      61s
      +
      +# kubectl get clusterrolebinding clusterrole_cluster-admin_group0c96fad22880f32a3f84c009862af6f7 -oyaml
      +apiVersion: rbac.authorization.k8s.io/v1
      +kind: ClusterRoleBinding
      +metadata:
      +  annotations:
      +    CCE.com/IAM: "true"
      +  creationTimestamp: "2021-06-23T09:15:22Z"
      +  name: clusterrole_cluster-admin_group0c96fad22880f32a3f84c009862af6f7
      +  resourceVersion: "36659058"
      +  selfLink: /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/clusterrole_cluster-admin_group0c96fad22880f32a3f84c009862af6f7
      +  uid: d6cd43e9-b4ca-4b56-bc52-e36346fc1320
      +roleRef:
      +  apiGroup: rbac.authorization.k8s.io
      +  kind: ClusterRole
      +  name: cluster-admin
      +subjects:
      +- apiGroup: rbac.authorization.k8s.io
      +  kind: Group
      +  name: 0c96fad22880f32a3f84c009862af6f7
      +

      Connect to the cluster as an authorized user. If the PVs and StorageClasses can be queried, the permission configuration takes effect.

      +
      # kubectl get pv
      +No resources found
      +# kubectl get sc
      +NAME                PROVISIONER                     RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
      +csi-disk            everest-csi-provisioner         Delete          Immediate              true                   75d
      +csi-disk-topology   everest-csi-provisioner         Delete          WaitForFirstConsumer   true                   75d
      +csi-nas             everest-csi-provisioner         Delete          Immediate              true                   75d
      +csi-obs             everest-csi-provisioner         Delete          Immediate              false                  75d
      +
      +

      Example: Assigning All Namespace Permissions (admin)

      admin has all permissions on namespaces. You can grant this role to a user or user group to manage one or all namespaces.

      +
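As with cluster-admin, the console is the usual way to grant this role, but an equivalent namespace-scoped binding can be sketched with kubectl (binding name and group ID are placeholders):

kubectl create rolebinding example-admin-binding \
  --clusterrole=admin \
  --group=<user-group-id> \
  --namespace=default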

      In the following example kubectl output, a RoleBinding has been created, the admin role is bound to the user group cce-role-group, and the target namespace is the default namespace.

      +
      # kubectl get rolebinding
      +NAME                                                      ROLE                AGE
      +clusterrole_admin_group0c96fad22880f32a3f84c009862af6f7   ClusterRole/admin   18s
      +# kubectl get rolebinding clusterrole_admin_group0c96fad22880f32a3f84c009862af6f7 -oyaml
      +apiVersion: rbac.authorization.k8s.io/v1
      +kind: RoleBinding
      +metadata:
      +  annotations:
      +    CCE.com/IAM: "true"
      +  creationTimestamp: "2021-06-24T01:30:08Z"
      +  name: clusterrole_admin_group0c96fad22880f32a3f84c009862af6f7
      +  namespace: default
      +  resourceVersion: "36963685"
      +  selfLink: /apis/rbac.authorization.k8s.io/v1/namespaces/default/rolebindings/clusterrole_admin_group0c96fad22880f32a3f84c009862af6f7
      +  uid: 6c6f46a6-8584-47da-83f5-9eef1f7b75d6
      +roleRef:
      +  apiGroup: rbac.authorization.k8s.io
      +  kind: ClusterRole
      +  name: admin
      +subjects:
      +- apiGroup: rbac.authorization.k8s.io
      +  kind: Group
      +  name: 0c96fad22880f32a3f84c009862af6f7
      +

      Connect to a cluster as an authorized user. In this example, you can create and query resources in the default namespace, but cannot query resources in the kube-system namespace or cluster resources.

      +
      # kubectl get pod
      +NAME                    READY   STATUS    RESTARTS   AGE
      +test-568d96f4f8-brdrp   1/1     Running   0          33m
      +test-568d96f4f8-cgjqp   1/1     Running   0          33m
      +# kubectl get pod -nkube-system
      +Error from server (Forbidden): pods is forbidden: User "0c97ac3cb280f4d91fa7c0096739e1f8" cannot list resource "pods" in API group "" in the namespace "kube-system"
      +# kubectl get pv
      +Error from server (Forbidden): persistentvolumes is forbidden: User "0c97ac3cb280f4d91fa7c0096739e1f8" cannot list resource "persistentvolumes" in API group "" at the cluster scope
      +
      +

      Example: Assigning Read-Only Namespace Permissions (view)

      The view role has the read-only permissions on a namespace. You can assign permissions to users to view one or multiple namespaces.

      +

      In the following example kubectl output, a RoleBinding has been created, the view role is bound to the user group cce-role-group, and the target namespace is the default namespace.

      +
      # kubectl get rolebinding
      +NAME                                                     ROLE               AGE
      +clusterrole_view_group0c96fad22880f32a3f84c009862af6f7   ClusterRole/view   7s
      +
      +# kubectl get rolebinding clusterrole_view_group0c96fad22880f32a3f84c009862af6f7 -oyaml
      +apiVersion: rbac.authorization.k8s.io/v1
      +kind: RoleBinding
      +metadata:
      +  annotations:
      +    CCE.com/IAM: "true"
      +  creationTimestamp: "2021-06-24T01:36:53Z"
      +  name: clusterrole_view_group0c96fad22880f32a3f84c009862af6f7
      +  namespace: default
      +  resourceVersion: "36965800"
      +  selfLink: /apis/rbac.authorization.k8s.io/v1/namespaces/default/rolebindings/clusterrole_view_group0c96fad22880f32a3f84c009862af6f7
      +  uid: b86e2507-e735-494c-be55-c41a0c4ef0dd
      +roleRef:
      +  apiGroup: rbac.authorization.k8s.io
      +  kind: ClusterRole
      +  name: view
      +subjects:
      +- apiGroup: rbac.authorization.k8s.io
      +  kind: Group
      +  name: 0c96fad22880f32a3f84c009862af6f7
      +

      Connect to the cluster as an authorized user. In this example, you can query resources in the default namespace but cannot create resources.

      +
      # kubectl get pod
      +NAME                    READY   STATUS    RESTARTS   AGE
      +test-568d96f4f8-brdrp   1/1     Running   0          40m
      +test-568d96f4f8-cgjqp   1/1     Running   0          40m
      +# kubectl run -i --tty --image tutum/dnsutils dnsutils --restart=Never --rm /bin/sh
      +Error from server (Forbidden): pods is forbidden: User "0c97ac3cb280f4d91fa7c0096739e1f8" cannot create resource "pods" in API group "" in the namespace "default"
      +
      +

      Example: Assigning Permissions for a Specific Kubernetes Resource Object

      You can assign permissions on a specific Kubernetes resource object, such as pod, Deployment, and Service. For details, see Using kubectl to Configure Namespace Permissions.

      +
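Kubernetes RBAC can also narrow a rule down to named objects through the resourceNames field. A minimal sketch follows (Role and Deployment names are placeholders); note that resourceNames works with verbs such as get and update, but not with list or watch:

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: default
  name: single-deployment-viewer              # Hypothetical name
rules:
- apiGroups: ["apps"]
  resources: ["deployments"]
  resourceNames: ["nginx"]                     # Only this Deployment can be read
  verbs: ["get"]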
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0190.html b/docs/cce/umn/cce_10_0190.html new file mode 100644 index 00000000..d73afefd --- /dev/null +++ b/docs/cce/umn/cce_10_0190.html @@ -0,0 +1,136 @@ + + +

      Permission Dependency of the CCE Console

      +

      Some CCE permissions policies depend on the policies of other cloud services. To view or use other cloud resources on the CCE console, you need to enable the system policy access control feature of IAM and assign dependency policies for the other cloud services.

      +
      • Dependency policies are assigned based on the CCE FullAccess or CCE ReadOnlyAccess policy you configure.
      • Only users and user groups with namespace permissions can gain the view access to resources in clusters of v1.11.7-r2 and later.
        • If a user is granted the view access to all namespaces of a cluster, the user can view all namespaced resources (except secrets) in the cluster. To view secrets in the cluster, the user must gain the admin or edit role in all namespaces of the cluster.
        • HPA policies take effect only after the cluster-admin permissions are configured for the namespace.
        • The view role within a single namespace allows users to view resources only in the specified namespace.
        +
      +

      Dependency Policy Configuration

      To grant an IAM user the permissions to view or use resources of other cloud services on the CCE console, you must first grant the CCE Administrator, CCE FullAccess, or CCE ReadOnlyAccess policy to the user group to which the user belongs and then grant the dependency policies listed in Table 1 to the user. These dependency policies will allow the IAM user to access resources of other cloud services.

      +

      CCE supports fine-grained permissions configuration, but has the following restrictions:

      +
      • AOM does not support resource-level monitoring. After operation permissions on specific resources are configured using IAM's fine-grained cluster resource management function, IAM users can view cluster monitoring information on the Dashboard page of the CCE console, but cannot view the data on non-fine-grained metrics.
      +
      + +
      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      Table 1 Dependency policies

      Console Function

      +

      Dependent Services

      +

      Roles or Policies Required

      +

      Dashboard

      +

      Application Operations Management (AOM)

      +
      • An IAM user with CCE Administrator assigned can use this function only after AOM FullAccess policy is assigned.
      • IAM users with IAM ReadOnlyAccess, CCE FullAccess, or CCE ReadOnlyAccess assigned can directly use this function.
      +

      Workload management

      +

      Elastic Load Balance (ELB)

      +

      Application Performance Management (APM)

      +

      Application Operations Management (AOM)

      +

      NAT Gateway

      +

      Object Storage Service (OBS)

      +

      Scalable File Service (SFS)

      +

      Except in the following cases, the user does not require any additional role to create workloads.

      +
      • To create a Service using ELB, you must have ELB FullAccess or ELB Administrator plus VPC Administrator assigned.
      • To use a Java probe, you must have AOM FullAccess and APM FullAccess assigned.
      • To create a Service using NAT Gateway, you must have NAT Gateway Administrator assigned.
      • To use OBS, you must have OBS Administrator globally assigned.
        NOTE:

        Because of the cache, it takes about 13 minutes for the RBAC policy to take effect after being granted to users, enterprise projects, and user groups. After an OBS-related system policy is granted, it takes about 5 minutes for the policy to take effect.

        +
        +
      • To use SFS, you must have SFS FullAccess assigned.
      +

      Cluster management

      +

      Application Operations Management (AOM)

      +
      • Auto scale-out or scale-up requires the AOM FullAccess policy.
      +

      Node management

      +

      Elastic Cloud Server (ECS)

      +

      If the permission assigned to an IAM user is CCE Administrator, creating or deleting a node requires the ECS FullAccess or ECS Administrator policy and the VPC Administrator policy.

      +

      Network management

      +

      Elastic Load Balance (ELB)

      +

      NAT Gateway

      +

      Except in the following cases, the user does not require any additional role to create a Service.

      +
      • To create a Service using ELB, you must have ELB FullAccess or ELB Administrator plus VPC Administrator assigned.
      • To create a Service using NAT Gateway, you must have NAT Administrator assigned.
      +

      Storage management

      +

      Object Storage Service (OBS)

      +

      Scalable File Service (SFS)

      +
      • To use OBS, you must have OBS Administrator globally assigned.
        NOTE:

        Because of the cache, it takes about 13 minutes for the RBAC policy to take effect after being granted to users, enterprise projects, and user groups. After an OBS-related system policy is granted, it takes about 5 minutes for the policy to take effect.

        +
        +
      • To use SFS, you must have SFS FullAccess assigned.
      +

      The CCE Administrator role is required for importing storage devices.

      +

      Namespace management

      +

      /

      +

      /

      +

      Chart management

      +

      /

      +

      Cloud accounts and the IAM users with CCE Administrator assigned can use this function.

      +

      Add-on management

      +

      /

      +

      Cloud accounts and the IAM users with CCE Administrator, CCE FullAccess, or CCE ReadOnlyAccess assigned can use this function.

      +

      Permissions management

      +

      /

      +
      • For cloud accounts, no additional policy/role is required.
      • IAM users with CCE Administrator or global Security Administrator assigned can use this function.
      • IAM users with CCE FullAccess or CCE ReadOnlyAccess assigned can use this function.
      +

      Configuration center

      +

      /

      +
      • Creating ConfigMaps does not require any additional policy.
      • Viewing secrets requires that the cluster-admin, admin, or edit permission be configured for the namespace. The DEW KeypairFullAccess or DEW KeypairReadOnlyAccess policy must be assigned for dependent services.
      +

      Help center

      +

      /

      +

      /

      +

      Switching to other related services

      +

      Software Repository for Container (SWR)

      +

      Application Operations Management (AOM)

      +

      The CCE console provides links to other related services. To view or use these services, an IAM user must be assigned required permissions for the services.

      +
      +
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0191.html b/docs/cce/umn/cce_10_0191.html new file mode 100644 index 00000000..4c033d54 --- /dev/null +++ b/docs/cce/umn/cce_10_0191.html @@ -0,0 +1,19 @@ + + +

      Overview

      +

      CCE provides a console for managing Helm charts, helping you easily deploy applications using the charts and manage applications on the console.

      +

      Helm

Helm is a package manager for Kubernetes that manages charts. A Helm chart is a set of YAML files that encapsulates a native Kubernetes application. When deploying an application, you can customize some of its metadata for easier distribution. Application releasers use Helm to package applications, manage application dependencies and versions, and publish applications to a software repository. With Helm, users no longer need to write complex deployment files; they can easily search for, install, upgrade, roll back, and uninstall applications on Kubernetes.

      +
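The workflow described above maps onto a handful of Helm commands (a rough sketch using Helm 3; the repository URL, chart, and release names are placeholders):

# Add a chart repository and refresh the local index
helm repo add example-repo https://charts.example.com
helm repo update
# Install a chart as a named release, then upgrade, roll back, or remove it
helm install my-release example-repo/my-chart
helm upgrade my-release example-repo/my-chart
helm rollback my-release 1
helm uninstall my-release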

      The relationship between Helm and Kubernetes is as follows:

      +
• Helm <-> Kubernetes
• Apt <-> Ubuntu
• Yum <-> CentOS
• Pip <-> Python
      +

      The following figure shows the solution architecture:

      +

      +

      Helm can help application orchestration for Kubernetes:

      +
      • Manages, edits, and updates a large number of Kubernetes configuration files.
      • Deploys a complex Kubernetes application that contains a large number of configuration files.
      • Shares and reuses Kubernetes configurations and applications.
      • Supports multiple environments with parameter-based configuration templates.
      • Manages the release of applications, including rolling back the application, finding differences (using the diff command), and viewing the release history.
      • Controls phases in a deployment cycle.
      • Tests and verifies the released version.
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0193.html b/docs/cce/umn/cce_10_0193.html new file mode 100644 index 00000000..b375076e --- /dev/null +++ b/docs/cce/umn/cce_10_0193.html @@ -0,0 +1,628 @@ + + +

      volcano

      +

      Introduction

      Volcano is a batch processing platform based on Kubernetes. It provides a series of features required by machine learning, deep learning, bioinformatics, genomics, and other big data applications, as a powerful supplement to Kubernetes capabilities.

      +

Volcano provides general-purpose, high-performance computing capabilities, such as a job scheduling engine, heterogeneous chip management, and job running management, and serves end users through computing frameworks for industries such as AI, big data, gene sequencing, and rendering. (Volcano is open source on GitHub.)

      +

      Volcano provides job scheduling, job management, and queue management for computing applications. Its main features are as follows:

      +
• Diverse computing frameworks, such as TensorFlow, MPI, and Spark, can run on Kubernetes in containers. Volcano provides common APIs for batch computing jobs through CRDs, various plug-ins, and advanced job lifecycle management.
      • Advanced scheduling capabilities are provided for batch computing and high-performance computing scenarios, including group scheduling, preemptive priority scheduling, packing, resource reservation, and task topology.
      • Queues can be effectively managed for scheduling jobs. Complex job scheduling capabilities such as queue priority and multi-level queues are supported.
      +

      Open source community: https://github.com/volcano-sh/volcano

      +
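For a sense of how workloads use Volcano once the add-on is installed, the following is a minimal sketch of a Volcano Job based on the upstream batch.volcano.sh/v1alpha1 API (names, image, and replica counts are illustrative):

apiVersion: batch.volcano.sh/v1alpha1
kind: Job
metadata:
  name: example-batch-job            # Hypothetical name
spec:
  schedulerName: volcano             # Let the volcano scheduler place the pods
  minAvailable: 2                    # Gang scheduling: run only if both pods can be scheduled together
  queue: default
  tasks:
  - name: worker
    replicas: 2
    template:
      spec:
        restartPolicy: Never
        containers:
        - name: worker
          image: busybox
          command: ["sh", "-c", "echo hello from volcano && sleep 30"]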
      +

      Installing the Add-on

      1. Log in to the CCE console, click the cluster name, and access the cluster console. Choose Add-ons in the navigation pane, locate volcano on the right, and click Install.
      2. Select Standalone, Custom, or HA for Add-on Specifications.

        If you select Custom, the recommended values of volcano-controller and volcano-scheduler are as follows:

        +
        • If the number of nodes is less than 100, retain the default configuration. That is, the CPU request value is 500m, and the limit value is 2000m. The memory request value is 500Mi, and the limit value is 2000Mi.
        • If the number of nodes is greater than 100, increase the CPU request value by 500m and the memory request value by 1000Mi each time 100 nodes (10000 pods) are added. You are advised to increase the CPU limit value by 1500m and the memory limit by 1000Mi. +
          + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          Table 1 Recommended values for volcano-controller and volcano-scheduler

Number of Nodes/Pods

          +

          CPU Request(m)

          +

          CPU Limit(m)

          +

          Memory Request(Mi)

          +

          Memory Limit(Mi)

          +

          50/5k

          +

          500

          +

          2000

          +

          500

          +

          2000

          +

100/10k

          +

          1000

          +

          2500

          +

          1500

          +

          2500

          +

200/20k

          +

          1500

          +

          3000

          +

          2500

          +

          3500

          +

300/30k

          +

          2000

          +

          3500

          +

          3500

          +

          4500

          +

400/40k

          +

          2500

          +

          4000

          +

          4500

          +

          5500

          +
          +
          +
        +

      3. Parameters of the volcano default scheduler. For details, see Table 2.

        ca_cert: ''
        +default_scheduler_conf:
        +  actions: 'allocate, backfill'
        +  tiers:
        +    - plugins:
        +        - name: 'priority'
        +        - name: 'gang'
        +        - name: 'conformance'
        +    - plugins:
        +        - name: 'drf'
        +        - name: 'predicates'
        +        - name: 'nodeorder'
        +    - plugins:
        +        - name: 'cce-gpu-topology-predicate'
        +        - name: 'cce-gpu-topology-priority'
        +        - name: 'cce-gpu'
        +    - plugins:
        +        - name: 'nodelocalvolume'
        +        - name: 'nodeemptydirvolume'
        +        - name: 'nodeCSIscheduling'
        +        - name: 'networkresource'
        +server_cert: ''
        +server_key: ''
        + +
        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Table 2 Volcano Plugins

        Add-on

        +

        Function

        +

        Description

        +

        Demonstration

        +

        binpack

        +

        The binpack plugin schedules pods to nodes with high resource utilization to reduce resource fragments.

        +
        • binpack.weight: Weight of the binpack plugin.
        • binpack.cpu: Ratio of CPU resources to all resources. Defaults to 1.
        • binpack.memory: Ratio of memory resources to all resources. Defaults to 1.
• binpack.resources: Other custom resource types to be considered during bin packing, for example, nvidia.com/gpu. Separate multiple types with commas, and set the weight of each type using binpack.resources.<resource-name>, as in the example.
        +
        - plugins:
        +  - name: binpack
        +    arguments:
        +      binpack.weight: 10
        +      binpack.cpu: 1
        +      binpack.memory: 1
        +      binpack.resources: nvidia.com/gpu, example.com/foo
        +      binpack.resources.nvidia.com/gpu: 2
        +      binpack.resources.example.com/foo: 3
        +

        conformance

        +

        The conformance plugin considers that the tasks in namespace kube-system have a higher priority. These tasks will not be preempted.

        +

        -

        +

        -

        +

        gang

        +

        The gang plugin considers a group of pods as a whole to allocate resources.

        +

        -

        +

        -

        +

        priority

        +

        The priority plugin schedules pods based on the custom workload priority.

        +

        -

        +

        -

        +

        overcommit

        +

The overcommit plugin multiplies cluster resources by a raising factor when making enqueuing decisions, improving workload enqueuing efficiency. If all workloads are Deployments, remove this plugin or set the raising factor to 2.0.

        +

        overcommit-factor: Raising factor. Defaults to 1.2.

        +
        - plugins:
        +  - name: overcommit
        +    arguments:
        +      overcommit-factor: 2.0
        +

        drf

        +

The DRF (Dominant Resource Fairness) plugin schedules resources based on the dominant resource of each pod group. The pod group with the smallest dominant resource share is scheduled first.

        +

        -

        +

        -

        +

        predicates

        +

        Determines whether a task is bound to a node by using a series of evaluation algorithms, such as node/pod affinity, taint tolerance, node port repetition, volume limits, and volume zone matching.

        +

        -

        +

        -

        +

        nodeorder

        +

        The nodeorder plugin scores all nodes for a task by using a series of scoring algorithms.

        +
        • nodeaffinity.weight: Pods are scheduled based on the node affinity. Defaults to 1.
        • podaffinity.weight: Pods are scheduled based on the pod affinity. Defaults to 1.
        • leastrequested.weight: Pods are scheduled to the node with the least resources. Defaults to 1.
        • balancedresource.weight: Pods are scheduled to the node with balanced resource. Defaults to 1.
        • mostrequested.weight: Pods are scheduled to the node with the most requested resources. Defaults to 0.
        • tainttoleration.weight: Pods are scheduled to the node with a high taint tolerance. Defaults to 1.
        • imagelocality.weight: Pods are scheduled to the node where the required images exist. Defaults to 1.
        • selectorspread.weight: Pods are evenly scheduled to different nodes. Defaults to 0.
        • volumebinding.weight: Pods are scheduled to the node with the local PV delayed binding policy. Defaults to 1.
        • podtopologyspread.weight: Pods are scheduled based on the pod topology. Defaults to 2.
        +
        - plugins:
        +  - name: nodeorder
        +    arguments:
        +      leastrequested.weight: 1
        +      mostrequested.weight: 0
        +      nodeaffinity.weight: 1
        +      podaffinity.weight: 1
        +      balancedresource.weight: 1
        +      tainttoleration.weight: 1
        +      imagelocality.weight: 1
        +      volumebinding.weight: 1
        +      podtopologyspread.weight: 2
        +

        cce-gpu-topology-predicate

        +

        GPU-topology scheduling preselection algorithm

        +

        -

        +

        -

        +

        cce-gpu-topology-priority

        +

        GPU-topology scheduling priority algorithm

        +

        -

        +

        -

        +

        cce-gpu

        +

        Works with the gpu add-on of CCE to support GPU resource allocation and decimal GPU configuration.

        +

        -

        +

        -

        +

        numaaware

        +

        NUMA topology scheduling

        +

        weight: Weight of the numa-aware plugin.

        +

        -

        +

        networkresource

        +

The networkresource plugin preselects and filters nodes based on their ENI resource requirements. The parameters are transferred by CCE and do not need to be manually configured.

        +

        NetworkType: Network type (eni or vpc-router).

        +

        -

        +

        nodelocalvolume

        +

The nodelocalvolume plugin filters out nodes that do not meet local volume requirements.

        +

        -

        +

        -

        +

        nodeemptydirvolume

        +

        The nodeemptydirvolume plugin filters out nodes that do not meet the emptyDir requirements.

        +

        -

        +

        -

        +

        nodeCSIscheduling

        +

The nodeCSIscheduling plugin filters out nodes where the everest component is abnormal.

        +

        -

        +

        -

        +
        +
        +

      4. Click Install.
      +
      +

      Modifying the volcano-scheduler Configuration Using the Console

      Volcano allows you to configure the scheduler during installation, upgrade, and editing. The configuration will be synchronized to volcano-scheduler-configmap.

      +

      This section describes how to configure the volcano scheduler.

      +
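To confirm that a change made on the console has been synchronized, you can read the ConfigMap directly with kubectl (the same object that is backed up later in this section):

kubectl get configmap volcano-scheduler-configmap -n kube-system -o yaml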

Only Volcano v1.7.1 and later supports this function. On the new plug-in page, options such as plugins.eas_service and resource_exporter_enable are replaced by default_scheduler_conf.

      +
      +

      Log in to the CCE console and access the cluster console. Choose Add-ons in the navigation pane. On the right of the page, locate volcano and click Install or Upgrade. In the Parameters area, configure the volcano scheduler parameters.

      +
      • Using resource_exporter:
        {
        +    "ca_cert": "",
        +    "default_scheduler_conf": {
        +        "actions": "allocate, backfill",
        +        "tiers": [
        +            {
        +                "plugins": [
        +                    {
        +                        "name": "priority"
        +                    },
        +                    {
        +                        "name": "gang"
        +                    },
        +                    {
        +                        "name": "conformance"
        +                    }
        +                ]
        +            },
        +            {
        +                "plugins": [
        +                    {
        +                        "name": "drf"
        +                    },
        +                    {
        +                        "name": "predicates"
        +                    },
        +                    {
        +                        "name": "nodeorder"
        +                    }
        +                ]
        +            },
        +            {
        +                "plugins": [
        +                    {
        +                        "name": "cce-gpu-topology-predicate"
        +                    },
        +                    {
        +                        "name": "cce-gpu-topology-priority"
        +                    },
        +                    {
        +                        "name": "cce-gpu"
        +                    },
        +                    {
        +                        "name": "numa-aware" # add this also enable resource_exporter
        +                    }
        +                ]
        +            },
        +            {
        +                "plugins": [
        +                    {
        +                        "name": "nodelocalvolume"
        +                    },
        +                    {
        +                        "name": "nodeemptydirvolume"
        +                    },
        +                    {
        +                        "name": "nodeCSIscheduling"
        +                    },
        +                    {
        +                        "name": "networkresource"
        +                    }
        +                ]
        +            }
        +        ]
        +    },
        +    "server_cert": "",
        +    "server_key": ""
        +}
        +

        After this function is enabled, you can use the functions of the numa-aware plug-in and resource_exporter at the same time.

        +
      +
      • Using eas_service:
        {
        +    "ca_cert": "",
        +    "default_scheduler_conf": {
        +        "actions": "allocate, backfill",
        +        "tiers": [
        +            {
        +                "plugins": [
        +                    {
        +                        "name": "priority"
        +                    },
        +                    {
        +                        "name": "gang"
        +                    },
        +                    {
        +                        "name": "conformance"
        +                    }
        +                ]
        +            },
        +            {
        +                "plugins": [
        +                    {
        +                        "name": "drf"
        +                    },
        +                    {
        +                        "name": "predicates"
        +                    },
        +                    {
        +                        "name": "nodeorder"
        +                    }
        +                ]
        +            },
        +            {
        +                "plugins": [
        +                    {
        +                        "name": "cce-gpu-topology-predicate"
        +                    },
        +                    {
        +                        "name": "cce-gpu-topology-priority"
        +                    },
        +                    {
        +                        "name": "cce-gpu"
        +                    },
        +                    {
        +                        "name": "eas",
        +                        "custom": {
        +                            "availability_zone_id": "",
        +                            "driver_id": "",
        +                            "endpoint": "",
        +                            "flavor_id": "",
        +                            "network_type": "",
        +                            "network_virtual_subnet_id": "",
        +                            "pool_id": "",
        +                            "project_id": "",
        +                            "secret_name": "eas-service-secret"
        +                        }
        +                    }
        +                ]
        +            },
        +            {
        +                "plugins": [
        +                    {
        +                        "name": "nodelocalvolume"
        +                    },
        +                    {
        +                        "name": "nodeemptydirvolume"
        +                    },
        +                    {
        +                        "name": "nodeCSIscheduling"
        +                    },
        +                    {
        +                        "name": "networkresource"
        +                    }
        +                ]
        +            }
        +        ]
        +    },
        +    "server_cert": "",
        +    "server_key": ""
        +}
        +
      • Using ief:
        {
        +    "ca_cert": "",
        +    "default_scheduler_conf": {
        +        "actions": "allocate, backfill",
        +        "tiers": [
        +            {
        +                "plugins": [
        +                    {
        +                        "name": "priority"
        +                    },
        +                    {
        +                        "name": "gang"
        +                    },
        +                    {
        +                        "name": "conformance"
        +                    }
        +                ]
        +            },
        +            {
        +                "plugins": [
        +                    {
        +                        "name": "drf"
        +                    },
        +                    {
        +                        "name": "predicates"
        +                    },
        +                    {
        +                        "name": "nodeorder"
        +                    }
        +                ]
        +            },
        +            {
        +                "plugins": [
        +                    {
        +                        "name": "cce-gpu-topology-predicate"
        +                    },
        +                    {
        +                        "name": "cce-gpu-topology-priority"
        +                    },
        +                    {
        +                        "name": "cce-gpu"
        +                    },
        +                    {
        +                        "name": "ief",
        +                        "enableBestNode": true
        +                    }
        +                ]
        +            },
        +            {
        +                "plugins": [
        +                    {
        +                        "name": "nodelocalvolume"
        +                    },
        +                    {
        +                        "name": "nodeemptydirvolume"
        +                    },
        +                    {
        +                        "name": "nodeCSIscheduling"
        +                    },
        +                    {
        +                        "name": "networkresource"
        +                    }
        +                ]
        +            }
        +        ]
        +    },
        +    "server_cert": "",
        +    "server_key": ""
        +}
        +
      +
      +

      Retaining the Original volcano-scheduler-configmap Configuration

      If you want to use the original configuration after the plug-in is upgraded, perform the following steps:

      +
      1. Check and back up the original volcano-scheduler-configmap configuration.

        Example:
        # kubectl edit cm volcano-scheduler-configmap -n kube-system
        +apiVersion: v1
        +data:
        +  default-scheduler.conf: |-
        +    actions: "enqueue, allocate, backfill"
        +    tiers:
        +    - plugins:
        +      - name: priority
        +      - name: gang
        +      - name: conformance
        +    - plugins:
        +      - name: drf
        +      - name: predicates
        +      - name: nodeorder
        +      - name: binpack
        +        arguments:
        +          binpack.cpu: 100
        +          binpack.weight: 10
        +          binpack.resources: nvidia.com/gpu
        +          binpack.resources.nvidia.com/gpu: 10000
        +    - plugins:
        +      - name: cce-gpu-topology-predicate
        +      - name: cce-gpu-topology-priority
        +      - name: cce-gpu
        +    - plugins:
        +      - name: nodelocalvolume
        +      - name: nodeemptydirvolume
        +      - name: nodeCSIscheduling
        +      - name: networkresource
        +
        +

      2. Enter the customized content in the Parameters on the console.

        {
        +    "ca_cert": "",
        +    "default_scheduler_conf": {
        +        "actions": "enqueue, allocate, backfill",
        +        "tiers": [
        +            {
        +                "plugins": [
        +                    {
        +                        "name": "priority"
        +                    },
        +                    {
        +                        "name": "gang"
        +                    },
        +                    {
        +                        "name": "conformance"
        +                    }
        +                ]
        +            },
        +            {
        +                "plugins": [
        +                    {
        +                        "name": "drf"
        +                    },
        +                    {
        +                        "name": "predicates"
        +                    },
        +                    {
        +                        "name": "nodeorder"
        +                    },
        +                    {
        +                        "name": "binpack",
        +                        "arguments": {
        +                            "binpack.cpu": 100,
        +                            "binpack.weight": 10,
        +                            "binpack.resources": "nvidia.com/gpu",
        +                            "binpack.resources.nvidia.com/gpu": 10000
        +                        }
        +                    }
        +                ]
        +            },
        +            {
        +                "plugins": [
        +                    {
        +                        "name": "cce-gpu-topology-predicate"
        +                    },
        +                    {
        +                        "name": "cce-gpu-topology-priority"
        +                    },
        +                    {
        +                        "name": "cce-gpu"
        +                    }
        +                ]
        +            },
        +            {
        +                "plugins": [
        +                    {
        +                        "name": "nodelocalvolume"
        +                    },
        +                    {
        +                        "name": "nodeemptydirvolume"
        +                    },
        +                    {
        +                        "name": "nodeCSIscheduling"
        +                    },
        +                    {
        +                        "name": "networkresource"
        +                    }
        +                ]
        +            }
        +        ]
        +    },
        +    "server_cert": "",
        +    "server_key": ""
        +}
        +

        When this function is used, the original content in volcano-scheduler-configmap will be overwritten. Therefore, you must check whether volcano-scheduler-configmap has been modified during the upgrade. If yes, synchronize the modification to the upgrade page.

        +
        +

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0197.html b/docs/cce/umn/cce_10_0197.html new file mode 100644 index 00000000..1c7b8d84 --- /dev/null +++ b/docs/cce/umn/cce_10_0197.html @@ -0,0 +1,180 @@ + + +

      Upgrade Overview

      +

      To enable interoperability from one Kubernetes installation to the next, you must upgrade your Kubernetes clusters before the maintenance period ends.

      +

      After the latest Kubernetes version is available in CCE, CCE will describe the changes in this version.

      +

      You can use the CCE console to upgrade the Kubernetes version of a cluster.

      +

      An upgrade flag will be displayed on the cluster card view if there is a new version for the cluster to upgrade.

      +

      How to check:

      +

      Log in to the CCE console and check whether the message "New version available" is displayed in the lower left corner of the cluster. If yes, the cluster can be upgraded. If no, the cluster cannot be upgraded.

      +
      Figure 1 Cluster with the upgrade flag
      +

      Cluster Upgrade

      The following table describes the target version to which each cluster version can be upgraded, the supported upgrade modes, and upgrade impacts.

      + +
      + + + + + + + + + + + + + + + + + + + + + +
      Table 1 Cluster upgrade paths and impacts

      Source Version

      +

      Target Version

      +

      Upgrade Modes

      +

      Impacts

      +

      v1.19

      +

      v1.21

      +

      In-place upgrade

      +

      You need to learn about the differences between versions. For details, see Precautions for Major Version Upgrade.

      +

      v1.17

      +

      v1.15

      +

      v1.19

      +

      In-place upgrade

      +

      You need to learn about the differences between versions. For details, see Precautions for Major Version Upgrade.

      +

      v1.13

      +

      v1.15

      +

      Rolling upgrade

      +

      Replace upgrade

      +
      • proxy in the coredns add-on cannot be configured and needs to be replaced with forward.
      • The storage add-on is changed from storage-driver to everest.
      +
      +
      +
      +

      Upgrade Modes

      The upgrade processes are the same for master nodes. The differences between the upgrade modes of worker nodes are described as follows:

      + +
      + + + + + + + + + + + + + + + + + + + + + +
      Table 2 Differences between upgrade modes and their advantages and disadvantages

      Upgrade Mode

      +

      Method

      +

      Advantage

      +

      Disadvantage

      +

      In-place upgrade

      +

      Kubernetes components, network components, and CCE management components are upgraded on the node. During the upgrade, service pods and networks are not affected. The SchedulingDisabled label will be added to all existing nodes. After the upgrade is complete, you can properly use existing nodes.

      +

      You do not need to migrate services, ensuring service continuity.

      +

      In-place upgrade does not upgrade the OS of a node. If you want to upgrade the OS, clear the corresponding node data after the node upgrade is complete and reset the node to upgrade the OS to a new version.

      +

      Rolling upgrade

      +

      Only the Kubernetes components and certain network components are upgraded on the node. The SchedulingDisabled label will be added to all existing nodes to ensure that the running applications are not affected.

      +
      NOTICE:
      • After the upgrade is complete, you need to manually create nodes and gradually release the old nodes, thereby migrating your applications to the new nodes. In this mode, you can control the upgrade process.
      +
      +

      Services are not interrupted.

      +
      • After the upgrade is complete, you need to manually create nodes and gradually release the old nodes. The new nodes are billed additionally. After services are migrated to the new nodes, the old nodes can be deleted.
      +
      • After the rolling upgrade is complete, if you want to continue the upgrade to a later version, you need to reset the old nodes first. Otherwise, the pre-upgrade check cannot be passed. Services may be interrupted during the upgrade.
      +

      Replace upgrade

      +

      The latest worker node image is used to reset the node OS.

      +

      This is the fastest upgrade mode and requires few manual interventions.

      +

      Data or configurations on the node will be lost, and services will be interrupted for a period of time.

      +
      +
      +
      +

      Precautions for Major Version Upgrade

      +
      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

      Upgrade Path

      +

      Difference

      +

      Self-Check

      +

      v1.19 to v1.21

      +

The bug of exec probe timeouts is fixed in Kubernetes 1.21. Before this fix, the exec probe ignored the timeoutSeconds field: the probe ran indefinitely, even beyond its configured deadline, and stopped only when a result was returned. If this field is not specified, the default value 1 is used. This field takes effect after the upgrade. If the probe takes longer than 1 second, the application health check may fail and the application may restart frequently.

      +

      Before the upgrade, check whether the timeout is properly set for the exec probe.

      +

      kube-apiserver of CCE 1.19 or later requires that the Subject Alternative Names (SANs) field be configured for the certificate of your webhook server. Otherwise, kube-apiserver fails to call the webhook server after the upgrade, and containers cannot be started properly.

      +

      Root cause: X.509 CommonName is discarded in Go 1.15. kube-apiserver of CCE 1.19 is compiled using Go 1.15. If your webhook certificate does not have SANs, kube-apiserver does not process the CommonName field of the X.509 certificate as the host name by default. As a result, the authentication fails.

      +

      Before the upgrade, check whether the SAN field is configured in the certificate of your webhook server.

      +
      • If you do not have your own webhook server, you can skip this check.
      • If the field is not set, you are advised to use the SAN field to specify the IP address and domain name supported by the certificate.
      +

      v1.15 to v1.19

      +

      The control plane of CCE 1.19 is incompatible with Kubelet 1.15. If the master node fails to be upgraded or the node to be upgraded restarts after the master node is successfully upgraded, there is a high probability that the node is in the NotReady status.

      +

There is a high probability that kubelet restarts on the node that fails to be upgraded, triggering the node registration process. The default registration labels of kubelet 1.15 (failure-domain.beta.kubernetes.io/is-baremetal and kubernetes.io/availablezone) are regarded as invalid labels by kube-apiserver 1.19.

      +

      The valid labels in v1.19 are node.kubernetes.io/baremetal and failure-domain.beta.kubernetes.io/zone.

      +
      1. In normal cases, this scenario is not triggered.
      2. After the master node is upgraded, do not suspend the upgrade. Upgrade the node quickly.
      3. If a node fails to be upgraded and cannot be restored, evict applications on the node as soon as possible. Contact technical support and skip the node upgrade. After the upgrade is complete, reset the node.
      +

In the upgrade from CCE 1.15 to 1.19 clusters, the file system of the Docker storage driver is switched from XFS to ext4. As a result, the package import order in the pods of upgraded Java applications may change, causing pod exceptions.

      +

      Before the upgrade, check the Docker configuration file /etc/docker/daemon.json on the node. Check whether the value of dm.fs is xfs.

      +
      • If the value is ext4 or the storage driver is Overlay, you can skip the next steps.
      • If the value is xfs, you are advised to deploy applications in the cluster of the new version in advance to test whether the applications are compatible with the new cluster version.
      +
      {
      +      "storage-driver": "devicemapper",
      +      "storage-opts": [
      +      "dm.thinpooldev=/dev/mapper/vgpaas-thinpool",
      +      "dm.use_deferred_removal=true",
      +      "dm.fs=xfs",
      +      "dm.use_deferred_deletion=true"
      +      ]
      +}
      +

      kube-apiserver of CCE 1.19 or later requires that the Subject Alternative Names (SANs) field be configured for the certificate of your webhook server. Otherwise, kube-apiserver fails to call the webhook server after the upgrade, and containers cannot be started properly.

      +

Root cause: Treating the X.509 CommonName field as a host name was deprecated in Go 1.15. kube-apiserver of CCE 1.19 is compiled with Go 1.15 and no longer processes the CommonName field as the host name. As a result, the authentication fails.

      +

      Before the upgrade, check whether the SAN field is configured in the certificate of your webhook server.

      +
      • If you do not have your own webhook server, you can skip this check.
      • If the field is not set, you are advised to use the SAN field to specify the IP address and domain name supported by the certificate.
      +
      NOTICE:

To mitigate the impact of version differences on cluster upgrade, CCE performs special processing during the upgrade from 1.15 to 1.19 and still supports certificates without SANs. However, this special processing is not provided for subsequent upgrades. You are advised to rectify your certificate as soon as possible.

      +
      +

      In clusters of v1.17.17 and later, CCE automatically creates pod security policies (PSPs) for you, which restrict the creation of pods with unsafe configurations, for example, pods for which net.core.somaxconn under a sysctl is configured in the security context.

      +

      After an upgrade, you can allow insecure system configurations as required. For details, see Configuring a Pod Security Policy.

      +

      v1.13 to v1.15

      +

      After a VPC network cluster is upgraded, the master node occupies an extra CIDR block due to the upgrade of network components. If no container CIDR block is available for the new node, the pod scheduled to the node cannot run.

      +

Generally, this problem occurs when the nodes in the cluster are about to fully occupy the container CIDR block. For example, assume the container CIDR block is 10.0.0.0/16 (65,536 available IP addresses) and the VPC network allocates each node a fixed-size CIDR block, where the mask determines the maximum number of container IP addresses per node. If that upper limit is 128, the cluster supports a maximum of 512 (65536/128) nodes, including the three master nodes. After the cluster is upgraded, each of the three master nodes occupies one CIDR block. As a result, 506 nodes are supported.

      +
      +
      +
      +
      +
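For the exec probe check in the v1.19 to v1.21 row above, the key point is to set timeoutSeconds explicitly on any exec probe whose command can take longer than 1 second. A minimal sketch only; the pod name, image, and check command are placeholders for illustration:

apiVersion: v1
kind: Pod
metadata:
  name: probe-timeout-example        # hypothetical name, for illustration only
spec:
  containers:
  - name: app
    image: nginx:alpine
    livenessProbe:
      exec:
        command: ["/bin/sh", "-c", "nginx -t"]   # any health check command used by your application
      timeoutSeconds: 5              # enforced from v1.21; defaults to 1 if omitted
      periodSeconds: 10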
      + +
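For the webhook certificate checks above, one way to verify whether a certificate carries the SAN field is to inspect it with openssl. The file name webhook-server.crt is a placeholder for the certificate actually served by your webhook server:

openssl x509 -in webhook-server.crt -noout -text | grep -A1 "Subject Alternative Name"
# Example output if SANs are configured (values are illustrative):
#     X509v3 Subject Alternative Name:
#         DNS:webhook.example.com, IP Address:192.168.0.10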
      + diff --git a/docs/cce/umn/cce_10_0198.html b/docs/cce/umn/cce_10_0198.html new file mode 100644 index 00000000..711b41f2 --- /dev/null +++ b/docs/cce/umn/cce_10_0198.html @@ -0,0 +1,132 @@ + + +

      Adding Nodes for Management

      +

      Scenario

In CCE, you can create nodes (see Creating a Node) or add existing nodes (ECSs) to your cluster.

      +
      • While an ECS is being accepted into a cluster, the operating system of the ECS will be reset to the standard OS image provided by CCE to ensure node stability. The CCE console prompts you to select the operating system and the login mode during the reset.
      • The system disk and data disk of an ECS will be formatted while the ECS is being accepted into a cluster. Ensure that information in the disks has been backed up.
      • While an ECS is being accepted into a cluster, do not perform any operation on the ECS through the ECS console.
      +
      +
      +

      Notes and Constraints

      • The cluster version must be 1.15 or later.
• If a password or key was set when the VM node was created, the node can be accepted into a cluster 10 minutes after it becomes available. The original password or key becomes invalid during acceptance, so you need to reset it.
      • Nodes in a CCE Turbo cluster must support sub-ENIs or be bound to at least 16 ENIs. For details about the node specifications, see the nodes that can be selected on the console when you create a node.
      +
      +

      Prerequisites

      A cloud server that meets the following conditions can be accepted:

      +
• The node to be accepted must be in the Running state, must not be used by other clusters, and must not carry the CCE-Dynamic-Provisioning-Node tag.
      • The node to be accepted and the cluster must be in the same VPC. (If the cluster version is earlier than v1.13.10, the node to be accepted and the CCE cluster must be in the same subnet.)
      • At least one data disk is attached to the node to be accepted. The data disk capacity is greater than or equal to 100 GB.
• The node to be accepted must have at least 2 vCPUs, at least 4 GB of memory, and only one NIC.
      • Only cloud servers with the same specifications, AZ, and data disk configuration can be added in batches.
      +
      +

      Procedure

      1. Log in to the CCE console and go to the cluster where the node to be managed resides.
      2. In the navigation pane, choose Nodes. On the displayed page, click Accept Node in the upper right corner.
      3. Specify node parameters.

        Compute Settings

        + +
        + + + + + + + + + + + + + + + + +
        Table 1 Configuration parameters

        Parameter

        +

        Description

        +

        Specifications

        +

        Click Select Cloud Server and select the servers to be accepted.

        +

        You can select multiple cloud servers for batch management. However, only the cloud servers with the same specifications, AZ, and data disk configuration can be added in batches.

        +

        If a cloud server contains multiple data disks, select one of them for the container runtime and kubelet.

        +

        Container Engine

        +

        CCE clusters support Docker. Starting from CCE 1.23, containerd is supported.

        +

        For a CCE Turbo cluster, both Docker and containerd are supported. For details, see Mapping between Node OSs and Container Engines.

        +

        OS

        +

        Public image: Select an OS for the node.

        +

        Private image: You can use private images.

        +

        Login Mode

        +
        • Key Pair

          Select the key pair used to log in to the node. You can select a shared key.

          +

          A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create Key Pair.

          +
        +
        +
        +

        Storage Settings

        +
        Configure storage resources on a node for the containers running on it. +
        + + + + + + + + + + +
        Table 2 Configuration parameters

        Parameter

        +

        Description

        +

        System Disk

        +

        Directly use the system disk of the cloud server.

        +

        Data Disk

        +

At least one data disk is required for the container runtime and kubelet. This data disk cannot be deleted or detached. Otherwise, the node will be unavailable.

        +

        Click Expand and select Allocate Disk Space to define the disk space occupied by the container runtime to store the working directories, container image data, and image metadata. For details about how to allocate data disk space, see Data Disk Space Allocation.

        +

        For other data disks, a raw disk is created without any processing by default. You can also click Expand and select Mount Disk to mount the data disk to a specified directory.

        +
        +
        +
        +

        Advanced Settings

        + +
        + + + + + + + + + + + + + + + + + + + + + + +
        Table 3 Advanced configuration parameters

        Parameter

        +

        Description

        +

        Kubernetes Label

        +

        Click Add Label to set the key-value pair attached to the Kubernetes objects (such as pods). A maximum of 20 labels can be added.

        +

        Labels can be used to distinguish nodes. With workload affinity settings, container pods can be scheduled to a specified node. For more information, see Labels and Selectors.

        +

        Resource Tag

        +

        You can add resource tags to classify resources.

        +

        You can create predefined tags in Tag Management Service (TMS). Predefined tags are visible to all service resources that support the tagging function. You can use these tags to improve tagging and resource migration efficiency.

        +

        CCE will automatically create the "CCE-Dynamic-Provisioning-Node=node id" tag.

        +

        Taint

        +
        This field is left blank by default. You can add taints to set anti-affinity for the node. A maximum of 10 taints are allowed for each node. Each taint contains the following parameters:
        • Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
        • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
        • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
        +
        NOTICE:
• If taints are used, you must configure tolerations in the YAML files of pods (a toleration sketch is provided after this procedure). Otherwise, scale-up may fail or pods cannot be scheduled onto the added nodes.
        • After a node pool is created, you can click Edit to modify its configuration. The modification will be synchronized to all nodes in the node pool.
        +
        +
        +

        Max. Pods

        +

        Maximum number of pods that can run on the node, including the default system pods.

        +

        This limit prevents the node from being overloaded with pods.

        +

        Pre-installation Command

        +

        Enter commands. A maximum of 1,000 characters are allowed.

        +

        The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed.

        +

        Post-installation Command

        +

        Enter commands. A maximum of 1,000 characters are allowed.

        +

        The script will be executed after Kubernetes software is installed and will not affect the installation.

        +
        +
        +

      4. Click Next: Confirm. Click Submit.
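If you configured taints in Table 3, the pods that should run on the accepted nodes need matching tolerations. A minimal sketch, assuming a taint with key accept, value true, and effect NoSchedule was added; all names here are illustrative:

apiVersion: v1
kind: Pod
metadata:
  name: toleration-example      # hypothetical name
spec:
  containers:
  - name: app
    image: nginx:alpine
  tolerations:
  - key: "accept"               # must match the taint key set on the node
    operator: "Equal"
    value: "true"               # must match the taint value
    effect: "NoSchedule"        # must match the taint effect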
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0201.html b/docs/cce/umn/cce_10_0201.html new file mode 100644 index 00000000..f3e7f52e --- /dev/null +++ b/docs/cce/umn/cce_10_0201.html @@ -0,0 +1,151 @@ + + +

      Custom Monitoring

      +

      CCE allows you to upload custom metrics to AOM. The ICAgent on a node periodically calls the metric monitoring API configured on a workload to read monitoring data and then uploads the data to AOM.

      +

      +

      The custom metric API of a workload can be configured when the workload is created. This section uses an Nginx application as an example to describe how to report custom metrics to AOM.

      +

      Notes and Constraints

      • The ICAgent is compatible with the monitoring data specifications of Prometheus. The custom metrics provided by pods can be collected by the ICAgent only when they meet the monitoring data specifications of Prometheus.
      • The ICAgent supports only Gauge metrics.
      • The interval for the ICAgent to call the custom metric API is 1 minute, which cannot be changed.
      +
      +

      Prometheus Monitoring Data Collection

      Prometheus periodically calls the metric monitoring API (/metrics by default) of an application to obtain monitoring data. The application needs to provide the metric monitoring API for Prometheus to call, and the monitoring data must meet the following specifications of Prometheus:

      +
      # TYPE nginx_connections_active gauge
      +nginx_connections_active 2
      +# TYPE nginx_connections_reading gauge
      +nginx_connections_reading 0
      +

      Prometheus provides clients in various languages. For details about the clients, see Prometheus CLIENT LIBRARIES. For details about how to develop an exporter, see WRITING EXPORTERS. The Prometheus community provides various third-party exporters that can be directly used. For details, see EXPORTERS AND INTEGRATIONS.

      +
      +

      Preparing an Application

Nginx has a module named ngx_http_stub_status_module, which provides basic monitoring functions. You can configure the nginx.conf file to provide an API for external systems to access Nginx monitoring data. After the server block in the following configuration is added to http, Nginx can provide such an API.

      +
      user  nginx;
      +worker_processes  auto;
      +
      +error_log  /var/log/nginx/error.log warn;
      +pid        /var/run/nginx.pid;
      +
      +events {
      +    worker_connections  1024;
      +}
      +
      +http {
      +    include       /etc/nginx/mime.types;
      +    default_type  application/octet-stream;
      +    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
      +                      '$status $body_bytes_sent "$http_referer" '
      +                      '"$http_user_agent" "$http_x_forwarded_for"';
      +
      +    access_log  /var/log/nginx/access.log  main;
      +    sendfile        on;
      +    #tcp_nopush     on;
      +    keepalive_timeout  65;
      +    #gzip  on;
      +    include /etc/nginx/conf.d/*.conf;
      +
      +    server {
      +      listen 8080;
      +      server_name  localhost;
      +      location /stub_status {
      +         stub_status on;
      +         access_log off;
      +      }
      +    }
      +}
      +

      Save the preceding configuration to the nginx.conf file and use the configuration to create a new image. The Dockerfile file is as follows:

      +
      FROM nginx:1.21.5-alpine
      +ADD nginx.conf /etc/nginx/nginx.conf
      +EXPOSE 80
      +CMD ["nginx", "-g", "daemon off;"]
      +

      Use the preceding Dockerfile file to build an image and upload it to SWR. The image name is nginx:exporter.

      +

      docker build -t nginx:exporter .

      +

      docker tag nginx:exporter {swr-address}/{group}/nginx:exporter

      +

      docker push {swr-address}/{group}/nginx:exporter

      +

After running a container with the nginx:exporter image, you can obtain Nginx monitoring data by calling http://<ip_address>:8080/stub_status, where <ip_address> is the IP address of the container. The monitoring data is as follows:

      +
      # curl http://127.0.0.1:8080/stub_status
      +Active connections: 3 
      +server accepts handled requests
      + 146269 146269 212 
      +Reading: 0 Writing: 1 Waiting: 2
      +
      +

      Deploying an Application

The monitoring data provided by nginx:exporter is not in the format required by Prometheus. Use nginx-prometheus-exporter to convert the Nginx metrics into the Prometheus format, as described below.

      +

      +

      Deploy nginx:exporter and nginx-prometheus-exporter in the same pod.

      +
      kind: Deployment
      +apiVersion: apps/v1
      +metadata:
      +  name: nginx-exporter
      +  namespace: default
      +spec:
      +  replicas: 1
      +  selector:
      +    matchLabels:
      +      app: nginx-exporter
      +  template:
      +    metadata:
      +      labels:
      +        app: nginx-exporter
      +      annotations:
      +        metrics.alpha.kubernetes.io/custom-endpoints: '[{"api":"prometheus","path":"/metrics","port":"9113","names":""}]'
      +    spec:
      +      containers:
      +        - name: container-0
      +          image: 'nginx:exporter'  # Replace it with the address of the image you uploaded to SWR.
      +          resources:
      +            limits:
      +              cpu: 250m
      +              memory: 512Mi
      +            requests:
      +              cpu: 250m
      +              memory: 512Mi
      +        - name: container-1
      +          image: 'nginx/nginx-prometheus-exporter:0.9.0'
      +          command:
      +            - nginx-prometheus-exporter
      +          args:
      +            - '-nginx.scrape-uri=http://127.0.0.1:8080/stub_status'
      +      imagePullSecrets:
      +        - name: default-secret
      +

      The nginx/nginx-prometheus-exporter:0.9.0 image needs to be pulled from the public network. Therefore, each node in the cluster must have a public IP address.

      +
      +

      nginx-prometheus-exporter requires a startup command. nginx-prometheus-exporter -nginx.scrape-uri=http://127.0.0.1:8080/stub_status is used to obtain Nginx monitoring data.

      +

      In addition, you need to add an annotation metrics.alpha.kubernetes.io/custom-endpoints: '[{"api":"prometheus","path":"/metrics","port":"9113","names":""}]' to the pod.
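Before relying on the ICAgent, you can check from inside the pod that the exporter serves Prometheus-format data on the port declared in the annotation. Because containers in a pod share the network namespace, curl from the Nginx container reaches the exporter; the output shown is an example and is truncated:

$ kubectl exec -it nginx-exporter-78859765db-6j8sw -- curl -s http://127.0.0.1:9113/metrics
...
# TYPE nginx_connections_active gauge
nginx_connections_active 2
# TYPE nginx_connections_reading gauge
nginx_connections_reading 0
...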

      +
      +

      Verification

      After an application is deployed, you can access Nginx to construct some access data and check whether the corresponding monitoring data can be obtained in AOM.

      +
      $ kubectl get pod
      +NAME                              READY   STATUS    RESTARTS   AGE
      +nginx-exporter-78859765db-6j8sw   2/2     Running   0          4m
      +$ kubectl exec -it nginx-exporter-78859765db-6j8sw -- /bin/sh
      +Defaulting container name to container-0.
      +Use 'kubectl describe pod/nginx-exporter-78859765db-6j8sw -n default' to see all of the containers in this pod.
      +/ # curl http://localhost
      +<!DOCTYPE html>
      +<html>
      +<head>
      +<title>Welcome to nginx!</title>
      +<style>
      +html { color-scheme: light dark; }
      +body { width: 35em; margin: 0 auto;
      +font-family: Tahoma, Verdana, Arial, sans-serif; }
      +</style>
      +</head>
      +<body>
      +<h1>Welcome to nginx!</h1>
      +<p>If you see this page, the nginx web server is successfully installed and
      +working. Further configuration is required.</p>
      +
      +<p>For online documentation and support please refer to
      +<a href="http://nginx.org/">nginx.org</a>.<br/>
      +Commercial support is available at
      +<a href="http://nginx.com/">nginx.com</a>.</p>
      +
      +<p><em>Thank you for using nginx.</em></p>
      +</body>
      +</html>
      +/ #
      +

      You can see that Nginx has been accessed once.

      +

      Log in to AOM. In the navigation pane, choose Monitoring > Metric Monitoring. You can view Nginx-related metrics, for example, nginx_connections_active.

      +

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0205.html b/docs/cce/umn/cce_10_0205.html new file mode 100644 index 00000000..6515a048 --- /dev/null +++ b/docs/cce/umn/cce_10_0205.html @@ -0,0 +1,16 @@ + + +

      metrics-server

      +

From version 1.8 onwards, Kubernetes provides resource usage metrics, such as container CPU and memory usage, through the Metrics API. These metrics can be accessed directly by users (for example, with the kubectl top command) or used by controllers in the cluster (for example, the Horizontal Pod Autoscaler) for decision-making. The component that provides these metrics is metrics-server, which replaces Heapster; Heapster has been gradually deprecated since v1.11.

      +

      metrics-server is an aggregator for monitoring data of core cluster resources. You can quickly install this add-on on the CCE console.

      +

      After metrics-server is installed, you can create an HPA policy on the Workload Scaling tab page of the Auto Scaling page. For details, see Creating an HPA Policy for Workload Auto Scaling.

      +

      The official community project and documentation are available at https://github.com/kubernetes-sigs/metrics-server.

      +

      Installing the Add-on

      1. Log in to the CCE console and access the cluster console. Choose Add-ons in the navigation pane, locate metrics-server on the right, and click Install.
      2. Select Single or HA for Add-on Specifications, and click Install.
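After the add-on is installed and running, you can verify it by querying the Metrics API through kubectl. The node name and values shown are illustrative:

$ kubectl top nodes
NAME            CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
192.168.0.100   120m         3%     1680Mi          21%

$ kubectl top pods -n kube-system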
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0207.html b/docs/cce/umn/cce_10_0207.html new file mode 100644 index 00000000..16957294 --- /dev/null +++ b/docs/cce/umn/cce_10_0207.html @@ -0,0 +1,17 @@ + + +

      Auto Scaling

      +
      + + diff --git a/docs/cce/umn/cce_10_0208.html b/docs/cce/umn/cce_10_0208.html new file mode 100644 index 00000000..e381c6ae --- /dev/null +++ b/docs/cce/umn/cce_10_0208.html @@ -0,0 +1,84 @@ + + +

      Creating an HPA Policy for Workload Auto Scaling

      +

      Horizontal Pod Autoscaling (HPA) in Kubernetes implements horizontal scaling of pods. In a CCE HPA policy, you can configure different cooldown time windows and scaling thresholds for different applications based on the Kubernetes HPA.

      +

      Prerequisites

To use HPA policies, you need to install an add-on that provides the metrics API, such as metrics-server or prometheus.

      +
      +

      Notes and Constraints

      • HPA policies can be created only for clusters of v1.13 or later.
      • For clusters earlier than v1.19.10, if an HPA policy is used to scale out a workload with EVS volumes mounted, the existing pods cannot be read or written when a new pod is scheduled to another node.

        For clusters of v1.19.10 and later, if an HPA policy is used to scale out a workload with EVS volume mounted, a new pod cannot be started because EVS disks cannot be attached.

        +
      +
      +

      Procedure

      1. Log in to the CCE console and access the cluster console.
      2. In the navigation pane, choose Workload Scaling. Then click Create HPA Policy in the upper right corner.
      3. Set policy parameters.

        +

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Table 1 HPA policy parameters

        Parameter

        +

        Description

        +

        Policy Name

        +

        Name of the policy to be created. Set this parameter as required.

        +

        Namespace

        +

        Namespace to which the workload belongs.

        +

        Associated Workload

        +

        Workload with which the HPA policy is associated.

        +

        Pod Range

        +

        Minimum and maximum numbers of pods.

        +

        When a policy is triggered, the workload pods are scaled within this range.

        +

        Cooldown Period

        +

        Interval between a scale-in and a scale-out. The unit is minute. The interval cannot be shorter than 1 minute.

        +

This parameter is supported only in clusters of v1.15 to v1.23.

        +

        This parameter indicates the interval between consecutive scaling operations. The cooldown period ensures that a scaling operation is initiated only when the previous one is completed and the system is running stably.

        +

        Scaling Behavior

        +

        This parameter is supported only in clusters of v1.25 or later.

        +
        • Default: Scales workloads using the Kubernetes default behavior. For details, see Default Behavior.
        • Custom: Scales workloads using custom policies such as stabilization window, steps, and priorities. Unspecified parameters use the values recommended by Kubernetes.
          • Disable scale-out/scale-in: Select whether to disable scale-out or scale-in.
          • Stabilization Window: A period during which CCE continuously checks whether the metrics used for scaling keep fluctuating. CCE triggers scaling if the desired state is not maintained for the entire window. This window restricts the unwanted flapping of pod count due to metric changes.
          • Step: specifies the scaling step. You can set the number or percentage of pods to be scaled in or out within a specified period. If there are multiple policies, you can select the policy that maximizes or minimizes the number of pods.
          +
        +

        System Policy

        +
        • Metric: You can select CPU usage or Memory usage.
          NOTE:

          Usage = CPUs or memory used by pods/Requested CPUs or memory.

          +
          +
        • Desired Value: Enter the desired average resource usage.

This parameter indicates the desired value of the selected metric. Number of pods to be scaled (rounded up) = (Current metric value/Desired value) x Number of current pods. For example, with 2 pods at an average CPU usage of 80% and a desired value of 40%, the target is (80%/40%) x 2 = 4 pods. A YAML sketch of an equivalent policy is provided after this procedure.

          +
          NOTE:

          When calculating the number of pods to be added or reduced, the HPA policy uses the maximum number of pods in the last 5 minutes.

          +
          +
        • Tolerance Range: Scaling is not triggered when the metric value is within the tolerance range. The desired value must be within the tolerance range.

          If the metric value is greater than the scale-in threshold and less than the scale-out threshold, no scaling is triggered. This parameter is supported only in clusters of v1.15 or later.

          +
        +

        Custom Policy (supported only in clusters of v1.15 or later)

        +
        NOTE:

        Before setting a custom policy, you need to install an add-on that supports custom metric collection in the cluster, for example, prometheus add-on.

        +
        +
        • Metric Name: name of the custom metric. You can select a name as prompted.

          For details, see Custom Monitoring.

          +
        • Metric Source: Select an object type from the drop-down list. You can select Pod.
        • Desired Value: the average metric value of all pods. Number of pods to be scaled (rounded up) = (Current metric value/Desired value) x Number of current pods
          NOTE:

          When calculating the number of pods to be added or reduced, the HPA policy uses the maximum number of pods in the last 5 minutes.

          +
          +
        • Tolerance Range: Scaling is not triggered when the metric value is within the tolerance range. The desired value must be within the tolerance range.
        +
        +
        +

      4. Click Create.
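The console policy above corresponds to a standard Kubernetes HorizontalPodAutoscaler object. A minimal YAML sketch, assuming a Deployment named nginx in the default namespace and a cluster version that serves the autoscaling/v2 API (v1.23 or later; older clusters use autoscaling/v2beta2):

apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: nginx-hpa                 # hypothetical policy name
  namespace: default
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: nginx                   # hypothetical associated workload
  minReplicas: 1                  # pod range: lower bound
  maxReplicas: 10                 # pod range: upper bound
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 40    # desired value: 40% average CPU usage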
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0209.html b/docs/cce/umn/cce_10_0209.html new file mode 100644 index 00000000..7bac77d6 --- /dev/null +++ b/docs/cce/umn/cce_10_0209.html @@ -0,0 +1,208 @@ + + +

      Creating a Node Scaling Policy

      +

      CCE provides auto scaling through the autoscaler add-on. Nodes with different specifications can be automatically added across AZs on demand.

      +

      If a node scaling policy and the configuration in the autoscaler add-on take effect at the same time, for example, there are pods that cannot be scheduled and the value of a metric reaches the threshold at the same time, scale-out is performed first for the unschedulable pods.

      +
      • If the scale-out succeeds for the unschedulable pods, the system skips the metric-based rule logic and enters the next loop.
      • If the scale-out fails for the unschedulable pods, the metric-based rule is executed.
      +

      Prerequisites

      Before using the node scaling function, you must install the autoscaler add-on of v1.13.8 or later.

      +
      +

      Notes and Constraints

      • Auto scaling policies apply to node pools. When the number of nodes in a node pool is 0 and the scaling policy is based on CPU or memory usage, node scaling is not triggered.
      +
      +

      Procedure

      1. Log in to the CCE console and access the cluster console.
      2. Choose Node Scaling in the navigation pane.

        • If Uninstalled is displayed next to the add-on name, click Install, set add-on parameters as required, and click Install to install the add-on.
        • If Installed is displayed next to the add-on name, the add-on has been installed.
        +

      3. Click Create Node Scaling Policy in the upper right corner and set the parameters as follows:

        • Policy Name: name of the policy to be created, which can be customized.
        • Associated Node Pools: Select the node pool to be associated. You can associate multiple node pools to use the same scaling policy.

          Priority is now supported for node pools. CCE will select a node pool for auto scaling based on the following policies:

          +
1. CCE uses algorithms to determine whether a node pool meets the conditions to allow scheduling of a pod in the pending state, including whether the node resources are greater than those requested by the pod, and whether the nodeSelector, nodeAffinity, and taints match. In addition, node pools that failed to scale out (due to insufficient resources or other reasons) and are still in the 15-minute cooldown interval are filtered out.
          2. If multiple node pools meet the scaling requirements, the system checks the priority of each node pool and selects the node pool with the highest priority for scaling. The value ranges from 0 to 100 and the default priority is 0. The value 100 indicates the highest priority, and the value 0 indicates the lowest priority.
          3. If multiple node pools have the same priority or no priority is configured for them, the system selects the node pool that will consume the least resources based on the configured VM specification.
          4. If the VM specifications of multiple node pools are the same but the node pools are deployed in different AZs, the system randomly selects a node pool to trigger scaling.
5. If the resources of the preferred node pool are insufficient, the system automatically selects the next node pool based on the priority.
          +

          For details about the node pool priority, see Creating a Node Pool.

          +
          +
        • Rules: Click Add Rule. In the dialog box displayed, set the following parameters:

          Rule Name: Enter a rule name.

          +

          Rule Type: You can select Metric-based or Periodic. The differences between the two types are as follows:

          +
          • Metric-based:
            Condition: Select CPU allocation rate or Memory allocation rate and enter a value. The value must be greater than the scale-in percentage configured in the autoscaler add-on.
            • Resource allocation (%) = Resources requested by pods in the node pool/Resources allocatable to pods in the node pool
            • If multiple rules meet the conditions, the rules are executed in either of the following modes:

              If rules based on the CPU allocation rate and memory allocation rate are configured and two or more rules meet the scale-out conditions, the rule that will add the most nodes will be executed.

              +

              If a rule based on the CPU allocation rate and a periodic rule are configured and they both meet the scale-out conditions, one of them will be executed randomly. The rule executed first (rule A) changes the node pool to the scaling state. As a result, the other rule (rule B) cannot be executed. After rule A is executed and the node pool status becomes normal, rule B will not be executed.

              +
            • If rules based on the CPU allocation rate and memory allocation rate are configured, the policy detection period varies with the processing logic of each loop of the autoscaler add-on. Scale-out is triggered once the conditions are met, but it is constrained by other factors such as the cool-down interval and node pool status.
            +
            +
            +
          • Periodic:

            Trigger Time: You can select a specific time point every day, every week, every month, or every year.

            +
          +

          Action: Set an action to be performed when the trigger condition is met.

          +

          You can click Add Rule again to add more node scaling policies. You can add a maximum of one CPU usage-based rule and one memory usage-based rule. The total number of rules cannot exceed 10.

          +
        +

      4. Click OK.
      +
      +

      Constraints on Scale-in

      You can set node scale-in policies only when installing the autoscaler add-on.

      +

      Node scale-in can be triggered only by the resource allocation rate. When CPU and memory allocation rates in a cluster are lower than the specified thresholds (set when the autoscaler add-on is installed or modified), scale-in is triggered for nodes in the node pool (this function can be disabled).

      +
      +

      Example YAML

      The following is a YAML example of a node scaling policy:

      +
      apiVersion: autoscaling.cce.io/v1alpha1
      +kind: HorizontalNodeAutoscaler
      +metadata:
      +  creationTimestamp: "2020-02-13T12:47:49Z"
      +  generation: 1
      +  name: xxxx
      +  namespace: kube-system
      +  resourceVersion: "11433270"
      +  selfLink: /apis/autoscaling.cce.io/v1alpha1/namespaces/kube-system/horizontalnodeautoscalers/xxxx
      +  uid: c2bd1e1d-60aa-47b5-938c-6bf3fadbe91f
      +spec:
      +  disable: false
      +  rules:
      +  - action:
      +      type: ScaleUp
      +      unit: Node
      +      value: 1
      +    cronTrigger:
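+      # Standard cron format (minute hour day-of-month month day-of-week); 47 20 * * * triggers at 20:47 every day.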
      +      schedule: 47 20 * * *
      +    disable: false
      +    ruleName: cronrule
      +    type: Cron
      +  - action:
      +      type: ScaleUp
      +      unit: Node
      +      value: 2
      +    disable: false
      +    metricTrigger:
      +      metricName: Cpu
      +      metricOperation: '>'
      +      metricValue: "40"
      +      unit: Percent
      +    ruleName: metricrule
      +    type: Metric
      +  targetNodepoolIds:
      +  - 7d48eca7-3419-11ea-bc29-0255ac1001a8
      +
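These policies are stored as custom resources in the kube-system namespace (the resource plural horizontalnodeautoscalers appears in the selfLink above), so you can also inspect them with kubectl; the policy name is a placeholder:

kubectl get horizontalnodeautoscalers -n kube-system
kubectl get horizontalnodeautoscalers <policy-name> -n kube-system -o yaml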
      + +
      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      Table 1 Key parameters

      Parameter

      +

      Type

      +

      Description

      +

      spec.disable

      +

      Bool

      +

      Whether to enable the scaling policy. This parameter takes effect for all rules in the policy.

      +

      spec.rules

      +

      Array

      +

      All rules in a scaling policy.

      +

      spec.rules[x].ruleName

      +

      String

      +

      Rule name.

      +

      spec.rules[x].type

      +

      String

      +

      Rule type. Currently, Cron and Metric are supported.

      +

      spec.rules[x].disable

      +

      Bool

      +

      Rule switch. Currently, only false is supported.

      +

      spec.rules[x].action.type

      +

      String

      +

      Rule action type. Currently, only ScaleUp is supported.

      +

      spec.rules[x].action.unit

      +

      String

      +

      Rule action unit. Currently, only Node is supported.

      +

      spec.rules[x].action.value

      +

      Integer

      +

      Rule action value.

      +

      spec.rules[x].cronTrigger

      +

      /

      +

      Optional. This parameter is valid only in periodic rules.

      +

      spec.rules[x].cronTrigger.schedule

      +

      String

      +

      Cron expression of a periodic rule.

      +

      spec.rules[x].metricTrigger

      +

      /

      +

      Optional. This parameter is valid only in metric-based rules.

      +

      spec.rules[x].metricTrigger.metricName

      +

      String

      +

      Metric of a metric-based rule. Currently, Cpu and Memory are supported.

      +

      spec.rules[x].metricTrigger.metricOperation

      +

      String

      +

      Comparison operator of a metric-based rule. Currently, only > is supported.

      +

      spec.rules[x].metricTrigger.metricValue

      +

      String

      +

      Metric threshold of a metric-based rule. The value can be any integer from 1 to 100 and must be a character string.

      +

      spec.rules[x].metricTrigger.Unit

      +

      String

      +

      Unit of the metric-based rule threshold. Currently, only % is supported.

      +

      spec.targetNodepoolIds

      +

      Array

      +

      All node pools associated with the scaling policy.

      +

      spec.targetNodepoolIds[x]

      +

      String

      +

      ID of the node pool associated with the scaling policy.

      +
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0210.html b/docs/cce/umn/cce_10_0210.html new file mode 100644 index 00000000..741d00b3 --- /dev/null +++ b/docs/cce/umn/cce_10_0210.html @@ -0,0 +1,60 @@ + + +

      Migrating Services Across Clusters of Different Versions

      +

      Application Scenarios

      This section describes how to migrate services from a cluster of an earlier version to a cluster of a later version in CCE.

      +

      This operation is applicable when a cross-version cluster upgrade is required (for example, upgrade from v1.7.* or v1.9.* to 1.17.*) and new clusters can be created for service migration.

      +
      +

      Prerequisites

      +
      + + + + + + + + + + + + + + + + + + + +
      Table 1 Checklist before migration

      Category

      +

      Description

      +

      Cluster

      +

      NodeIP-related: Check whether node IP addresses (including EIPs) of the cluster before the migration have been used in other configurations or whitelists.

      +

      Workloads

      +

      Record the number of workloads for post-migration check.

      +

      Storage

      +
      1. Check whether the storage resources in use are provisioned by the cloud or by your organization.
      2. Change the automatically created storage to the existing storage in the new cluster.
      +

      Network

      +
      1. Pay special attention to the ELB and ingress.
2. Clusters of an earlier version support only the classic load balancer. To migrate services to a new cluster, you need to change the load balancer type to the shared load balancer. The corresponding ELB service will then be re-established.
      +

      O&M

      +

      Private configuration: Check whether kernel parameters or system data have been configured on nodes in the cluster.

      +
      +
      +
      +

      Procedure

      1. Create a CCE cluster.

        Create a cluster with the same specifications and configurations as the cluster of the earlier version. For details, see Creating a CCE Cluster.

        +

      2. Add a node.

        Add nodes with the same specifications and manual configuration items. For details, see Creating a Node.

        +

      3. Create a storage volume in the new cluster.

        Use an existing storage volume to create a PVC in the new cluster. The PVC name remains unchanged. For details, see PersistentVolumeClaims (PVCs).

        +

        Storage switching supports only OBS buckets, SFS file systems, and shared EVS disks. If a non-shared EVS disk is used, you need to suspend the workloads in the old cluster to switch the storage resources. As a result, services will be interrupted.

        +
        +

      4. Create a workload in the new cluster.

        The workload name and specifications remain unchanged. For details about how to create a workload, see Creating a Deployment or Creating a StatefulSet. For details about how to attach a storage volume to the workload, see Creating a Deployment Mounted with an EVS Volume.

        +

      5. Create a Service in the new cluster.

        The Service name and specifications remain unchanged. For details about how to create a Service, see Services.

        +

      6. Commission services.

        After all resources are created, commission the containerized services. If the commissioning is successful, migrate the services to the new cluster.

        +

      7. Delete the old cluster.

        When all functions of the new cluster are stable, delete the old cluster. For details about how to delete a cluster, see Deleting a Cluster.

        +

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0212.html b/docs/cce/umn/cce_10_0212.html new file mode 100644 index 00000000..a88c3403 --- /dev/null +++ b/docs/cce/umn/cce_10_0212.html @@ -0,0 +1,23 @@ + + +

      Deleting a Cluster

      +

      Scenario

      This section describes how to delete a cluster.

      +
      +

      Precautions

      • Deleting a cluster will delete the nodes in the cluster (excluding accepted nodes), data disks attached to the nodes, workloads, and Services. Related services cannot be restored. Before performing this operation, ensure that data has been backed up or migrated. Deleted data cannot be restored.
        Resources that are not created in CCE will not be deleted:
        • Accepted nodes (only the nodes created in CCE are deleted);
        • ELB load balancers associated with Services and ingresses (only the automatically created load balancers are deleted);
        • Manually created cloud storage resources associated with PVs or imported cloud storage resources (only the cloud storage resources automatically created by PVCs are deleted)
        +
        +
      • A hibernated cluster cannot be deleted. Wake up the cluster and try again.
      • If a cluster whose status is Unavailable is deleted, some storage resources of the cluster may need to be manually deleted.
      +
      +

      Procedure

      1. Log in to the CCE console. In the navigation pane, choose Clusters.
      2. Click next to the cluster to be deleted.
      3. In the displayed Delete Cluster dialog box, select the resources to be released.

        • Delete cloud storage resources attached to workloads in the cluster.

          Before you delete the PVCs and volumes, pay attention to the following rules:

          +
          • The underlying storage resources are deleted according to the reclaim policy you defined.
          • If there are a large number of files (more than 1,000) in the OBS bucket, manually clear the files and then delete the cluster.
          +
          +
        • Delete networking resources, such as load balancers in a cluster. (Only automatically created load balancers can be deleted.)
        +

      4. Click Yes to start deleting the cluster.

        The delete operation takes 1 to 3 minutes to complete.

        +

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0213.html b/docs/cce/umn/cce_10_0213.html new file mode 100644 index 00000000..eed9b36b --- /dev/null +++ b/docs/cce/umn/cce_10_0213.html @@ -0,0 +1,296 @@ + + +

      Managing Cluster Components

      +

      Scenario

CCE allows you to manage cluster parameters so that core components work exactly as you require.

      +
      +

      Notes and Constraints

      This function is supported only in clusters of v1.15 and later. It is not displayed for versions earlier than v1.15.

      +
      +

      Procedure

      1. Log in to the CCE console. In the navigation pane, choose Clusters.
      2. Click next to the target cluster.
      3. On the Manage Component page on the right, change the values of the following Kubernetes parameters:

        +
        + + + + + + + + + +
        Table 1 Extended controller parameters

        Parameter

        +

        Description

        +

        Value

        +

        enable-resource-quota

        +

        Whether to automatically create a resource quota object when creating a namespace.

        +
        • false: no auto creation
• true: auto creation enabled. For details about the resource quota defaults, see Setting a Resource Quota.
        +

        Default: false

        +
        +
        + +
        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Table 2 kube-apiserver parameters

        Parameter

        +

        Description

        +

        Value

        +

        default-not-ready-toleration-seconds

        +

Toleration time, in seconds, of the notReady:NoExecute toleration that is added by default to every pod that does not already have such a toleration (a sketch of the resulting tolerations is provided after this procedure).

        +

        Default: 300s

        +

        default-unreachable-toleration-seconds

        +

Toleration time, in seconds, of the unreachable:NoExecute toleration that is added by default to every pod that does not already have such a toleration.

        +

        Default: 300s

        +

        max-mutating-requests-inflight

        +

        Maximum number of concurrent mutating requests. When the value of this parameter is exceeded, the server rejects requests.

        +

        The value 0 indicates no limitation.

        +

        Manual configuration is no longer supported since cluster v1.21. The value is automatically specified based on the cluster scale.

        +
        • 200 for clusters with 50 or 200 nodes
        • 500 for clusters with 1,000 nodes
        • 1000 for clusters with 2,000 nodes
        +

        Default: 1000

        +

        max-requests-inflight

        +

        Maximum number of concurrent non-mutating requests. When the value of this parameter is exceeded, the server rejects requests.

        +

        The value 0 indicates no limitation.

        +

        Manual configuration is no longer supported since cluster v1.21. The value is automatically specified based on the cluster scale.

        +
        • 400 for clusters with 50 or 200 nodes
        • 1000 for clusters with 1,000 nodes
        • 2000 for clusters with 2,000 nodes
        +

        Default: 2000

        +

        service-node-port-range

        +

        Range of node port numbers.

        +

        Default:

        +

        30000-32767

        +

        Options:

        +

        min>20105

        +

        max<32768

        +

        support-overload

        +

        Cluster overload control. If enabled, concurrent requests are dynamically controlled based on the resource pressure of master nodes to keep them and the cluster available.

        +
        • false: Overload control is disabled.
        • true: Overload control is enabled.
        +
        +
        +
        + +
        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Table 3 kube-controller-manager parameters

        Parameter

        +

        Description

        +

        Value

        +

        concurrent-deployment-syncs

        +

        Number of Deployments that are allowed to synchronize concurrently.

        +

        Default: 5

        +

        concurrent-endpoint-syncs

        +

        Number of endpoints that are allowed to synchronize concurrently.

        +

        Default: 5

        +

        concurrent-gc-syncs

        +

        Number of garbage collector workers that are allowed to synchronize concurrently.

        +

        Default: 20

        +

        concurrent-job-syncs

        +

        Number of jobs that can be synchronized at the same time.

        +

        Default: 5

        +

        concurrent-namespace-syncs

        +

        Number of namespaces that are allowed to synchronize concurrently.

        +

        Default: 10

        +

        concurrent-replicaset-syncs

        +

        Number of ReplicaSets that are allowed to synchronize concurrently.

        +

        Default: 5

        +

        concurrent-resource-quota-syncs

        +

        Number of resource quotas that are allowed to synchronize concurrently.

        +

        Default: 5

        +

        concurrent-service-syncs

        +

        Number of Services that are allowed to synchronize concurrently.

        +

        Default: 10

        +

        concurrent-serviceaccount-token-syncs

        +

        Number of service account tokens that are allowed to synchronize concurrently.

        +

        Default: 5

        +

        concurrent-ttl-after-finished-syncs

        +

        Number of TTL-after-finished controller workers that are allowed to synchronize concurrently.

        +

        Default: 5

        +

        concurrent_rc_syncs

        +

        Number of replication controllers that are allowed to synchronize concurrently.

        +

        Default: 5

        +

        horizontal-pod-autoscaler-sync-period

        +

        How often HPA audits metrics in a cluster.

        +

        Default: 15 seconds

        +

        kube-api-qps

        +

Queries per second (QPS) to use while communicating with kube-apiserver.

        +

        Default: 100

        +

        kube-api-burst

        +

        Burst to use while talking with kube-apiserver.

        +

        Default: 100

        +

        terminated-pod-gc-threshold

        +

        Number of terminated pods that can exist before the terminated pod garbage collector starts deleting terminated pods.

        +

        If <= 0, the terminated pod garbage collector is disabled.

        +

        Default: 1000

        +
        +
        + +
        + + + + + + + + + + + + + +
        Table 4 kube-scheduler parameters

        Parameter

        +

        Description

        +

        Value

        +

        kube-api-qps

        +

Queries per second (QPS) to use while communicating with kube-apiserver.

        +

        Default: 100

        +

        kube-api-burst

        +

        Burst to use while talking with kube-apiserver.

        +

        Default: 100

        +
        +
        + +
        + + + + + + + + + + + + + + + + + + + + + + + + + +
        Table 5 eni parameters (supported only by CCE Turbo clusters)

        Parameter

        +

        Description

        +

        Value

        +

        nic-minimum-target

        +

        Minimum number of ENIs bound to a node at the cluster level

        +

        Default: 10

        +

        nic-maximum-target

        +

        Maximum number of ENIs pre-bound to a node at the cluster level

        +

        Default: 0

        +

        nic-warm-target

        +

        Number of ENIs pre-bound to a node at the cluster level

        +

        Default: 2

        +

        nic-max-above-warm-target

        +

        Reclaim number of ENIs pre-bound to a node at the cluster level

        +

        Default: 2

        +

        prebound-subeni-percentage

        +

Low threshold of the number of bound ENIs : high threshold of the number of bound ENIs

        +
        NOTE:

This parameter has been deprecated. Use the other four dynamic ENI pre-binding parameters instead.

        +
        +

        Default: 0:0

        +
        +
        +

      4. Click OK.
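As a reference for default-not-ready-toleration-seconds and default-unreachable-toleration-seconds in Table 2, the defaults typically translate into tolerations like the following on every pod that does not define its own (a sketch of what kubectl get pod -o yaml shows, assuming the 300s defaults):

tolerations:
- key: node.kubernetes.io/not-ready
  operator: Exists
  effect: NoExecute
  tolerationSeconds: 300      # default-not-ready-toleration-seconds
- key: node.kubernetes.io/unreachable
  operator: Exists
  effect: NoExecute
  tolerationSeconds: 300      # default-unreachable-toleration-seconds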
      +
      + +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0214.html b/docs/cce/umn/cce_10_0214.html new file mode 100644 index 00000000..6b4b34c0 --- /dev/null +++ b/docs/cce/umn/cce_10_0214.html @@ -0,0 +1,20 @@ + + +

      Hibernating and Waking Up a Cluster

      +

      Scenario

      If you do not need to use a cluster temporarily, you are advised to hibernate the cluster.

      +

      After a cluster is hibernated, resources such as workloads cannot be created or managed in the cluster.

      +

      A hibernated cluster can be quickly woken up and used normally.

      +
      +

      Notes and Constraints

      During cluster wakeup, the master node may fail to be started due to insufficient resources. As a result, the cluster fails to be woken up. Wait for a while and wake up the cluster again.

      +
      +

      Hibernating a Cluster

      1. Log in to the CCE console. In the navigation pane, choose Clusters.
      2. Click next to the cluster to be hibernated.
      3. In the dialog box displayed, check the precautions and click Yes. Wait until the cluster is hibernated.
      +
      +

      Waking Up a Cluster

      1. Log in to the CCE console. In the navigation pane, choose Clusters.
      2. Click next to the cluster to be woken up.
      3. When the cluster status changes from Waking to Running, the cluster is woken up. It takes about 3 to 5 minutes to wake up the cluster.
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0215.html b/docs/cce/umn/cce_10_0215.html new file mode 100644 index 00000000..9e1c9cbc --- /dev/null +++ b/docs/cce/umn/cce_10_0215.html @@ -0,0 +1,23 @@ + + +

      Upgrading a Cluster

      +
      + + diff --git a/docs/cce/umn/cce_10_0216.html b/docs/cce/umn/cce_10_0216.html new file mode 100644 index 00000000..c0c4c3ac --- /dev/null +++ b/docs/cce/umn/cce_10_0216.html @@ -0,0 +1,76 @@ + + +

      Creating a DaemonSet

      +

      Scenario

      CCE provides deployment and management capabilities for multiple types of containers and supports features of container workloads, including creation, configuration, monitoring, scaling, upgrade, uninstall, service discovery, and load balancing.

      +

A DaemonSet ensures that all (or some) nodes each run exactly one copy of a pod. When a node is added to a cluster, a pod is added on it accordingly. When a node is removed from a cluster, the pod on it is reclaimed. If a DaemonSet is deleted, all pods created by it will be deleted.

      +

      The typical application scenarios of a DaemonSet are as follows:

      +
      • Run the cluster storage daemon, such as glusterd or Ceph, on each node.
      • Run the log collection daemon, such as Fluentd or Logstash, on each node.
      • Run the monitoring daemon, such as Prometheus Node Exporter, collectd, Datadog agent, New Relic agent, or Ganglia (gmond), on each node.
      +

      You can deploy a DaemonSet for each type of daemons on all nodes, or deploy multiple DaemonSets for the same type of daemons. In the second case, DaemonSets have different flags and different requirements on memory and CPU for different hardware types.

      +
      +

      Prerequisites

      You must have one cluster available before creating a DaemonSet. For details on how to create a cluster, see Creating a CCE Cluster.

      +
      +

      Using the CCE Console

      1. Log in to the CCE console.
2. Click the cluster name to go to the cluster console, choose Workloads in the navigation pane, and click Create Workload in the upper right corner.
      3. Set basic information about the workload.

        Basic Info
        • Workload Type: Select DaemonSet. For details about workload types, see Overview.
        • Workload Name: Enter the name of the workload.
        • Namespace: Select the namespace of the workload. The default value is default. You can also click Create Namespace to create one. For details, see Creating a Namespace.
        • Container Runtime: A CCE cluster uses runC by default, whereas a CCE Turbo cluster supports both runC and Kata. For details about the differences between runC and Kata, see Kata Containers and Common Containers.
        • Time Zone Synchronization: Specify whether to enable time zone synchronization. After time zone synchronization is enabled, the container and node use the same time zone. The time zone synchronization function depends on the local disk mounted to the container. Do not modify or delete the time zone. For details, see Configuring Time Zone Synchronization.
        +
        +
        Container Settings
        • Container Information
          Multiple containers can be configured in a pod. You can click Add Container on the right to configure multiple containers for the pod. +
          +
        • Image Access Credential: Select the credential used for accessing the image repository. The default value is default-secret. You can use default-secret to access images in SWR. For details about default-secret, see default-secret.
        • GPU graphics card: All is selected by default. The workload instance will be scheduled to the node with the specified GPU graphics card type.
        +
        +

        Service Settings

        +

        A Service is used for pod access. With a fixed IP address, a Service forwards access traffic to pods and performs load balancing for these pods.

        +

        You can also create a Service after creating a workload. For details about the Service, see Service Overview.

        +
        Advanced Settings +
        +

      4. Click Create Workload in the lower right corner.
      +
      +

      Using kubectl

      The following procedure uses Nginx as an example to describe how to create a workload using kubectl.

      +
      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Create and edit the nginx-daemonset.yaml file. nginx-daemonset.yaml is an example file name, and you can change it as required.

        vi nginx-daemonset.yaml

        +

        The following is an example description file. For more information about DaemonSets, see the Kubernetes documentation.

        +
        apiVersion: apps/v1
        +kind: DaemonSet
        +metadata:
        +  name: nginx-daemonset
        +  labels:
        +    app: nginx-daemonset
        +spec:
        +  selector:
        +    matchLabels:
        +      app: nginx-daemonset
        +  template:
        +    metadata:
        +      labels:
        +        app: nginx-daemonset
        +    spec:
        +      nodeSelector:                 # Node selection. A pod is created on a node only when the node meets daemon=need.
        +        daemon: need
        +      containers:
        +      - name: nginx-daemonset
        +        image: nginx:alpine
        +        resources:
        +          limits:
        +            cpu: 250m
        +            memory: 512Mi
        +          requests:
        +            cpu: 250m
        +            memory: 512Mi
        +      imagePullSecrets:
        +      - name: default-secret
        +

        The replicas parameter used when defining a Deployment or StatefulSet does not exist in the preceding DaemonSet configuration, because each node runs exactly one replica. This number is fixed.

        +

        The nodeSelector in the preceding pod template specifies that a pod is created only on the nodes that carry the daemon=need label. If you want to create a pod on every node, delete the nodeSelector setting.

        +
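        For example, assuming an existing node whose name you substitute for <node-name> (a placeholder), you can attach the daemon=need label to it so that the DaemonSet schedules a pod onto that node:

        kubectl label node <node-name> daemon=need
        kubectl get node -L daemon        # verify that the label has been added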

      3. Create a DaemonSet.

        kubectl create -f nginx-daemonset.yaml

        +

        If the following information is displayed, the DaemonSet is being created.

        +
        daemonset.apps/nginx-daemonset created
        +

      4. Query the DaemonSet status.

        kubectl get ds

        +
        $ kubectl get ds
        +NAME              DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
        +nginx-daemonset   1         1         0       1            0           daemon=need     116s
        +

      5. If the workload will be accessed through a ClusterIP or NodePort Service, set the corresponding workload access type. For details, see Networking.
      +
      +
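      For reference, the following is a minimal sketch (not part of the original procedure; the Service name and node port are illustrative) of a NodePort Service that exposes the nginx-daemonset pods created above:

        apiVersion: v1
        kind: Service
        metadata:
          name: nginx-daemonset-svc        # hypothetical Service name
        spec:
          type: NodePort
          selector:
            app: nginx-daemonset           # matches the pod label used in the DaemonSet example
          ports:
          - port: 80                       # port exposed inside the cluster
            targetPort: 80                 # container port that nginx listens on
            nodePort: 30080                # example port; must fall within the cluster's NodePort range

      After applying the file with kubectl create -f, the workload can be reached at <NodeIP>:30080 from outside the cluster.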
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0222.html b/docs/cce/umn/cce_10_0222.html new file mode 100644 index 00000000..cbe4ccd7 --- /dev/null +++ b/docs/cce/umn/cce_10_0222.html @@ -0,0 +1,411 @@ + + +

      Managing a Node Pool

      +

      Notes and Constraints

      The default node pool DefaultPool does not support the following management operations.

      +
      +

      Configuring Kubernetes Parameters

      CCE allows you to highly customize Kubernetes parameter settings on core components in a cluster. For more information, see kubelet.

      +

      This function is supported only in clusters of v1.15 and later. It is not displayed for clusters earlier than v1.15.

      +
      1. Log in to the CCE console.
      2. Click the cluster name and access the cluster console. Choose Nodes in the navigation pane and click the Node Pools tab on the right.
      3. Choose More > Manage next to the node pool name.
      4. On the Manage Component page on the right, change the values of the following Kubernetes parameters:

        +

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Table 1 kubelet

        Parameter

        +

        Description

        +

        Default Value

        +

        Remarks

        +

        cpu-manager-policy

        +

        Specifies the CPU core binding configuration. For details, see CPU Core Binding.

        +
        • none: disables pods from exclusively occupying CPUs. Select this value if you want a large pool of shareable CPU cores.
        • static: enables pods to exclusively occupy CPUs. Select this value if your workload is sensitive to latency in CPU cache and scheduling.
        +

        none

        +

        The values can be modified during the node pool lifecycle.

        +

        kube-api-qps

        +

        Query per second (QPS) to use while talking with kube-apiserver.

        +

        100

        +

        kube-api-burst

        +

        Burst to use while talking with kube-apiserver.

        +

        100

        +

        max-pods

        +

        Maximum number of pods managed by kubelet.

        +

        40

        +

        20

        +

        pod-pids-limit

        +

        PID limit in Kubernetes

        +

        -1

        +

        with-local-dns

        +

        Whether to use the local IP address as the ClusterDNS of the node.

        +

        false

        +

        event-qps

        +

        QPS limit for event creation

        +

        5

        +

        allowed-unsafe-sysctls

        +

        Insecure system configuration allowed.

        +

        Starting from v1.17.17, CCE enables pod security policies for kube-apiserver. You need to add corresponding configurations to allowedUnsafeSysctls of a pod security policy to make the policy take effect. (This configuration is not required for clusters earlier than v1.17.17.) For details, see Example of Enabling Unsafe Sysctls in Pod Security Policy.

        +

        []

        +

        over-subscription-resource

        +

        Whether to enable node oversubscription.

        +

        If this parameter is set to true, the node oversubscription feature is enabled. For details, see Hybrid Deployment of Online and Offline Jobs.

        +

        true

        +

        -

        +

        colocation

        +

        Whether to enable node hybrid deployment.

        +

        If this parameter is set to true, the node hybrid deployment feature is enabled. For details, see Hybrid Deployment of Online and Offline Jobs.

        +

        true

        +

        -

        +

        kube-reserved-mem

        +

        system-reserved-mem

        +

        Reserved node memory.

        +

        Depends on node specifications. For details, see Formula for Calculating the Reserved Resources of a Node.

        +

        The sum of kube-reserved-mem and system-reserved-mem is less than half of the memory.

        +

        topology-manager-policy

        +

        Set the topology management policy.

        +

        Valid values are as follows:

        +
        • restricted: kubelet accepts only pods that achieve optimal NUMA alignment on the requested resources.
        • best-effort: kubelet preferentially selects pods that implement NUMA alignment on CPU and device resources.
        • none (default): The topology management policy is disabled.
        • single-numa-node: kubelet allows only pods that are aligned to the same NUMA node in terms of CPU and device resources.
        +

        none

        +

        The values can be modified during the node pool lifecycle.

        +
        NOTICE:

        Exercise caution when modifying topology-manager-policy and topology-manager-scope. The modification restarts kubelet and recalculates the resource allocation of pods based on the modified policy. As a result, running pods may restart or even fail to receive any resources.

        +

        +
        +

        topology-manager-scope

        +

        Set the resource alignment granularity of the topology management policy. Valid values are as follows:

        +
        • container (default)
        • pod
        +

        container

        +

        resolv-conf

        +

        DNS resolution configuration file specified by the container

        +

        The default value is null.

        +

        -

        +
        +
        + +
        + + + + + + + + + + + + + + + +
        Table 2 kube-proxy

        Parameter

        +

        Description

        +

        Default Value

        +

        Remarks

        +

        conntrack-min

        +

        sysctl -w net.nf_conntrack_max

        +

        131072

        +

        The values can be modified during the node pool lifecycle.

        +

        conntrack-tcp-timeout-close-wait

        +

        sysctl -w net.netfilter.nf_conntrack_tcp_timeout_close_wait

        +

        1h0m0s

        +
        +
        + +
        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Table 3 Network components (available only for CCE Turbo clusters)

        Parameter

        +

        Description

        +

        Default Value

        +

        Remarks

        +

        nic-threshold

        +

        Low threshold of the number of bound ENIs:High threshold of the number of bound ENIs

        +
        NOTE:

        This parameter is being deprecated. Use the other four dynamic ENI pre-binding parameters instead.

        +
        +

        Default: 0:0

        +

        -

        +

        nic-minimum-target

        +

        Minimum number of ENIs bound to the nodes in the node pool

        +

        Default: 10

        +

        -

        +

        nic-maximum-target

        +

        Maximum number of ENIs pre-bound to a node at the node pool level

        +

        Default: 0

        +

        -

        +

        nic-warm-target

        +

        Number of ENIs pre-bound to a node at the node pool level

        +

        Default: 2

        +

        -

        +

        nic-max-above-warm-target

        +

        Reclaim number of ENIs pre-bound to a node at the node pool level

        +

        Default: 2

        +

        -

        +
        +
        + +
        + + + + + + + + + + + +
        Table 4 Pod security group in a node pool (available only for CCE Turbo clusters)

        Parameter

        +

        Description

        +

        Default Value

        +

        Remarks

        +

        security_groups_for_nodepool

        +
        • Default security group used by pods in a node pool. You can enter the security group ID. If this parameter is not set, the default security group of the cluster container network is used. A maximum of five security group IDs can be specified at the same time, separated by semicolons (;).
        • The priority of the security group is lower than that of the security group configured for the SecurityGroup resource object.
        +

        -

        +

        -

        +
        +
        + +
        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Table 5 Docker (available only for node pools that use Docker)

        Parameter

        +

        Description

        +

        Default Value

        +

        Remarks

        +

        native-umask

        +

        --exec-opt native.umask

        +

        normal

        +

        Cannot be changed.

        +

        docker-base-size

        +

        --storage-opts dm.basesize

        +

        0

        +

        Cannot be changed.

        +

        insecure-registry

        +

        Address of an insecure image registry

        +

        false

        +

        Cannot be changed.

        +

        limitcore

        +

        Maximum size of a core file in a container. The unit is byte.

        +

        5368709120

        +

        -

        +

        default-ulimit-nofile

        +

        Limit on the number of handles in a container

        +

        {soft}:{hard}

        +

        The value cannot exceed the value of the kernel parameter nr_open and cannot be a negative number.

        +

        You can run the following command to obtain the kernel parameter nr_open:

        +
        sysctl -a | grep nr_open
        +
        +
        +

      5. Click OK.
      +
      +

      Editing a Node Pool

      1. Log in to the CCE console.
      2. Click the cluster name and access the cluster console. Choose Nodes in the navigation pane and click the Node Pools tab on the right.
      3. Click Edit next to the name of the node pool you will edit. In the Edit Node Pool page, edit the following parameters:

        Basic Settings +
        + + + + + + + + + + +
        Table 6 Basic settings

        Parameter

        +

        Description

        +

        Node Pool Name

        +

        Name of the node pool.

        +

        Auto Scaling

        +

        By default, this parameter is disabled.

        +

        After autoscaler is enabled, nodes in the node pool are automatically created or deleted based on service requirements.

        +
        • Maximum Nodes and Minimum Nodes: You can set the maximum and minimum number of nodes to ensure that the number of nodes to be scaled is within a proper range.
        • Priority: A larger value indicates a higher priority. For example, if this parameter is set to 1 and 4 respectively for node pools A and B, B has a higher priority than A, and auto scaling is first triggered for B. If the priorities of multiple node pools are set to the same value, for example, 2, the node pools are not prioritized and the system performs scaling based on the minimum resource waste principle.
        • Cooldown Period: Required. The unit is minute. This parameter indicates the interval between the previous scale-out action and the next scale-in action.
        +

        If the Autoscaler field is set to on, install the autoscaler add-on to use the autoscaler feature.

        +
        +
        +
        +
        Advanced Settings +
        + + + + + + + + + + + + + + + + +
        Table 7 Advanced settings

        Parameter

        +

        Description

        +

        K8s label

        +

        Click Add Label to set the key-value pair attached to the Kubernetes objects (such as pods). A maximum of 20 labels can be added.

        +

        Labels can be used to distinguish nodes. With workload affinity settings, container pods can be scheduled to a specified node. For more information, see Labels and Selectors.

        +
        NOTE:

        After a K8s label is modified, the existing nodes in the node pool are updated synchronously.

        +
        +

        Resource Tag

        +

        You can add resource tags to classify resources.

        +

        You can create predefined tags in Tag Management Service (TMS). Predefined tags are visible to all service resources that support the tagging function. You can use these tags to improve tagging and resource migration efficiency.

        +

        CCE will automatically create the "CCE-Dynamic-Provisioning-Node=node id" tag.

        +
        NOTE:

        After a resource tag is modified, the modification automatically takes effect when a node is added. For existing nodes, you need to manually reset the nodes for the modification to take effect.

        +
        +

        Taint

        +
        This field is left blank by default. You can add taints to set anti-affinity for the node. A maximum of 10 taints are allowed for each node. Each taint contains the following parameters:
        • Key: A key must contain 1 to 63 characters starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
        • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
        • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
        +
        +

        For details, see Managing Node Taints.

        +
        NOTE:

        After a taint is modified, the existing nodes in the node pool are updated synchronously.

        +
        +

        Edit Key pair

        +

        Only node pools that use key pairs for login support key pair editing. You can select another key pair.

        +
        NOTE:

        The edited key pair automatically takes effect when a node is added. For existing nodes, you need to manually reset the nodes for the key pair to take effect.

        +
        +
        +
        +
        +

      4. Click OK.

        In the node pool list, the node pool status becomes Scaling. After the status changes to Completed, the node pool parameters are modified successfully. The modified configuration will be synchronized to all nodes in the node pool.

        +

      +
      +

      Deleting a Node Pool

      Deleting a node pool will delete nodes in the pool. Pods on these nodes will be automatically migrated to available nodes in other node pools. If pods in the node pool have a specific node selector and none of the other nodes in the cluster satisfies the node selector, the pods will become unschedulable.

      +
      1. Log in to the CCE console.
      2. Click the cluster name and access the cluster console. Choose Nodes in the navigation pane and click the Node Pools tab on the right.
      3. Choose More > Delete next to a node pool name to delete the node pool.
      4. Read the precautions in the Delete Node Pool dialog box.
      5. In the dialog box, click Yes to confirm that you want to continue the deletion.
      +
      +

      Copying a Node Pool

      You can copy the configuration of an existing node pool to create a new node pool on the CCE console.

      +
      1. Log in to the CCE console.
      2. Click the cluster name and access the cluster console. Choose Nodes in the navigation pane and click the Node Pools tab on the right.
      3. Choose More > Copy next to a node pool name to copy the node pool.
      4. The configurations of the selected node pool are replicated to the Clone Node Pool page. You can edit the configurations as required and click Next: Confirm.
      5. On the Confirm page, confirm the node pool configuration and click Create Now. Then, a new node pool is created based on the edited configuration.
      +
      +

      Migrating a Node

      Currently, nodes in a node pool can be migrated only to the default node pool (defaultpool) in the same cluster.

      +
      1. Log in to the CCE console and click the cluster name to access the cluster.
      2. In the navigation pane, choose Nodes and switch to the Node Pools tab page.
      3. Click View Node in the Operation column of the node pool to be migrated.
      4. Select the nodes to be migrated and choose More > Migrate to migrate the nodes to the default node pool in batches.

        You can also choose More > Migrate in the Operation column of a single node to migrate the node.

        +

        +

      5. In the displayed Migrate Node window, confirm the information.

        The migration has no impact on the original resource tags, Kubernetes labels, and taints of the node.

        +
        +

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0232.html b/docs/cce/umn/cce_10_0232.html new file mode 100644 index 00000000..28cd9a17 --- /dev/null +++ b/docs/cce/umn/cce_10_0232.html @@ -0,0 +1,397 @@ + + +

      Scheduling Policy (Affinity/Anti-affinity)

      +

      A nodeSelector provides a very simple way to constrain pods to nodes with particular labels, as mentioned in Creating a DaemonSet. The affinity and anti-affinity feature greatly expands the types of constraints you can express.

      +

      Kubernetes supports node-level and pod-level affinity and anti-affinity. You can configure custom rules to achieve affinity and anti-affinity scheduling. For example, you can deploy frontend pods and backend pods together, deploy the same type of applications on a specific node, or deploy different applications on different nodes.

      +

      Node Affinity (nodeAffinity)

      Labels are the basis of affinity rules. Let's look at the labels on nodes in a cluster.

      +
      $ kubectl describe node 192.168.0.212
      +Name:               192.168.0.212
      +Roles:              <none>
      +Labels:             beta.kubernetes.io/arch=amd64
      +                    beta.kubernetes.io/os=linux
      +                    failure-domain.beta.kubernetes.io/is-baremetal=false
      +                    failure-domain.beta.kubernetes.io/region=******
      +                    failure-domain.beta.kubernetes.io/zone=******
      +                    kubernetes.io/arch=amd64
      +                    kubernetes.io/availablezone=******
      +                    kubernetes.io/eniquota=12
      +                    kubernetes.io/hostname=192.168.0.212
      +                    kubernetes.io/os=linux
      +                    node.kubernetes.io/subnetid=fd43acad-33e7-48b2-a85a-24833f362e0e
      +                    os.architecture=amd64
      +                    os.name=EulerOS_2.0_SP5
      +                    os.version=3.10.0-862.14.1.5.h328.eulerosv2r7.x86_64
      +

      These labels are automatically added by CCE during node creation. The following describes a few that are frequently used during scheduling.

      +
      • failure-domain.beta.kubernetes.io/region: region where the node is located.
      • failure-domain.beta.kubernetes.io/zone: availability zone to which the node belongs.
      • kubernetes.io/hostname: host name of the node.
      +

      When you deploy pods, you can use a nodeSelector, as described in DaemonSet, to constrain pods to nodes with specific labels. The following example shows how to use a nodeSelector to deploy pods only on the nodes with the gpu=true label.

      +
      apiVersion: v1
      +kind: Pod
      +metadata:
      +  name: nginx
      +spec:
      +  nodeSelector:                 # Node selection. A pod is deployed on a node only when the node has the gpu=true label.
       +    gpu: "true"                # Quote the value; node label values are strings.
      +...
      +
      Node affinity rules can achieve the same results, as shown in the following example.
      apiVersion: apps/v1
      +kind: Deployment
      +metadata:
      +  name:  gpu
      +  labels:
      +    app:  gpu
      +spec:
      +  selector:
      +    matchLabels:
      +      app: gpu
      +  replicas: 3
      +  template:
      +    metadata:
      +      labels:
      +        app:  gpu
      +    spec:
      +      containers:
      +      - image:  nginx:alpine
      +        name:  gpu
      +        resources:
      +          requests:
      +            cpu: 100m
      +            memory: 200Mi
      +          limits:
      +            cpu: 100m
      +            memory: 200Mi
      +      imagePullSecrets:
      +      - name: default-secret
      +      affinity:
      +        nodeAffinity:
      +          requiredDuringSchedulingIgnoredDuringExecution:
      +            nodeSelectorTerms:
      +            - matchExpressions:
      +              - key: gpu
      +                operator: In
      +                values:
      +                - "true"
      +
      +

      Even though the node affinity rule requires more lines, it is more expressive, which will be further described later.

      +

      requiredDuringSchedulingIgnoredDuringExecution seems to be complex, but it can be easily understood as a combination of two parts.

      +
      • requiredDuringScheduling indicates that pods can be scheduled to the node only when all the defined rules are met (required).
      • IgnoredDuringExecution indicates that pods already running on the node do not need to meet the defined rules. That is, if the required label is later removed from the node, existing pods that require the label are not evicted or re-scheduled.
      +

      In addition, the value of operator is In, indicating that the label value must be in the values list. Other available operator values are as follows:

      +
      • NotIn: The label value is not in a list.
      • Exists: A specific label exists.
      • DoesNotExist: A specific label does not exist.
      • Gt: The label value is greater than a specified value (the value is parsed as an integer).
      • Lt: The label value is less than a specified value (the value is parsed as an integer).
      +

      Note that there is no such thing as nodeAntiAffinity because operators NotIn and DoesNotExist provide the same function.

      +
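      For example, the following snippet (a sketch reusing the gpu label from this section, not taken from the guide) keeps pods off the nodes labeled gpu=true, which achieves the anti-affinity effect described above:

      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: gpu
                operator: NotIn      # also matches nodes that do not have the gpu label at all
                values:
                - "true"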

      The following describes how to check whether the rule takes effect. Assume that a cluster has three nodes.

      +
      $ kubectl get node
      +NAME            STATUS   ROLES    AGE   VERSION                            
      +192.168.0.212   Ready    <none>   13m   v1.15.6-r1-20.3.0.2.B001-15.30.2
      +192.168.0.94    Ready    <none>   13m   v1.15.6-r1-20.3.0.2.B001-15.30.2   
      +192.168.0.97    Ready    <none>   13m   v1.15.6-r1-20.3.0.2.B001-15.30.2   
      +

      Add the gpu=true label to the 192.168.0.212 node.

      +
      $ kubectl label node 192.168.0.212 gpu=true
      +node/192.168.0.212 labeled
      +
      +$ kubectl get node -L gpu
      +NAME            STATUS   ROLES    AGE   VERSION                            GPU
      +192.168.0.212   Ready    <none>   13m   v1.15.6-r1-20.3.0.2.B001-15.30.2   true
      +192.168.0.94    Ready    <none>   13m   v1.15.6-r1-20.3.0.2.B001-15.30.2   
      +192.168.0.97    Ready    <none>   13m   v1.15.6-r1-20.3.0.2.B001-15.30.2   
      +

      Create the Deployment. You can find that all pods are deployed on the 192.168.0.212 node.

      +
      $ kubectl create -f affinity.yaml 
      +deployment.apps/gpu created
      +
      +$ kubectl get pod -o wide
      +NAME                     READY   STATUS    RESTARTS   AGE   IP            NODE         
      +gpu-6df65c44cf-42xw4     1/1     Running   0          15s   172.16.0.37   192.168.0.212
      +gpu-6df65c44cf-jzjvs     1/1     Running   0          15s   172.16.0.36   192.168.0.212
      +gpu-6df65c44cf-zv5cl     1/1     Running   0          15s   172.16.0.38   192.168.0.212
      +
      +

      Node Preference Rule

      The preceding requiredDuringSchedulingIgnoredDuringExecution rule is a hard selection rule. There is another type of selection rule, that is, preferredDuringSchedulingIgnoredDuringExecution. It is used to specify which nodes are preferred during scheduling.

      +

      To achieve this effect, add a node attached with SAS disks to the cluster, add the DISK=SAS label to the node, and add the DISK=SSD label to the other three nodes.

      +
      $ kubectl get node -L DISK,gpu
      +NAME            STATUS   ROLES    AGE     VERSION                            DISK     GPU
      +192.168.0.100   Ready    <none>   7h23m   v1.15.6-r1-20.3.0.2.B001-15.30.2   SAS   
      +192.168.0.212   Ready    <none>   8h      v1.15.6-r1-20.3.0.2.B001-15.30.2   SSD      true
      +192.168.0.94    Ready    <none>   8h      v1.15.6-r1-20.3.0.2.B001-15.30.2   SSD   
      +192.168.0.97    Ready    <none>   8h      v1.15.6-r1-20.3.0.2.B001-15.30.2   SSD  
      +

      Define a Deployment. Use the preferredDuringSchedulingIgnoredDuringExecution rule to set the weight of nodes with the SSD disk installed as 80 and nodes with the gpu=true label as 20. In this way, pods are preferentially deployed on the nodes with the SSD disk installed.

      +
      apiVersion: apps/v1
      +kind: Deployment
      +metadata:
      +  name:  gpu
      +  labels:
      +    app:  gpu
      +spec:
      +  selector:
      +    matchLabels:
      +      app: gpu
      +  replicas: 10
      +  template:
      +    metadata:
      +      labels:
      +        app:  gpu
      +    spec:
      +      containers:
      +      - image:  nginx:alpine
      +        name:  gpu
      +        resources:
      +          requests:
      +            cpu:  100m
      +            memory:  200Mi
      +          limits:
      +            cpu:  100m
      +            memory:  200Mi
      +      imagePullSecrets:
      +      - name: default-secret
      +      affinity:
      +        nodeAffinity:
      +          preferredDuringSchedulingIgnoredDuringExecution:
      +          - weight: 80 
      +            preference: 
      +              matchExpressions: 
      +              - key: DISK
      +                operator: In 
      +                values: 
      +                - SSD
      +          - weight: 20 
      +            preference: 
      +              matchExpressions: 
      +              - key: gpu
      +                operator: In 
      +                values: 
      +                - "true"
      +

      After the deployment, there are five pods deployed on the node 192.168.0.212 (label: DISK=SSD and GPU=true), three pods deployed on the node 192.168.0.97 (label: DISK=SSD), and two pods deployed on the node 192.168.0.100 (label: DISK=SAS).

      +

      From the following output, you can find that no pods of the Deployment are scheduled to node 192.168.0.94 (label: DISK=SSD). This is because the node already has many pods on it and its resource usage is high. This also indicates that the preferredDuringSchedulingIgnoredDuringExecution rule defines a preference rather than a hard requirement.

      +
      $ kubectl create -f affinity2.yaml 
      +deployment.apps/gpu created
      +
      +$ kubectl get po -o wide
      +NAME                   READY   STATUS    RESTARTS   AGE     IP            NODE         
      +gpu-585455d466-5bmcz   1/1     Running   0          2m29s   172.16.0.44   192.168.0.212
      +gpu-585455d466-cg2l6   1/1     Running   0          2m29s   172.16.0.63   192.168.0.97 
      +gpu-585455d466-f2bt2   1/1     Running   0          2m29s   172.16.0.79   192.168.0.100
      +gpu-585455d466-hdb5n   1/1     Running   0          2m29s   172.16.0.42   192.168.0.212
      +gpu-585455d466-hkgvz   1/1     Running   0          2m29s   172.16.0.43   192.168.0.212
      +gpu-585455d466-mngvn   1/1     Running   0          2m29s   172.16.0.48   192.168.0.97 
      +gpu-585455d466-s26qs   1/1     Running   0          2m29s   172.16.0.62   192.168.0.97 
      +gpu-585455d466-sxtzm   1/1     Running   0          2m29s   172.16.0.45   192.168.0.212
      +gpu-585455d466-t56cm   1/1     Running   0          2m29s   172.16.0.64   192.168.0.100
      +gpu-585455d466-t5w5x   1/1     Running   0          2m29s   172.16.0.41   192.168.0.212
      +
      +

      In the preceding example, the node scheduling priority is as follows. Nodes with both SSD and gpu=true labels have the highest priority. Nodes with the SSD label but no gpu=true label have the second priority (weight: 80). Nodes with the gpu=true label but no SSD label have the third priority. Nodes without any of these two labels have the lowest priority.

      +
      Figure 1 Scheduling priority
      +

      Workload Affinity (podAffinity)

      Node affinity rules affect only the affinity between pods and nodes. Kubernetes also supports configuring inter-pod affinity rules. For example, the frontend and backend of an application can be deployed together on one node to reduce access latency. There are also two types of inter-pod affinity rules: requiredDuringSchedulingIgnoredDuringExecution and preferredDuringSchedulingIgnoredDuringExecution.

      +

      Assume that the backend of an application has been created and has the app=backend label.

      +
      $ kubectl get po -o wide
      +NAME                       READY   STATUS    RESTARTS   AGE     IP            NODE         
      +backend-658f6cb858-dlrz8   1/1     Running   0          2m36s   172.16.0.67   192.168.0.100
      +

      You can configure the following pod affinity rule to deploy the frontend pods of the application to the same node as the backend pods.

      +
      apiVersion: apps/v1
      +kind: Deployment
      +metadata:
      +  name:   frontend
      +  labels:
      +    app:  frontend
      +spec:
      +  selector:
      +    matchLabels:
      +      app: frontend
      +  replicas: 3
      +  template:
      +    metadata:
      +      labels:
      +        app:  frontend
      +    spec:
      +      containers:
      +      - image:  nginx:alpine
      +        name:  frontend
      +        resources:
      +          requests:
      +            cpu:  100m
      +            memory:  200Mi
      +          limits:
      +            cpu:  100m
      +            memory:  200Mi
      +      imagePullSecrets:
      +      - name: default-secret
      +      affinity:
      +        podAffinity:
      +          requiredDuringSchedulingIgnoredDuringExecution:
      +          - topologyKey: kubernetes.io/hostname
      +            labelSelector:
      +              matchExpressions: 
      +              - key: app
      +                operator: In 
      +                values: 
      +                - backend
      +

      Deploy the frontend and you can find that the frontend is deployed on the same node as the backend.

      +
      $ kubectl create -f affinity3.yaml 
      +deployment.apps/frontend created
      +
      +$ kubectl get po -o wide
      +NAME                        READY   STATUS    RESTARTS   AGE     IP            NODE         
      +backend-658f6cb858-dlrz8    1/1     Running   0          5m38s   172.16.0.67   192.168.0.100
      +frontend-67ff9b7b97-dsqzn   1/1     Running   0          6s      172.16.0.70   192.168.0.100
      +frontend-67ff9b7b97-hxm5t   1/1     Running   0          6s      172.16.0.71   192.168.0.100
      +frontend-67ff9b7b97-z8pdb   1/1     Running   0          6s      172.16.0.72   192.168.0.100
      +

      The topologyKey field specifies the selection range. The scheduler selects nodes within the range based on the affinity rule defined. The effect of topologyKey is not fully demonstrated in the preceding example because all the nodes have the kubernetes.io/hostname label, that is, all the nodes are within the range.

      +

      To see how topologyKey works, assume that the backend of the application has two pods, which are running on different nodes.

      +
      $ kubectl get po -o wide
      +NAME                       READY   STATUS    RESTARTS   AGE     IP            NODE         
      +backend-658f6cb858-5bpd6   1/1     Running   0          23m     172.16.0.40   192.168.0.97
      +backend-658f6cb858-dlrz8   1/1     Running   0          2m36s   172.16.0.67   192.168.0.100
      +

      Add the prefer=true label to nodes 192.168.0.97 and 192.168.0.94.

      +
      $ kubectl label node 192.168.0.97 prefer=true
      +node/192.168.0.97 labeled
      +$ kubectl label node 192.168.0.94 prefer=true
      +node/192.168.0.94 labeled
      +
      +$ kubectl get node -L prefer
      +NAME            STATUS   ROLES    AGE   VERSION                            PREFER
      +192.168.0.100   Ready    <none>   44m   v1.15.6-r1-20.3.0.2.B001-15.30.2   
      +192.168.0.212   Ready    <none>   91m   v1.15.6-r1-20.3.0.2.B001-15.30.2   
      +192.168.0.94    Ready    <none>   91m   v1.15.6-r1-20.3.0.2.B001-15.30.2   true
      +192.168.0.97    Ready    <none>   91m   v1.15.6-r1-20.3.0.2.B001-15.30.2   true
      +

      Define topologyKey in the podAffinity section as prefer.

      +
            affinity:
      +        podAffinity:
      +          requiredDuringSchedulingIgnoredDuringExecution:
      +          - topologyKey: prefer
      +            labelSelector:
      +              matchExpressions: 
      +              - key: app
      +                operator: In 
      +                values: 
      +                - backend
      +

      The scheduler recognizes the nodes with the prefer label, that is, 192.168.0.97 and 192.168.0.94, and then finds the pods with the app=backend label. In this way, all frontend pods are deployed onto 192.168.0.97.

      +
      $ kubectl create -f affinity3.yaml 
      +deployment.apps/frontend created
      +
      +$ kubectl get po -o wide
      +NAME                        READY   STATUS    RESTARTS   AGE     IP            NODE         
      +backend-658f6cb858-5bpd6    1/1     Running   0          26m     172.16.0.40   192.168.0.97
      +backend-658f6cb858-dlrz8    1/1     Running   0          5m38s   172.16.0.67   192.168.0.100
      +frontend-67ff9b7b97-dsqzn   1/1     Running   0          6s      172.16.0.70   192.168.0.97
      +frontend-67ff9b7b97-hxm5t   1/1     Running   0          6s      172.16.0.71   192.168.0.97
      +frontend-67ff9b7b97-z8pdb   1/1     Running   0          6s      172.16.0.72   192.168.0.97
      +
      +

      Workload Anti-Affinity (podAntiAffinity)

      Unlike the scenarios in which pods are preferentially scheduled onto the same node, sometimes you want the exact opposite. For example, certain pods may degrade performance if they are deployed together.

      +

      The following example defines an inter-pod anti-affinity rule, which specifies that pods must not be scheduled to nodes that already have pods with the app=frontend label. That is, the frontend pods are deployed onto different nodes, with each node running at most one replica.

      +
      apiVersion: apps/v1
      +kind: Deployment
      +metadata:
      +  name:   frontend
      +  labels:
      +    app:  frontend
      +spec:
      +  selector:
      +    matchLabels:
      +      app: frontend
      +  replicas: 5
      +  template:
      +    metadata:
      +      labels:
      +        app:  frontend
      +    spec:
      +      containers:
      +      - image:  nginx:alpine
      +        name:  frontend
      +        resources:
      +          requests:
      +            cpu:  100m
      +            memory:  200Mi
      +          limits:
      +            cpu:  100m
      +            memory:  200Mi
      +      imagePullSecrets:
      +      - name: default-secret
      +      affinity:
      +        podAntiAffinity:
      +          requiredDuringSchedulingIgnoredDuringExecution:
      +          - topologyKey: kubernetes.io/hostname
      +            labelSelector:
      +              matchExpressions: 
      +              - key: app
      +                operator: In 
      +                values: 
      +                - frontend
      +

      Deploy the frontend and query the deployment results. You can find that each node has only one frontend pod and one pod of the Deployment is Pending. This is because when the scheduler is deploying the fifth pod, all nodes already have one pod with the app=frontend label on them. There is no available node. Therefore, the fifth pod will remain in the Pending status.

      +
      $ kubectl create -f affinity4.yaml 
      +deployment.apps/frontend created
      +
      +$ kubectl get po -o wide
      +NAME                        READY   STATUS    RESTARTS   AGE   IP            NODE         
      +frontend-6f686d8d87-8dlsc   1/1     Running   0          18s   172.16.0.76   192.168.0.100
      +frontend-6f686d8d87-d6l8p   0/1     Pending   0          18s   <none>        <none>       
      +frontend-6f686d8d87-hgcq2   1/1     Running   0          18s   172.16.0.54   192.168.0.97 
      +frontend-6f686d8d87-q7cfq   1/1     Running   0          18s   172.16.0.47   192.168.0.212
      +frontend-6f686d8d87-xl8hx   1/1     Running   0          18s   172.16.0.23   192.168.0.94 
      +
      +
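      If every replica must be scheduled even when there are more replicas than nodes, the required rule can be relaxed into a preference. The following is a sketch (not part of the example above) that uses preferredDuringSchedulingIgnoredDuringExecution instead, so that pods are spread across nodes when possible but extra replicas can still share a node:

      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              topologyKey: kubernetes.io/hostname
              labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values:
                  - frontend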

      Configuring Scheduling Policies

      1. Log in to the CCE console.
      2. When creating a workload, click Scheduling in the Advanced Settings area.

        +

        + + + + + + + + + + +
        Table 1 Node affinity settings

        Parameter

        +

        Description

        +

        Required

        +

        This is a hard rule that must be met for scheduling. It corresponds to requiredDuringSchedulingIgnoredDuringExecution in Kubernetes. Multiple required rules can be set, and scheduling will be performed as long as any one of them is met.

        +

        Preferred

        +

        This is a soft rule specifying preferences that the scheduler will try to enforce but will not guarantee. It corresponds to preferredDuringSchedulingIgnoredDuringExecution in Kubernetes. The scheduler tries to meet these rules, but scheduling is still performed even if none of them can be met.

        +
        +
        +

      3. Under Node Affinity, Workload Affinity, and Workload Anti-Affinity, click to add scheduling policies. In the dialog box displayed, add a policy directly or by specifying a node or an AZ.

        Specifying a node or an AZ is essentially implemented through labels. The kubernetes.io/hostname label is used when you specify a node, and the failure-domain.beta.kubernetes.io/zone label is used when you specify an AZ. +
        + + + + + + + + + + + + + + + + + + + + + + +
        Table 2 Scheduling policy configuration

        Parameter

        +

        Description

        +

        Label

        +

        Node label. You can use the default label or customize a label.

        +

        Operator

        +

        The following relations are supported: In, NotIn, Exists, DoesNotExist, Gt, and Lt

        +
        • In: The label value is in the specified value list.
        • NotIn: The label value is not in the specified value list.
        • Exists: A specific label exists.
        • DoesNotExist: A specific label does not exist.
        • Gt: The label value is greater than a specified value (the value is parsed as an integer).
        • Lt: The label value is less than a specified value (the value is parsed as an integer).
        +

        Label Value

        +

        Label value.

        +

        Namespace

        +

        This parameter is available only in a workload affinity or anti-affinity scheduling policy.

        +

        Namespace for which the scheduling policy takes effect.

        +

        Topology Key

        +

        This parameter can be used only in a workload affinity or anti-affinity scheduling policy.

        +

        Select the scope specified by topologyKey and then select the content defined by the policy.

        +

        Weight

        +

        This parameter can be set only in a Preferred scheduling policy.

        +
        +
        +
        +

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0245.html b/docs/cce/umn/cce_10_0245.html new file mode 100644 index 00000000..6bffd14f --- /dev/null +++ b/docs/cce/umn/cce_10_0245.html @@ -0,0 +1,44 @@ + + +

      Example: Designing and Configuring Permissions for Users in a Department

      +

      Overview

      Kubernetes is replacing the conventional distributed task scheduling mode. CCE provides Kubernetes support so that you can easily deploy, manage, and scale containerized applications in the cloud.

      +

      To help enterprise administrators manage resource permissions in clusters, CCE provides multi-dimensional, fine-grained permission policies and management measures. CCE permissions are described as follows:

      +
      • Cluster-level permissions: allowing a user group to perform operations on clusters, nodes, node pools, charts, and add-ons. These permissions are assigned based on IAM system policies.
      • Namespace-level permissions: allowing a user or user group to perform operations on Kubernetes resources, such as workloads, networking, storage, and namespaces. These permissions are assigned based on Kubernetes RBAC.
      +

      Cluster permissions and namespace permissions are independent of each other but must be used together. The permissions set for a user group apply to all users in the user group. When multiple permissions are added to a user or user group, they take effect at the same time (the union set is used).

      +
      +

      Permission Design

      The following uses company X as an example.

      +

      Generally, a company has multiple departments or projects, and each department has multiple members. Therefore, you need to design how permissions are to be assigned to different groups and projects, and set a user name for each member to facilitate subsequent user group and permissions configuration.

      +

      The following figure shows the organizational structure of a department in a company and the permissions to be assigned to each member:

      +

      +
      +

      Director: David

      David is a department director of company X. To assign him all CCE permissions (both cluster and namespace permissions), you need to create the cce-admin user group for David on the IAM console and assign the CCE Administrator role.

      +

      CCE Administrator: This role has all CCE permissions. You do not need to assign other permissions.

      +

      CCE FullAccess and CCE ReadOnlyAccess: These policies are related to cluster management permissions and configured only for cluster-related resources (such as clusters and nodes). You must also configure namespace permissions to perform operations on Kubernetes resources (such as workloads and Services).

      +
      +
      +

      O&M Leader: James

      James is the O&M team leader of the department. He needs the cluster permissions for all projects and the read-only permissions for all namespaces.

      +

      To assign the permissions, create a user group named cce-sre on the IAM console and add James to this user group. Then, assign CCE FullAccess to the user group cce-sre to allow it to perform operations on clusters in all projects.

      +

      Assigning Read-only Permissions on All Clusters and Namespaces to All Team Leaders and Engineers

      +

      You can create a read-only user group named read_only on the IAM console and add users to the user group.

      +
      • Although the development engineers Linda and Peter do not require cluster management permissions, they still need to view data on the CCE console. Therefore, the read-only cluster permission is required.
      • For the O&M engineer William, assign the read-only permission on clusters to him in this step.
      • The O&M team leader James already has the management permissions on all clusters. You can add him to the read_only user group to assign the read-only permission on clusters to him.
      +

      Users James, Robert, William, Linda, and Peter are added to the read_only user group.

      +

      Assign the read-only permission on clusters to the user group read_only.

      +

      Return to the CCE console, and add the read-only permission on namespaces to the user group read_only to which the five users belong. Choose Permissions on the CCE console, and assign the read-only policy to the user group read_only for each cluster.

      +

      After the setting is complete, James has the cluster management permissions for all projects and the read-only permission on all namespaces, and Robert, William, Linda, and Peter have the read-only permission on all clusters and namespaces.

      +
      +

      Development Team Leader: Robert

      In the previous steps, Robert has been assigned the read-only permission on all clusters and namespaces. Now, assign the administrator permissions on all namespaces to Robert.

      +

      Therefore, you need to assign the administrator permissions on all namespaces in all clusters to Robert.

      +
      +

      O&M Engineer: William

      In the previous steps, William has been assigned the read-only permission on all clusters and namespaces. He also requires the cluster management permissions. Therefore, you can log in to the IAM console, create a user group named cce-sre-b4 and assign CCE FullAccess to William.

      +

      Now, William has the cluster management permissions and the read-only permission on all namespaces.

      +
      +

      Development Engineers: Linda and Peter

      In the previous steps, Linda and Peter have been assigned the read-only permission on clusters and namespaces. Therefore, you only need to assign the edit policy to them.

      +

      By now, all the required permissions are assigned to the department members.

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0247.html b/docs/cce/umn/cce_10_0247.html new file mode 100644 index 00000000..f1bd7ceb --- /dev/null +++ b/docs/cce/umn/cce_10_0247.html @@ -0,0 +1,29 @@ + + + +

      Services

      + +

      +
      + + + diff --git a/docs/cce/umn/cce_10_0248.html b/docs/cce/umn/cce_10_0248.html new file mode 100644 index 00000000..1642d0b6 --- /dev/null +++ b/docs/cce/umn/cce_10_0248.html @@ -0,0 +1,19 @@ + + +

      Ingresses

      +
      + + diff --git a/docs/cce/umn/cce_01_0249.html b/docs/cce/umn/cce_10_0249.html similarity index 52% rename from docs/cce/umn/cce_01_0249.html rename to docs/cce/umn/cce_10_0249.html index 653ad9f1..f4c02e32 100644 --- a/docs/cce/umn/cce_01_0249.html +++ b/docs/cce/umn/cce_10_0249.html @@ -1,27 +1,26 @@ - + -

      Overview

      -

      Direct Access to a Pod

      After a pod is created, the following problems may occur if you directly access the pod:

      -
      • The pod can be deleted and recreated at any time by a controller such as a Deployment, and the result of accessing the pod becomes unpredictable.
      • The IP address of the pod is allocated only after the pod is started. Before the pod is started, the IP address of the pod is unknown.
      • An application is usually composed of multiple pods that run the same image. Accessing pods one by one is not efficient.
      -

      For example, an application uses Deployments to create the frontend and backend. The frontend calls the backend for computing, as shown in Figure 1. Three pods are running in the backend, which are independent and replaceable. When a backend pod is re-created, the new pod is assigned with a new IP address, of which the frontend pod is unaware.

      -
      Figure 1 Inter-pod access
      +

      Service Overview

      +

      Direct Access to a Pod

      After a pod is created, the following problems may occur if you directly access the pod:

      +
      • The pod can be deleted and recreated at any time by a controller such as a Deployment, and the result of accessing the pod becomes unpredictable.
      • The IP address of the pod is allocated only after the pod is started. Before the pod is started, the IP address of the pod is unknown.
      • An application is usually composed of multiple pods that run the same image. Accessing pods one by one is not efficient.
      +

      For example, an application uses Deployments to create the frontend and backend. The frontend calls the backend for computing, as shown in Figure 1. Three pods are running in the backend, which are independent and replaceable. When a backend pod is re-created, the new pod is assigned with a new IP address, of which the frontend pod is unaware.

      +
      Figure 1 Inter-pod access
      -

      Using Services for Pod Access

      Kubernetes Services are used to solve the preceding pod access problems. A Service has a fixed IP address. (When a CCE cluster is created, a Service CIDR block is set, which is used to allocate IP addresses to Services.) A Service forwards requests accessing the Service to pods based on labels, and at the same time, perform load balancing for these pods.

      -

      In the preceding example, a Service is added for the frontend pod to access the backend pods. In this way, the frontend pod does not need to be aware of the changes on backend pods, as shown in Figure 2.

      -
      Figure 2 Accessing pods through a Service
      +

      Using Services for Pod Access

      Kubernetes Services are used to solve the preceding pod access problems. A Service has a fixed IP address. (When a CCE cluster is created, a Service CIDR block is set, which is used to allocate IP addresses to Services.) A Service forwards requests accessing the Service to pods based on labels, and at the same time, performs load balancing for these pods.

      +

      In the preceding example, a Service is added for the frontend pod to access the backend pods. In this way, the frontend pod does not need to be aware of the changes on backend pods, as shown in Figure 2.

      +
      Figure 2 Accessing pods through a Service
      -

      Service Types

      Kubernetes allows you to specify a Service of a required type. The values and actions of different types of Services are as follows:

      -
      • ClusterIP

        A ClusterIP Service allows workloads in the same cluster to use their cluster-internal domain names to access each other.

        +

        Service Types

        Kubernetes allows you to specify a Service of a required type. The values and actions of different types of Services are as follows:

        +
        • ClusterIP

          A ClusterIP Service allows workloads in the same cluster to use their cluster-internal domain names to access each other.

        -
        • NodePort

          A NodePort Service is exposed on each node's IP at a static port. A ClusterIP Service, to which the NodePort Service routes, is automatically created. By requesting <NodeIP>:<NodePort>, you can access a NodePort Service from outside the cluster.

          -
        • LoadBalancer

          A workload can be accessed from public networks through a load balancer, which is more secure and reliable than EIP.

          -
        • ENI LoadBalancer

          An ENI LoadBalancer Service directs traffic from a load balancer at backend pods, reducing the latency and avoiding performance loss for containerized applications.

          +
          • NodePort

            A NodePort Service is exposed on each node's IP at a static port. A ClusterIP Service, to which the NodePort Service routes, is automatically created. By requesting <NodeIP>:<NodePort>, you can access a NodePort Service from outside the cluster.

            +
          • LoadBalancer

            A workload can be accessed from public networks through a load balancer, which is more secure and reliable than EIP.
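      As a minimal sketch of the ClusterIP type described above (the names are illustrative, assuming backend pods labeled app=backend listening on port 80), a Service could be defined as follows:

      apiVersion: v1
      kind: Service
      metadata:
        name: backend              # other workloads in the same namespace can reach it by the name "backend"
      spec:
        type: ClusterIP            # the default Service type
        selector:
          app: backend             # traffic is forwarded to pods carrying this label
        ports:
        - port: 80                 # port exposed by the Service
          targetPort: 80           # port the backend containers listen on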

      diff --git a/docs/cce/umn/cce_10_0251.html b/docs/cce/umn/cce_10_0251.html new file mode 100644 index 00000000..82ed2b1b --- /dev/null +++ b/docs/cce/umn/cce_10_0251.html @@ -0,0 +1,42 @@ + + +

      Using ELB Ingresses on the Console

      +

      Prerequisites

      • An ingress provides network access for backend workloads. Ensure that a workload is available in a cluster. If no workload is available, deploy a workload by referring to Creating a Deployment, Creating a StatefulSet, or Creating a DaemonSet.
      • A NodePort Service has been configured for the workload. For details about how to configure the Service, see NodePort.
      • Dedicated load balancers must be of the application type (HTTP/HTTPS) and support private networks (with a private IP address).
      • In ELB passthrough networking (CCE Turbo cluster + dedicated load balancer), ELB Ingress supports ClusterIP Services. In other scenarios, ELB Ingress supports NodePort Services.
      +
      +

      Precautions

      • It is recommended that other resources not use the load balancer automatically created by an ingress. Otherwise, the load balancer will be occupied when the ingress is deleted, resulting in residual resources.
      • After an ingress is created, upgrade and maintain the configuration of the selected load balancers on the CCE console. Do not modify the configuration on the ELB console. Otherwise, the ingress service may be abnormal.
      • The URL registered in an ingress forwarding policy must be the same as the URL exposed by the backend Service. Otherwise, a 404 error will be returned.
      • In a cluster using the IPVS proxy mode, if the ingress and Service use the same ELB load balancer, the ingress cannot be accessed from the nodes and containers in the cluster because kube-proxy mounts the LoadBalancer Service address to the ipvs-0 bridge. This bridge intercepts the traffic of the load balancer connected to the ingress. You are advised to use different ELB load balancers for the ingress and Service.
      +
      +

      Adding an ELB Ingress

      This section uses an Nginx workload as an example to describe how to add an ELB ingress.

      +
      1. Log in to the CCE console and access the cluster console.
      2. Choose Networking in the navigation pane, click the Ingresses tab, and click Create Ingress in the upper right corner.
      3. Set ingress parameters.

        • Name: Specify a name of an ingress, for example, ingress-demo.
        • Load Balancer

          Select the load balancer to interconnect. Only load balancers in the same VPC as the cluster are supported. If no load balancer is available, click Create Load Balancer to create one on the ELB console.

          +

          Dedicated load balancers must support HTTP and the network type must support private networks.

          +
        • Listener Configuration: Ingress configures a listener for the load balancer, which listens to requests from the load balancer and distributes traffic. After the configuration is complete, a listener is created on the load balancer. The default listener name is k8s__<Protocol type>_<Port number>, for example, k8s_HTTP_80.
          • Front-End Protocol: HTTP and HTTPS are available.
          • External Port: Port number that is open to the ELB service address. Any available port number can be used.
          • Server Certificate: When an HTTPS listener is created for a load balancer, you need to bind a certificate to the load balancer to support encrypted authentication for HTTPS data transmission.

            If there is already an HTTPS ingress for the chosen port on the load balancer, the certificate of the new HTTPS ingress must be the same as the certificate of the existing ingress. This means that a listener has only one certificate. If two certificates, each with a different ingress, are added to the same listener of the same load balancer, only the certificate added earliest takes effect on the load balancer.

            +
            +
          • SNI: Server Name Indication (SNI) is an extended protocol of TLS. It allows multiple TLS-based access domain names to be provided for external systems using the same IP address and port. Different domain names can use different security certificates. After SNI is enabled, the client is allowed to submit the requested domain name when initiating a TLS handshake request. After receiving the TLS request, the load balancer searches for the certificate based on the domain name in the request. If the certificate corresponding to the domain name is found, the load balancer returns the certificate for authorization. Otherwise, the default certificate (server certificate) is returned for authorization.
            • The SNI option is available only when HTTPS is selected.
            +
            • This function is supported only for clusters of v1.15.11 and later.
            • Specify the domain name for the SNI certificate. Only one domain name can be specified for each certificate. Wildcard-domain certificates are supported.
            +
            +
          • Security Policy: combinations of different TLS versions and supported cipher suites available to HTTPS listeners.

            For details about security policies, see ELB User Guide.

            +
            • Security Policy is available only when HTTPS is selected.
            • This function is supported only for clusters of v1.17.9 and later.
            +
            +
          +
        • Forwarding Policies: When the access address of a request matches the forwarding policy (a forwarding policy consists of a domain name and URL, for example, 10.117.117.117:80/helloworld), the request is forwarded to the corresponding target Service for processing. Click Add Forwarding Policies to add multiple forwarding policies.
          • Domain Name: actual domain name. Ensure that the domain name has been registered and archived. Once a domain name rule is configured, you must use the domain name for access.
          • URL Matching Rule:
            • Prefix match: If the URL is set to /healthz, any URL starting with this prefix can be accessed, for example, /healthz/v1 and /healthz/v2.
            • Exact match: The URL can be accessed only when it is fully matched. For example, if the URL is set to /healthz, only /healthz can be accessed.
            • Regular expression: The URL is matched based on the regular expression. For example, if the regular expression is /[A-Za-z0-9_.-]+/test, all URLs that comply with this rule can be accessed, for example, /abcA9/test and /v1-Ab/test. Two regular expression standards are supported: POSIX and Perl.
            +
          • URL: access path to be registered, for example, /healthz.

            The URL added here must exist in the backend application. Otherwise, the forwarding fails.

            +

            For example, the default access path of the Nginx application is /usr/share/nginx/html. When adding /test to the ingress forwarding policy, ensure that your Nginx application contains the same path, that is, /usr/share/nginx/html/test; otherwise, 404 is returned.

            +
            +
          • Destination Service: Select an existing Service or create a Service. Services that do not meet search criteria are automatically filtered out.
          • Destination Service Port: Select the access port of the destination Service.
          • Set ELB:
            • Distribution Policy: Three algorithms are available: weighted round robin, weighted least connections, and source IP hash.
              • Weighted round robin: Requests are forwarded to different servers based on their weights, which indicate server processing performance. Backend servers with higher weights receive proportionately more requests, whereas equal-weighted servers receive the same number of requests. This algorithm is often used for short connections, such as HTTP services.
              • Weighted least connections: In addition to the weight assigned to each server, the number of connections processed by each backend server is also considered. Requests are forwarded to the server with the lowest connections-to-weight ratio. Building on least connections, the weighted least connections algorithm assigns a weight to each server based on their processing capability. This algorithm is often used for persistent connections, such as database connections.
              • Source IP hash: The source IP address of each request is calculated using the hash algorithm to obtain a unique hash key, and all backend servers are numbered. The generated key allocates the client to a particular server. This enables requests from different clients to be distributed in load balancing mode and ensures that requests from the same client are forwarded to the same server. This algorithm applies to TCP connections without cookies.
              +
              +
            • Type: This function is disabled by default. You can select Load balancer cookie.
            • Health Check: This function is disabled by default. The health check is for the load balancer. When TCP is selected during the port settings, you can choose either TCP or HTTP. Currently, UDP is not supported. By default, the service port (Node Port and container port of the Service) is used for health check. You can also specify another port for health check. After the port is specified, a service port named cce-healthz will be added for the Service.
            +
          • Operation: Click Delete to delete the configuration.
          +
        • Annotation: Ingresses provide some advanced CCE functions, which are implemented by annotations. When you use kubectl to create an ingress, annotations are used. For details, see Creating an Ingress - Automatically Creating a Load Balancer and Creating an Ingress - Interconnecting with an Existing Load Balancer.
        +

      4. After the configuration is complete, click OK. After the ingress is created, it is displayed in the ingress list.

        On the ELB console, you can view the ELB automatically created through CCE. The default name is cce-lb-ingress.UID. Click the ELB name to access its details page. On the Listeners tab page, view the route settings of the ingress, including the URL, listener port, and backend server group port.

        +

        After the ingress is created, upgrade and maintain the selected load balancer on the CCE console. Do not maintain the load balancer on the ELB console. Otherwise, the ingress service may be abnormal.

        +
        +

      5. Access the /healthz interface of the workload, for example, workload defaultbackend.

        1. Obtain the access address of the /healthz interface of the workload. The access address consists of the load balancer IP address, external port, and mapping URL, for example, 10.**.**.**:80/healthz.
        2. Enter the URL of the /healthz interface, for example, http://10.**.**.**:80/healthz, in the address box of the browser to access the workload, as shown in Figure 1.
          Figure 1 Accessing the /healthz interface of defaultbackend
          +
        +

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0252.html b/docs/cce/umn/cce_10_0252.html new file mode 100644 index 00000000..db0da4fc --- /dev/null +++ b/docs/cce/umn/cce_10_0252.html @@ -0,0 +1,863 @@ + + +

      Using kubectl to Create an ELB Ingress

      +

      Scenario

      This section uses an Nginx workload as an example to describe how to create an ELB ingress using kubectl.

      + +
      +

      Prerequisites

      • An ingress provides network access for backend workloads. Ensure that a workload is available in a cluster. If no workload is available, deploy a sample Nginx workload by referring to Creating a Deployment, Creating a StatefulSet, or Creating a DaemonSet.
      • A NodePort Service has been configured for the workload. For details about how to configure the Service, see NodePort. A minimal NodePort Service is sketched after this list.
      • Dedicated load balancers must be of the application type (HTTP/HTTPS) and support private networks (that is, have a private IP address).
      +
      +
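      A minimal NodePort Service for the sample Nginx workload might look as follows. The app: nginx selector is an assumption; it must match the pod labels of your workload:

      apiVersion: v1
      kind: Service
      metadata:
        name: <your_service_name>   # Service name that the ingress backend will reference
      spec:
        type: NodePort
        selector:
          app: nginx                # Must match the pod labels of the target workload
        ports:
        - port: 80                  # Service port referenced by the ingress backend
          targetPort: 80            # Container port of the Nginx workload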

      Ingress Description of networking.k8s.io/v1

      In CCE clusters of v1.23 or later, the ingress version is switched to networking.k8s.io/v1.

      +

      Compared with v1beta1, v1 has the following differences in parameters:

      +
      • The ingress type is changed from kubernetes.io/ingress.class in annotations to spec.ingressClassName.
      • The format of backend is changed (see the fragments sketched after this list).
      • The pathType parameter must be specified for each path. The options are as follows:
        • ImplementationSpecific: The matching method depends on Ingress Controller. The matching method defined by ingress.beta.kubernetes.io/url-match-mode is used in CCE, which is the same as v1beta1.
        • Exact: exact matching of the URL, which is case-sensitive.
        • Prefix: matching based on the URL prefix separated by a slash (/). The match is case-sensitive, and elements in the path are matched one by one. A path element refers to a list of labels in the path separated by a slash (/).
        +
      +

      +
      +
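      For reference, the following fragments (taken from the full examples later in this section) show the same backend in both API versions:

      # networking.k8s.io/v1beta1 (clusters of v1.21 or earlier)
      backend:
        serviceName: <your_service_name>
        servicePort: 80

      # networking.k8s.io/v1 (clusters of v1.23 or later)
      backend:
        service:
          name: <your_service_name>
          port:
            number: 80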

      Creating an Ingress - Automatically Creating a Load Balancer

      The following describes how to run the kubectl command to automatically create a load balancer when creating an ingress.

      +
      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Create a YAML file named ingress-test.yaml. The file name can be customized.

        vi ingress-test.yaml

        +

        Starting from cluster v1.23, the ingress version is switched from networking.k8s.io/v1beta1 to networking.k8s.io/v1. For details about the differences between v1 and v1beta1, see Ingress Description of networking.k8s.io/v1.

        +
        +
        Example of a shared load balancer (public network access) for clusters of v1.23 or later:
        apiVersion: networking.k8s.io/v1
        +kind: Ingress 
        +metadata: 
        +  name: ingress-test
        +  annotations: 
        +    kubernetes.io/elb.class: union
        +    kubernetes.io/elb.port: '80'
        +    kubernetes.io/elb.autocreate: 
        +      '{
        +          "type":"public",
        +          "bandwidth_name":"cce-bandwidth-******",
        +          "bandwidth_chargemode":"bandwidth",
        +          "bandwidth_size":5,
        +          "bandwidth_sharetype":"PER",
        +          "eip_type":"5_bgp"
        +        }'
        +spec:
        +  rules: 
        +  - host: ''
        +    http: 
        +      paths: 
        +      - path: '/'
        +        backend: 
        +          service:
        +            name: <your_service_name>  # Replace it with the name of your target Service.
        +            port: 
        +              number: 8080             # Replace 8080 with the port number of your target Service.
        +        property:
        +          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
        +        pathType: ImplementationSpecific
        +  ingressClassName: cce    # ELB ingress is used.
        +
        +
        Example of a shared load balancer (public network access) for clusters of v1.21 or earlier:
        apiVersion: networking.k8s.io/v1beta1
        +kind: Ingress 
        +metadata: 
        +  name: ingress-test
        +  annotations: 
        +    kubernetes.io/elb.class: union
        +    kubernetes.io/ingress.class: cce    # ELB ingress is used.
        +    kubernetes.io/elb.port: '80'
        +    kubernetes.io/elb.autocreate: 
        +      '{
        +          "type":"public",
        +          "bandwidth_name":"cce-bandwidth-******",
        +          "bandwidth_chargemode":"bandwidth",
        +          "bandwidth_size":5,
        +          "bandwidth_sharetype":"PER",
        +          "eip_type":"5_bgp"
        +        }'
        +spec:
        +  rules: 
        +  - host: ''
        +    http: 
        +      paths: 
        +      - path: '/'
        +        backend: 
        +          serviceName: <your_service_name>  # Replace it with the name of your target Service.
        +          servicePort: 80
        +        property:
        +          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
        +
        +
        Example of a dedicated load balancer (public network access) for clusters of v1.23 or later:
        apiVersion: networking.k8s.io/v1
        +kind: Ingress
        +metadata:
        +  name: ingress-test
        +  namespace: default
        +  annotations:
        +    kubernetes.io/elb.class: performance
        +    kubernetes.io/elb.port: '80'
        +    kubernetes.io/elb.autocreate: 
        +      '{
        +          "type": "public",
        +          "bandwidth_name": "cce-bandwidth-******",
        +          "bandwidth_chargemode": "bandwidth",
        +          "bandwidth_size": 5,
        +          "bandwidth_sharetype": "PER",
        +          "eip_type": "5_bgp",
        +          "available_zone": [
        +              "eu-de-01"
        +          ],
        +          "l7_flavor_name": "L7_flavor.elb.s1.small"
        +       }'
        +spec:
        +  rules: 
        +  - host: ''
        +    http: 
        +      paths: 
        +      - path: '/'
        +        backend: 
        +          service:
        +            name: <your_service_name>  # Replace it with the name of your target Service.
        +            port: 
        +              number: 8080             # Replace 8080 with the port number of your target Service.
        +        property:
        +          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
        +        pathType: ImplementationSpecific
        +  ingressClassName: cce
        +
        +
        Example of a dedicated load balancer (public network access) for clusters of version 1.21 or earlier:
        apiVersion: networking.k8s.io/v1beta1
        +kind: Ingress
        +metadata:
        +  name: ingress-test
        +  namespace: default
        +  annotations:
        +    kubernetes.io/elb.class: performance
        +    kubernetes.io/ingress.class: cce
        +    kubernetes.io/elb.port: '80'
        +    kubernetes.io/elb.autocreate: 
        +      '{
        +          "type": "public",
        +          "bandwidth_name": "cce-bandwidth-******",
        +          "bandwidth_chargemode": "bandwidth",
        +          "bandwidth_size": 5,
        +          "bandwidth_sharetype": "PER",
        +          "eip_type": "5_bgp",
        +          "available_zone": [
        +              "eu-de-01"
        +          ],
        +          "l7_flavor_name": "L7_flavor.elb.s1.small"
        +       }'
        +spec:
        +  rules:
        +  - host: ''
        +    http:
        +      paths:
        +      - path: '/'
        +        backend: 
        +          serviceName: <your_service_name>  # Replace it with the name of your target Service.
        +          servicePort: 80
        +        property:
        +          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
        +
        + +
        Table 1 Key parameters

        Parameter

        +

        Mandatory

        +

        Type

        +

        Description

        +

        kubernetes.io/elb.class

        +

        Yes

        +

        String

        +

        Select a proper load balancer type.

        +

        The value can be:

        +
        • union: shared load balancer
        • performance: dedicated load balancer
        +

        Default: union

        +

        kubernetes.io/ingress.class

        +

        Yes

        +

        (only for clusters of v1.21 or earlier)

        +

        String

        +

        cce: The self-developed ELB ingress is used.

        +

        This parameter is mandatory when an ingress is created by calling the API.

        +

        ingressClassName

        +

        Yes

        +

        (only for clusters of v1.23 or later)

        +

        String

        +

        cce: The self-developed ELB ingress is used.

        +

        This parameter is mandatory when an ingress is created by calling the API.

        +

        kubernetes.io/elb.port

        +

        Yes

        +

        Integer

        +

        This parameter indicates the external port registered with the address of the LoadBalancer Service.

        +

        Supported range: 1 to 65535

        +

        kubernetes.io/elb.subnet-id

        +

        -

        +

        String

        +

        ID of the subnet where the cluster is located. The value can contain 1 to 100 characters.

        +
        • Mandatory when a load balancer is to be automatically created for a cluster of v1.11.7-r0 or earlier.
        • Optional for clusters later than v1.11.7-r0. It is left blank by default.
        +

        kubernetes.io/elb.autocreate

        +

        Yes

        +

        elb.autocreate object

        +

        Whether to automatically create a load balancer associated with an ingress. For details about the field description, see Table 2.

        +

        Example

        +
        • If a public network load balancer will be automatically created, set this parameter to the following value:

          {"type":"public","bandwidth_name":"cce-bandwidth-******","bandwidth_chargemode":"bandwidth","bandwidth_size":5,"bandwidth_sharetype":"PER","eip_type":"5_bgp","name":"james"}

          +
        • If a private network load balancer will be automatically created, set this parameter to the following value:

          {"type":"inner","name":"A-location-d-test"}

          +
        +

        host

        +

        No

        +

        String

        +

        Domain name for accessing the Service. By default, this parameter is left blank, and the domain name needs to be fully matched.

        +

        path

        +

        Yes

        +

        String

        +

        User-defined route path. All external access requests must match host and path.

        +

        serviceName

        +

        Yes

        +

        String

        +

        Name of the target Service bound to the ingress.

        +

        servicePort

        +

        Yes

        +

        Integer

        +

        Access port of the target Service.

        +

        ingress.beta.kubernetes.io/url-match-mode

        +

        No

        +

        String

        +

        Route matching policy.

        +

        Default: STARTS_WITH (prefix match)

        +

        Options:

        +
        • EQUAL_TO: exact match
        • STARTS_WITH: prefix match
        • REGEX: regular expression match
        +

        pathType

        +

        Yes

        +

        String

        +

        Path type. This field is supported only by clusters of v1.23 or later.

        +
        • ImplementationSpecific: The matching method depends on Ingress Controller. The matching method defined by ingress.beta.kubernetes.io/url-match-mode is used in CCE.
        • Exact: exact matching of the URL, which is case-sensitive.
        • Prefix: matching based on the URL prefix separated by a slash (/). The match is case-sensitive, and elements in the path are matched one by one. A path element refers to a list of labels in the path separated by a slash (/).
        +
        +
        + +
        Table 2 Data structure of the elb.autocreate field

        Parameter

        +

        Mandatory

        +

        Type

        +

        Description

        +

        type

        +

        No

        +

        String

        +

        Network type of the load balancer.

        +
        • public: public network load balancer
        • inner: private network load balancer
        +

        Default: inner

        +

        bandwidth_name

        +

        Yes for public network load balancers

        +

        String

        +

        Bandwidth name. The default value is cce-bandwidth-******.

        +

        Value range: a string of 1 to 64 characters, including lowercase letters, digits, and underscores (_). The value must start with a lowercase letter and end with a lowercase letter or digit.

        +

        bandwidth_chargemode

        +

        No

        +

        String

        +

        Bandwidth mode.

        + +

        bandwidth_size

        +

        Yes for public network load balancers

        +

        Integer

        +

        Bandwidth size. The value ranges from 1 Mbit/s to 2000 Mbit/s by default. The actual range varies depending on the configuration in each region.

        +
        • The minimum increment for bandwidth adjustment varies depending on the bandwidth range. The details are as follows:
          • The minimum increment is 1 Mbit/s if the allowed bandwidth ranges from 0 Mbit/s to 300 Mbit/s (with 300 Mbit/s included).
          • The minimum increment is 50 Mbit/s if the allowed bandwidth ranges from 300 Mbit/s to 1000 Mbit/s.
          • The minimum increment is 500 Mbit/s if the allowed bandwidth is greater than 1000 Mbit/s.
          +
        +

        bandwidth_sharetype

        +

        Yes for public network load balancers

        +

        String

        +

        Bandwidth type.

        +

        PER: dedicated bandwidth.

        +

        eip_type

        +

        Yes for public network load balancers

        +

        String

        +

        EIP type.

        +
        • 5_bgp: dynamic BGP
        • 5_sbgp: static BGP
        +

        name

        +

        No

        +

        String

        +

        Name of the automatically created load balancer.

        +

        Value range: a string of 1 to 64 characters, including lowercase letters, digits, and underscores (_). The value must start with a lowercase letter and end with a lowercase letter or digit.

        +

        Default: cce-lb+ingress.UID

        +
        +
        +

      3. Create an ingress.

        kubectl create -f ingress-test.yaml

        +

        If information similar to the following is displayed, the ingress has been created.

        +
        ingress/ingress-test created
        +

        kubectl get ingress

        +

        If information similar to the following is displayed, the ingress has been created successfully and the workload is accessible.

        +
        NAME             HOSTS     ADDRESS          PORTS   AGE
        +ingress-test     *         121.**.**.**     80      10s
        +

      4. Enter http://121.**.**.**:80 in the address box of the browser to access the workload (for example, Nginx workload).

        121.**.**.** indicates the IP address of the unified load balancer.

        +

      +
      +

      Creating an Ingress - Interconnecting with an Existing Load Balancer

      CCE allows you to connect to an existing load balancer when creating an ingress.
      • Existing dedicated load balancers must be of the application type (HTTP/HTTPS) and support private networks (that is, have a private IP address).
      +
      +
      +
      If the cluster version is v1.23 or later, the YAML file configuration is as follows:
      apiVersion: networking.k8s.io/v1
      +kind: Ingress 
      +metadata: 
      +  name: ingress-test
      +  annotations: 
      +    kubernetes.io/elb.id: <your_elb_id>  # Replace it with the ID of your existing load balancer.
      +    kubernetes.io/elb.ip: <your_elb_ip>  # Replace it with your existing load balancer IP.
      +    kubernetes.io/elb.port: '80'
      +spec:
      +  rules: 
      +  - host: ''
      +    http: 
      +      paths: 
      +      - path: '/'
      +        backend: 
      +          service:
      +            name: <your_service_name>  # Replace it with the name of your target Service.
      +            port: 
      +              number: 8080             # Replace 8080 with your target service port number.
      +        property:
      +          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
      +        pathType: ImplementationSpecific
      +  ingressClassName: cce               
      +
      +

      If the cluster version is v1.21 or earlier, the YAML file configuration is as follows:

      +
      apiVersion: networking.k8s.io/v1beta1
      +kind: Ingress 
      +metadata: 
      +  name: ingress-test
      +  annotations: 
      +    kubernetes.io/elb.id: <your_elb_id>  # Replace it with the ID of your existing load balancer.
      +    kubernetes.io/elb.ip: <your_elb_ip>  # Replace it with your existing load balancer IP.
      +    kubernetes.io/elb.port: '80'
      +    kubernetes.io/ingress.class: cce
      +spec:
      +  rules: 
      +  - host: ''
      +    http: 
      +      paths: 
      +      - path: '/'
      +        backend: 
      +          serviceName: <your_service_name>  # Replace it with the name of your target Service.
      +          servicePort: 80
      +        property:
      +          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
      + +
      Table 3 Key parameters

      Parameter

      +

      Mandatory

      +

      Type

      +

      Description

      +

      kubernetes.io/elb.id

      +

      Yes

      +

      String

      +

      This parameter indicates the ID of a load balancer. The value can contain 1 to 100 characters.

      +

      How to obtain:

      +

      On the management console, click Service List, and choose Networking > Elastic Load Balance. Click the name of the target load balancer. On the Summary tab page, find and copy the ID.

      +

      kubernetes.io/elb.ip

      +

      Yes

      +

      String

      +

      This parameter indicates the service address of a load balancer. The value can be the public IP address of a public network load balancer or the private IP address of a private network load balancer.

      +
      +
      +
      +

      Configuring HTTPS Certificates

      Ingress supports TLS certificate configuration and secures your Services with HTTPS.

      +

      If HTTPS is enabled for the same port of the same load balancer of multiple ingresses, you must select the same certificate.

      +
      +
      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Run the following command to create a YAML file named ingress-test-secret.yaml (the file name can be customized):

        vi ingress-test-secret.yaml

        +
        The YAML file is configured as follows:
        apiVersion: v1
        +data:
        +  tls.crt: LS0******tLS0tCg==
        +  tls.key: LS0tL******0tLS0K
        +kind: Secret
        +metadata:
        +  annotations:
        +    description: test for ingressTLS secrets
        +  name: ingress-test-secret
        +  namespace: default
        +type: IngressTLS
        +
        +

        In the preceding information, tls.crt and tls.key are only examples. Replace them with the actual files. The values of tls.crt and tls.key are Base64-encoded.

        +
        +

      3. Create a secret.

        kubectl create -f ingress-test-secret.yaml

        +

        If information similar to the following is displayed, the secret has been created:

        +
        secret/ingress-test-secret created
        +

        View the created secrets.

        +

        kubectl get secrets

        +

        If information similar to the following is displayed, the secret has been created successfully:

        +
        NAME                         TYPE                                  DATA      AGE
        +ingress-test-secret          IngressTLS                            2         13s
        +

      4. Create a YAML file named ingress-test.yaml. The file name can be customized.

        vi ingress-test.yaml

        +

        Default security policy (kubernetes.io/elb.tls-ciphers-policy) is supported only in clusters of v1.17.17 or later.

        +
        +

        The following uses the automatically created load balancer as an example. The YAML file is configured as follows:

        +

        For clusters of v1.21 or earlier:

        +
        apiVersion: networking.k8s.io/v1beta1
        +kind: Ingress 
        +metadata: 
        +  name: ingress-test
        +  annotations: 
        +    kubernetes.io/elb.class: union
        +    kubernetes.io/ingress.class: cce
        +    kubernetes.io/elb.port: '443'
        +    kubernetes.io/elb.autocreate: 
        +      '{
        +          "type":"public",
        +          "bandwidth_name":"cce-bandwidth-15511633796**",
        +          "bandwidth_chargemode":"bandwidth",
        +          "bandwidth_size":5,
        +          "bandwidth_sharetype":"PER",
        +          "eip_type":"5_bgp"
        +        }'
        +    kubernetes.io/elb.tls-ciphers-policy: tls-1-2
        +spec:
        +  tls: 
        +  - secretName: ingress-test-secret
        +  rules: 
        +  - host: ''
        +    http: 
        +      paths: 
        +      - path: '/'
        +        backend: 
        +          serviceName: <your_service_name>  # Replace it with the name of your target Service.
        +          servicePort: 80
        +        property:
        +          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
        +
        For clusters of v1.23 or later:
        apiVersion: networking.k8s.io/v1
        +kind: Ingress 
        +metadata: 
        +  name: ingress-test
        +  annotations: 
        +    kubernetes.io/elb.class: union
        +    kubernetes.io/elb.port: '443'
        +    kubernetes.io/elb.autocreate: 
        +      '{
        +          "type":"public",
        +          "bandwidth_name":"cce-bandwidth-15511633796**",
        +          "bandwidth_chargemode":"bandwidth",
        +          "bandwidth_size":5,
        +          "bandwidth_sharetype":"PER",
        +          "eip_type":"5_bgp"
        +        }'
        +    kubernetes.io/elb.tls-ciphers-policy: tls-1-2
        +spec:
        +  tls: 
        +  - secretName: ingress-test-secret
        +  rules: 
        +  - host: ''
        +    http: 
        +      paths: 
        +      - path: '/'
        +        backend: 
        +          service:
        +            name: <your_service_name>  # Replace it with the name of your target Service.
        +            port: 
        +              number: 8080             # Replace 8080 with the port number of your target Service.
        +        property:
        +          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
        +        pathType: ImplementationSpecific
        +  ingressClassName: cce 
        +
        + +
        Table 4 Key parameters

        Parameter

        +

        Mandatory

        +

        Type

        +

        Description

        +

        kubernetes.io/elb.tls-ciphers-policy

        +

        No

        +

        String

        +

        The default value is tls-1-2, which is the default security policy used by the listener and takes effect only when the HTTPS protocol is used.

        +

        Options:

        +
        • tls-1-0
        • tls-1-1
        • tls-1-2
        • tls-1-2-strict
        +

        For details of cipher suites for each security policy, see Table 5.

        +

        tls

        +

        No

        +

        Array of strings

        +

        This parameter is mandatory if HTTPS is used. Multiple independent domain names and certificates can be added to this parameter. For details, see Configuring the Server Name Indication (SNI).

        +

        secretName

        +

        No

        +

        String

        +

        This parameter is mandatory if HTTPS is used. Set this parameter to the name of the created secret.

        +
        +
        + +
        Table 5 tls_ciphers_policy parameter description

        Security Policy

        +

        TLS Version

        +

        Cipher Suite

        +

        tls-1-0

        +

        TLS 1.2

        +

        TLS 1.1

        +

        TLS 1.0

        +

        ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:AES128-SHA256:AES256-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:AES128-SHA:AES256-SHA

        +

        tls-1-1

        +

        TLS 1.2

        +

        TLS 1.1

        +

        tls-1-2

        +

        TLS 1.2

        +

        tls-1-2-strict

        +

        TLS 1.2

        +

        ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:AES128-SHA256:AES256-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384

        +
        +
        +

      5. Create an ingress.

        kubectl create -f ingress-test.yaml

        +

        If information similar to the following is displayed, the ingress has been created.

        +
        ingress/ingress-test created
        +

        View the created ingress.

        +

        kubectl get ingress

        +

        If information similar to the following is displayed, the ingress has been created successfully and the workload is accessible.

        +
        NAME             HOSTS     ADDRESS          PORTS   AGE
        +ingress-test     *         121.**.**.**     80      10s
        +

      6. Enter https://121.**.**.**:443 in the address box of the browser to access the workload (for example, Nginx workload).

        121.**.**.** indicates the IP address of the unified load balancer.

        +

      +
      +

      Using HTTP/2

      Ingresses can use HTTP/2 to expose services. Connections from the load balancer proxy to your applications use HTTP/1.X by default. If your application is capable of receiving HTTP/2 requests, you can add the following field to the ingress annotation to enable the use of HTTP/2:

      +

      `kubernetes.io/elb.http2-enable: 'true'`

      +

      The following shows the YAML file for associating with an existing load balancer:

      +

      For clusters of v1.21 or earlier:

      +
      apiVersion: networking.k8s.io/v1beta1
      +kind: Ingress 
      +metadata: 
      +  name: ingress-test
      +  annotations: 
      +    kubernetes.io/elb.id: <your_elb_id>  # Replace it with the ID of your existing load balancer.
      +    kubernetes.io/elb.ip: <your_elb_ip>  # Replace it with the IP of your existing load balancer.
      +    kubernetes.io/elb.port: '443'
      +    kubernetes.io/ingress.class: cce
      +    kubernetes.io/elb.http2-enable: 'true' # Enable HTTP/2.
      +spec:
      +  tls:
      +  - secretName: ingress-test-secret
      +  rules: 
      +  - host: ''
      +    http: 
      +      paths: 
      +      - path: '/'
      +        backend: 
      +          serviceName: <your_service_name>  # Replace it with the name of your target Service.
      +          servicePort: 80                   # Replace it with the port number of your target Service.
      +        property:
      +          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
      +
      For clusters of v1.23 or later:
      apiVersion: networking.k8s.io/v1
      +kind: Ingress 
      +metadata: 
      +  name: ingress-test
      +  annotations: 
      +    kubernetes.io/elb.id: <your_elb_id>  # Replace it with the ID of your existing load balancer.
      +    kubernetes.io/elb.ip: <your_elb_ip>  # Replace it with the IP of your existing load balancer.
      +    kubernetes.io/elb.port: '443'
      +    kubernetes.io/elb.http2-enable: 'true' # Enable HTTP/2.
      +spec:
      +  tls: 
      +  - secretName: ingress-test-secret
      +  rules: 
      +  - host: ''
      +    http: 
      +      paths: 
      +      - path: '/'
      +        backend: 
      +          service:
      +            name: <your_service_name>  # Replace it with the name of your target Service.
      +            port: 
      +              number: 8080             # Replace 8080 with the port number of your target Service.
      +        property:
      +          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
      +        pathType: ImplementationSpecific
      +  ingressClassName: cce 
      +
      +

      Table 6 HTTP/2 parameters

      + +

      Parameter

      +

      Mandatory

      +

      Type

      +

      Description

      +

      kubernetes.io/elb.http2-enable

      +

      No

      +

      Bool

      +

      Whether HTTP/2 is enabled. Request forwarding using HTTP/2 improves the access performance between your application and the load balancer. However, the load balancer still uses HTTP 1.X to forward requests to the backend server. This parameter is supported in clusters of v1.19.16-r0, v1.21.3-r0, and later versions.

      +

      Options:

      +
      • true: enabled
      • false: disabled (default value)
      +

      Note: HTTP/2 can be enabled or disabled only when the listener uses HTTPS. This parameter is invalid and defaults to false when the listener protocol is HTTP.

      +
      +
      +
      +

      Configuring the Server Name Indication (SNI)

      SNI allows multiple TLS-based access domain names to be provided for external systems using the same IP address and port number. Different domain names can use different security certificates.
      • Only one domain name can be specified for each SNI certificate. Wildcard-domain certificates are supported.
      • Security policy (kubernetes.io/elb.tls-ciphers-policy) is supported only in clusters of v1.17.11 or later.
      +
      +
      +

      You can enable SNI when the preceding conditions are met. The following uses the automatic creation of a load balancer as an example. In this example, sni-test-secret-1 and sni-test-secret-2 are SNI certificates. The domain names specified in the hosts fields must be the same as the domain names in the corresponding certificates.

      +
      For clusters of v1.21 or earlier:
      apiVersion: networking.k8s.io/v1beta1
      +kind: Ingress 
      +metadata: 
      +  name: ingress-test
      +  annotations: 
      +    kubernetes.io/elb.class: union
      +    kubernetes.io/ingress.class: cce
      +    kubernetes.io/elb.port: '443'
      +    kubernetes.io/elb.autocreate: 
      +      '{
      +          "type":"public",
      +          "bandwidth_name":"cce-bandwidth-******",
      +          "bandwidth_chargemode":"bandwidth",
      +          "bandwidth_size":5,
      +          "bandwidth_sharetype":"PER",
      +          "eip_type":"5_bgp"
      +        }'
      +    kubernetes.io/elb.tls-ciphers-policy: tls-1-2
      +spec:
      +  tls: 
      +  - secretName: ingress-test-secret
      +  - hosts:
      +      - example.top  # Domain name for which the certificate is issued
      +    secretName: sni-test-secret-1  
      +  - hosts:
      +      - example.com  # Domain name for which the certificate is issued
      +    secretName: sni-test-secret-2
      +  rules: 
      +  - host: ''
      +    http: 
      +      paths: 
      +      - path: '/'
      +        backend: 
      +          serviceName: <your_service_name>  # Replace it with the name of your target Service.
      +          servicePort: 80
      +        property:
      +          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
      +
      +
      For clusters of v1.23 or later:
      apiVersion: networking.k8s.io/v1
      +kind: Ingress 
      +metadata: 
      +  name: ingress-test
      +  annotations: 
      +    kubernetes.io/elb.class: union
      +    kubernetes.io/elb.port: '443'
      +    kubernetes.io/elb.autocreate: 
      +      '{
      +          "type":"public",
      +          "bandwidth_name":"cce-bandwidth-******",
      +          "bandwidth_chargemode":"bandwidth",
      +          "bandwidth_size":5,
      +          "bandwidth_sharetype":"PER",
      +          "eip_type":"5_bgp"
      +        }'
      +    kubernetes.io/elb.tls-ciphers-policy: tls-1-2
      +spec:
      +  tls: 
      +  - secretName: ingress-test-secret
      +  - hosts:
      +      - example.top  # Domain name for which the certificate is issued
      +    secretName: sni-test-secret-1  
      +  - hosts:
      +      - example.com  # Domain name for which the certificate is issued
      +    secretName: sni-test-secret-2
      +  rules: 
      +  - host: ''
      +    http: 
      +      paths: 
      +      - path: '/'
      +        backend: 
      +          service:
      +            name: <your_service_name>  # Replace it with the name of your target Service.
      +            port: 
      +              number: 8080             # Replace 8080 with the port number of your target Service.
      +        property:
      +          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
      +        pathType: ImplementationSpecific
      +  ingressClassName: cce 
      +
      +
      +

      Accessing Multiple Services

      Ingresses can route requests to multiple backend Services based on different matching policies. The spec field in the YAML file is set as below. You can access www.example.com/foo, www.example.com/bar, and foo.example.com/ to route to three different backend Services.

      +

      The URL registered in an ingress forwarding policy must be the same as the URL exposed by the backend Service. Otherwise, a 404 error will be returned.

      +
      +
      spec:
      +  rules: 
      +  - host: 'www.example.com'
      +    http: 
      +      paths: 
      +      - path: '/foo'
      +        backend: 
      +          serviceName: <your_service_name>  # Replace it with the name of your target Service.
      +          servicePort: 80
      +        property:
      +          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
      +      - path: '/bar'
      +        backend:
      +          serviceName: <your_service_name>  # Replace it with the name of your target Service.
      +          servicePort: 80
      +        property:
      +          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
      +  - host: 'foo.example.com'
      +    http:
      +      paths:
      +      - path: '/'
      +        backend:
      +          serviceName: <your_service_name>  # Replace it with the name of your target Service.
      +          servicePort: 80
      +        property:
      +          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
      +
      +
      +
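      The preceding spec uses the networking.k8s.io/v1beta1 backend format. For clusters of v1.23 or later, an equivalent spec in the networking.k8s.io/v1 format would look roughly as follows (a sketch assembled from the v1 examples earlier in this section; replace the Service names and ports with your own):

      spec:
        rules:
        - host: 'www.example.com'
          http:
            paths:
            - path: '/foo'
              backend:
                service:
                  name: <your_service_name>  # Replace it with the name of your target Service.
                  port:
                    number: 8080             # Replace 8080 with the port number of your target Service.
              property:
                ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
              pathType: ImplementationSpecific
            - path: '/bar'
              backend:
                service:
                  name: <your_service_name>  # Replace it with the name of your target Service.
                  port:
                    number: 8080
              property:
                ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
              pathType: ImplementationSpecific
        - host: 'foo.example.com'
          http:
            paths:
            - path: '/'
              backend:
                service:
                  name: <your_service_name>  # Replace it with the name of your target Service.
                  port:
                    number: 8080
              property:
                ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
              pathType: ImplementationSpecific
        ingressClassName: cce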
      + +
      + diff --git a/docs/cce/umn/cce_10_0257.html b/docs/cce/umn/cce_10_0257.html new file mode 100644 index 00000000..9d30d728 --- /dev/null +++ b/docs/cce/umn/cce_10_0257.html @@ -0,0 +1,207 @@ + + +

      Creating a Deployment Mounted with an EVS Volume

      +

      Scenario

      After an EVS volume is created or imported to CCE, you can mount it to a workload.

      +

      EVS disks cannot be attached across AZs. Before mounting a volume, you can run the kubectl get pvc command to query the available PVCs in the AZ where the current cluster is located.

      +
      +
      +

      Prerequisites

      You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

      +
      +

      Notes and Constraints

      The following configuration example applies to clusters of Kubernetes 1.15 or later.

      +
      +

      Using EVS Volumes for Deployments

      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Run the following commands to configure the evs-deployment-example.yaml file, which is used to create a Deployment.

        touch evs-deployment-example.yaml

        +

        vi evs-deployment-example.yaml

        +
        Example of mounting an EVS volume to a Deployment (PVC-based, shared volume):
        apiVersion: apps/v1 
        +kind: Deployment 
        +metadata: 
        +  name: evs-deployment-example 
        +  namespace: default 
        +spec: 
        +  replicas: 1 
        +  selector: 
        +    matchLabels: 
        +      app: evs-deployment-example 
        +  template: 
        +    metadata: 
        +      labels: 
        +        app: evs-deployment-example 
        +    spec: 
        +      containers: 
        +      - image: nginx
        +        name: container-0 
        +        volumeMounts: 
        +        - mountPath: /tmp 
        +          name: pvc-evs-example 
        +      imagePullSecrets:
        +        - name: default-secret
        +      restartPolicy: Always 
        +      volumes: 
        +      - name: pvc-evs-example 
        +        persistentVolumeClaim: 
        +          claimName: pvc-evs-auto-example
        +
        + +
        Table 1 Key parameters

        Parent Parameter

        +

        Parameter

        +

        Description

        +

        spec.template.spec.containers.volumeMounts

        +

        name

        +

        Name of the volume mounted to the container.

        +

        spec.template.spec.containers.volumeMounts

        +

        mountPath

        +

        Mount path of the container. In this example, the volume is mounted to the /tmp directory.

        +

        spec.template.spec.volumes

        +

        name

        +

        Name of the volume.

        +

        spec.template.spec.volumes.persistentVolumeClaim

        +

        claimName

        +

        Name of an existing PVC.

        +
        +
        +

        spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

        +
        +

      3. Run the following command to create the workload:

        kubectl create -f evs-deployment-example.yaml

        +

      +
      +

      Using EVS Volumes for StatefulSets

      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Run the following commands to configure the evs-statefulset-example.yaml file, which is used to create a StatefulSet.

        touch evs-statefulset-example.yaml

        +

        vi evs-statefulset-example.yaml

        +

        Mounting an EVS volume to a StatefulSet (PVC template-based, non-shared volume):

        +
        Example YAML:
        apiVersion: apps/v1
        +kind: StatefulSet
        +metadata:
        +  name: evs-statefulset-example
        +  namespace: default
        +spec:
        +  replicas: 1
        +  selector:
        +    matchLabels:
        +      app: evs-statefulset-example
        +  template:
        +    metadata:
        +      labels:
        +        app: evs-statefulset-example
        +    spec:
        +      containers:
        +        - name: container-0
        +          image: 'nginx:latest'
        +          volumeMounts:
        +            - name: pvc-evs-auto-example
        +              mountPath: /tmp
        +      restartPolicy: Always
        +      imagePullSecrets:
        +        - name: default-secret
        +  volumeClaimTemplates:
        +    - metadata:
        +        name: pvc-evs-auto-example
        +        namespace: default
        +        labels:
        +          failure-domain.beta.kubernetes.io/region: eu-de
        +          failure-domain.beta.kubernetes.io/zone: 
        +        annotations:
        +          everest.io/disk-volume-type: SAS
        +      spec:
        +        accessModes:
        +          - ReadWriteOnce
        +        resources:
        +          requests:
        +            storage: 10Gi
        +        storageClassName: csi-disk   
        +  serviceName: evs-statefulset-example-headless
        +  updateStrategy:
        +    type: RollingUpdate
        + +
        Table 2 Key parameters

        Parent Parameter

        +

        Parameter

        +

        Description

        +

        metadata

        +

        name

        +

        Name of the created workload.

        +

        spec.template.spec.containers

        +

        image

        +

        Image of the workload.

        +

        spec.template.spec.containers.volumeMount

        +

        mountPath

        +

        Mount path of the container. In this example, the volume is mounted to the /tmp directory.

        +

        spec

        +

        serviceName

        +

        Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.

        +
        +
        +

        spec.template.spec.containers.volumeMounts.name and spec.volumeClaimTemplates.metadata.name must be consistent because they have a mapping relationship.

        +
        +
        +

      3. Run the following command to create the workload:

        kubectl create -f evs-statefulset-example.yaml

        +

      +
      +

      Verifying Persistent Storage of an EVS Volume

      1. Query the pod of the deployed workload (for example, evs-statefulset-example) and the files in the mounted EVS volume.

        1. Run the following command to query the pod name of the workload:
          kubectl get po | grep evs-statefulset-example
          +

          Expected outputs:

          +
          evs-statefulset-example-0   1/1     Running   0          22h
          +
        2. Run the following command to check whether an EVS volume is mounted to the /tmp directory:
          kubectl exec evs-statefulset-example-0 -- df tmp
          +

          Expected outputs:

          +
          /dev/sda        10255636 36888  10202364   1% /tmp
          +
        +

      2. Run the following command to create a file named test in the /tmp directory:

        kubectl exec evs-statefulset-example-0 -- touch /tmp/test
        +

      3. Run the following command to view the file in the /tmp directory:

        kubectl exec evs-statefulset-example-0 -- ls -l /tmp
        +

        Expected outputs:

        +
        -rw-r--r-- 1 root root     0 Jun  1 02:50 test
        +

      4. Run the following command to delete the pod named evs-statefulset-example-0:

        kubectl delete po evs-statefulset-example-0
        +

      5. Check whether the file still exists after the pod is rebuilt.

        1. Run the following command to query the name of the rebuilt pod:
          kubectl get po
          +

          Expected outputs:

          +
          evs-statefulset-example-0   1/1     Running   0          2m
          +
        2. Run the following command to view the file in the /tmp directory:
          kubectl exec evs-statefulset-example-0 -- ls -l /tmp
          +

          Expected outputs:

          +
          -rw-r--r-- 1 root root     0 Jun  1 02:50 test
          +
        3. The test file still exists after the pod is rebuilt, indicating that the data in the EVS volume can be persistently stored.
        +

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0262.html b/docs/cce/umn/cce_10_0262.html new file mode 100644 index 00000000..76210b9c --- /dev/null +++ b/docs/cce/umn/cce_10_0262.html @@ -0,0 +1,149 @@ + + +

      Creating a StatefulSet Mounted with an SFS Volume

      +

      Scenario

      CCE allows you to use an existing SFS volume to create a StatefulSet (by using a PVC).

      +
      +

      Prerequisites

      You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

      +
      +

      Notes and Constraints

      The following configuration example applies to clusters of Kubernetes 1.15 or later.

      +
      +

      Procedure

      1. Create an SFS volume by referring to PersistentVolumeClaims (PVCs) and record the volume name.
      2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      3. Create a YAML file for creating the workload. Assume that the file name is sfs-statefulset-example.yaml.

        touch sfs-statefulset-example.yaml

        +

        vi sfs-statefulset-example.yaml

        +

        Configuration example:

        +
        apiVersion: apps/v1
        +kind: StatefulSet
        +metadata:
        +  name: sfs-statefulset-example
        +  namespace: default
        +spec:
        +  replicas: 1
        +  selector:
        +    matchLabels:
        +      app: sfs-statefulset-example
        +  template:
        +    metadata:
        +      labels:
        +        app: sfs-statefulset-example
        +    spec:
        +      volumes: 
        +      - name: pvc-sfs-example 
        +        persistentVolumeClaim:
        +          claimName: pvc-sfs-example     
        +      containers:
        +      - name: container-0
        +        image: 'nginx:latest'
        +        volumeMounts:
        +          - name: pvc-sfs-example
        +            mountPath: /tmp
        +      restartPolicy: Always
        +      imagePullSecrets:
        +      - name: default-secret 
        +  serviceName: sfs-statefulset-example-headless
        +  updateStrategy:
        +    type: RollingUpdate
        + +
        Table 1 Key parameters

        Parent Parameter

        +

        Parameter

        +

        Description

        +

        spec

        +

        replicas

        +

        Number of pods.

        +

        metadata

        +

        name

        +

        Name of the new workload.

        +

        spec.template.spec.containers

        +

        image

        +

        Image used by the workload.

        +

        spec.template.spec.containers.volumeMounts

        +

        mountPath

        +

        Mount path of a container.

        +

        spec

        +

        serviceName

        +

        Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet. A minimal headless Service sketch is also provided after this table.

        +

        spec.template.spec.volumes.persistentVolumeClaim

        +

        claimName

        +

        Name of an existing PVC.

        +
        +
        +
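        The serviceName field in the example refers to a headless Service that must exist for the StatefulSet. The procedure points to Creating a StatefulSet for details; as a minimal, hedged sketch (the port is illustrative and the selector must match the pod labels used above), such a Service could look like this:

        apiVersion: v1
        kind: Service
        metadata:
          name: sfs-statefulset-example-headless
          namespace: default
        spec:
          clusterIP: None            # Headless Service: no cluster IP is allocated
          selector:
            app: sfs-statefulset-example
          ports:
          - name: nginx
            port: 80
            targetPort: 80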

        Example of mounting an SFS volume to a StatefulSet (PVC template-based, dedicated volume):

        +
        Example YAML file:
        apiVersion: apps/v1
        +kind: StatefulSet
        +metadata:
        +  name: sfs-statefulset-example
        +  namespace: default
        +spec:
        +  replicas: 1
        +  selector:
        +    matchLabels:
        +      app: sfs-statefulset-example
        +  template:
        +    metadata:
        +      labels:
        +        app: sfs-statefulset-example
        +    spec:
        +      containers:
        +        - name: container-0
        +          image: 'nginx:latest'
        +          volumeMounts:
        +            - name: pvc-sfs-auto-example
        +              mountPath: /tmp
        +      restartPolicy: Always
        +      imagePullSecrets:
        +        - name: default-secret
        +  volumeClaimTemplates:
        +    - metadata:
        +        name: pvc-sfs-auto-example
        +        namespace: default
        +      spec:
        +        accessModes:
        +          - ReadWriteMany
        +        resources:
        +          requests:
        +            storage: 10Gi
        +        storageClassName: csi-nas
        +  serviceName: sfs-statefulset-example-headless
        +  updateStrategy:
        +    type: RollingUpdate
        +
        +

        spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be the same, because a volumeMount references the volume it mounts by name.

        +
        +

      4. Create a StatefulSet.

        kubectl create -f sfs-statefulset-example.yaml

        +
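        Optionally, verify that the StatefulSet is running and that the SFS volume is mounted. This is a hedged sketch modeled on the EVS and OBS verification steps in this guide; the exact mount output depends on the environment (SFS volumes are typically mounted over NFS):

        kubectl get statefulset sfs-statefulset-example

        kubectl exec sfs-statefulset-example-0 -- mount | grep /tmp

        If the volume is mounted correctly, an NFS mount on /tmp should appear in the output.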

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0263.html b/docs/cce/umn/cce_10_0263.html new file mode 100644 index 00000000..5e18dd25 --- /dev/null +++ b/docs/cce/umn/cce_10_0263.html @@ -0,0 +1,52 @@ + + +

      Creating a Deployment Mounted with an SFS Volume

      +

      Scenario

      After an SFS volume is created or imported to CCE, you can mount the volume to a workload.

      +
      +

      Prerequisites

      You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

      +
      +

      Notes and Constraints

      The following configuration example applies to clusters of Kubernetes 1.15 or later.

      +
      +

      Procedure

      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Run the following commands to configure the sfs-deployment-example.yaml file, which is used to create the workload.

        touch sfs-deployment-example.yaml

        +

        vi sfs-deployment-example.yaml

        +
        Example of mounting an SFS volume to a Deployment (PVC-based, shared volume):
        apiVersion: apps/v1 
        +kind: Deployment 
        +metadata: 
        +  name: sfs-deployment-example                                # Workload name
        +  namespace: default 
        +spec: 
        +  replicas: 1 
        +  selector: 
        +    matchLabels: 
        +      app: sfs-deployment-example 
        +  template: 
        +    metadata: 
        +      labels: 
        +        app: sfs-deployment-example 
        +    spec: 
        +      containers: 
        +      - image: nginx 
        +        name: container-0 
        +        volumeMounts: 
        +        - mountPath: /tmp                                # Mount path 
        +          name: pvc-sfs-example 
        +      imagePullSecrets:
        +        - name: default-secret
        +      restartPolicy: Always 
        +      volumes: 
        +      - name: pvc-sfs-example 
        +        persistentVolumeClaim: 
        +          claimName: pvc-sfs-auto-example                # PVC name
        +
        +

        spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be the same, because a volumeMount references the volume it mounts by name.

        +
        +

      3. Run the following command to create the workload:

        kubectl create -f sfs-deployment-example.yaml

        +
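        Optionally, check that the pod is running and that the volume is mounted on /tmp. This is a hedged sketch; the pod name suffix is generated and will differ in your cluster:

        kubectl get pod | grep sfs-deployment-example

        kubectl exec <pod-name> -- mount | grep /tmp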

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0268.html b/docs/cce/umn/cce_10_0268.html new file mode 100644 index 00000000..1eea4642 --- /dev/null +++ b/docs/cce/umn/cce_10_0268.html @@ -0,0 +1,152 @@ + + +

      Creating a StatefulSet Mounted with an OBS Volume

      +

      Scenario

      CCE allows you to use an existing OBS volume to create a StatefulSet through a PVC.

      +
      +

      Prerequisites

      You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

      +
      +

      Notes and Constraints

      The following configuration example applies to clusters of Kubernetes 1.15 or later.

      +
      +

      Procedure

      1. Create an OBS volume by referring to PersistentVolumeClaims (PVCs) and obtain the PVC name.
      2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      3. Create a YAML file for creating the workload. Assume that the file name is obs-statefulset-example.yaml.

        touch obs-statefulset-example.yaml

        +

        vi obs-statefulset-example.yaml

        +

        Configuration example:

        +
        apiVersion: apps/v1
        +kind: StatefulSet
        +metadata:
        +  name: obs-statefulset-example
        +  namespace: default
        +spec:
        +  replicas: 1
        +  selector:
        +    matchLabels:
        +      app: obs-statefulset-example
        +  template:
        +    metadata:
        +      labels:
        +        app: obs-statefulset-example
        +    spec:
        +      volumes: 
        +      - name: pvc-obs-example 
        +        persistentVolumeClaim:
        +          claimName: pvc-obs-example     
        +      containers:
        +      - name: container-0
        +        image: 'nginx:latest'
        +        volumeMounts:
        +          - name: pvc-obs-example
        +            mountPath: /tmp
        +      restartPolicy: Always
        +      imagePullSecrets:
        +      - name: default-secret 
        +  serviceName: obs-statefulset-example-headless    # Name of the headless Service
        + +
        Table 1 Key parameters

        Parameter

        +

        Description

        +

        replicas

        +

        Number of pods.

        +

        name

        +

        Name of the new workload.

        +

        image

        +

        Image used by the workload.

        +

        mountPath

        +

        Mount path of a container.

        +

        serviceName

        +

        Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.

        +

        claimName

        +

        Name of an existing PVC.

        +
        +
        +

        Example of mounting an OBS volume to a StatefulSet (PVC template-based, dedicated volume):

        +

        Example YAML:

        +
        apiVersion: apps/v1
        +kind: StatefulSet
        +metadata:
        +  name: obs-statefulset-example
        +  namespace: default
        +spec:
        +  replicas: 1
        +  selector:
        +    matchLabels:
        +      app: obs-statefulset-example
        +  template:
        +    metadata:
        +      labels:
        +        app: obs-statefulset-example
        +    spec:
        +      containers:
        +        - name: container-0
        +          image: 'nginx:latest'
        +          volumeMounts:
        +            - name: pvc-obs-auto-example
        +              mountPath: /tmp
        +      restartPolicy: Always
        +      imagePullSecrets:
        +        - name: default-secret
        +  volumeClaimTemplates:
        +    - metadata:
        +        name: pvc-obs-auto-example
        +        namespace: default
        +        annotations:
        +          everest.io/obs-volume-type: STANDARD
        +      spec:
        +        accessModes:
        +          - ReadWriteMany
        +        resources:
        +          requests:
        +            storage: 1Gi
        +        storageClassName: csi-obs  
        +  serviceName: obs-statefulset-example-headless
        +
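        When the volumeClaimTemplates form above is used, Kubernetes creates one PVC per replica, named <claim template name>-<pod name>. A quick, hedged check (the output below is illustrative):

        kubectl get pvc | grep obs-statefulset-example

        pvc-obs-auto-example-obs-statefulset-example-0   Bound   ...   1Gi   RWX   csi-obs   1m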

      4. Create a StatefulSet.

        kubectl create -f obs-statefulset-example.yaml

        +

      +
      +

      Verifying Persistent Storage of an OBS Volume

      1. Query the pod and OBS volume of the deployed workload (for example, obs-statefulset-example).

        1. Run the following command to query the pod name of the workload:
          kubectl get po | grep obs-statefulset-example
          +

          Expected outputs:

          +
          obs-statefulset-example-0   1/1     Running   0          2m5s
          +
        2. Run the following command to check whether an OBS volume is mounted to the /tmp directory:
          kubectl exec obs-statefulset-example-0 -- mount|grep /tmp
          +

          Expected outputs:

          +
          s3fs on /tmp type fuse.s3fs (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other)
          +
        +

      2. Run the following command to create a file named test in the /tmp directory:

        kubectl exec obs-statefulset-example-0 -- touch /tmp/test
        +

      3. Run the following command to view the file in the /tmp directory:

        kubectl exec obs-statefulset-example-0 -- ls -l /tmp
        +

        Expected outputs:

        +
        -rw-r--r-- 1 root root     0 Jun  1 02:50 test
        +

      4. Run the following command to delete the pod named obs-statefulset-example-0:

        kubectl delete po obs-statefulset-example-0
        +

      5. Check whether the file still exists after the pod is rebuilt.

        1. Run the following command to query the name of the rebuilt pod:
          kubectl get po
          +

          Expected outputs:

          +
          obs-statefulset-example-0   1/1     Running   0          2m
          +
        2. Run the following command to view the file in the /tmp directory:
          kubectl exec obs-statefulset-example-0 -- ls -l /tmp
          +

          Expected outputs:

          +
          -rw-r--r-- 1 root root     0 Jun  1 02:50 test
          +
        3. The test file still exists after the pod is rebuilt, indicating that the data in the OBS volume can be persistently stored.
        +

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0269.html b/docs/cce/umn/cce_10_0269.html new file mode 100644 index 00000000..f4bf2f66 --- /dev/null +++ b/docs/cce/umn/cce_10_0269.html @@ -0,0 +1,52 @@ + + +

      Creating a Deployment Mounted with an OBS Volume

      +

      Scenario

      After an OBS volume is created or imported to CCE, you can mount the volume to a workload.

      +
      +

      Prerequisites

      You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

      +
      +

      Notes and Constraints

      The following configuration example applies to clusters of Kubernetes 1.15 or later.

      +
      +

      Procedure

      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Run the following commands to configure the obs-deployment-example.yaml file, which is used to create the workload.

        touch obs-deployment-example.yaml

        +

        vi obs-deployment-example.yaml

        +
        Example of mounting an OBS volume to a Deployment (PVC-based, shared volume):
        apiVersion: apps/v1 
        +kind: Deployment 
        +metadata: 
        +  name: obs-deployment-example                        # Workload name
        +  namespace: default 
        +spec: 
        +  replicas: 1 
        +  selector: 
        +    matchLabels: 
        +      app: obs-deployment-example 
        +  template: 
        +    metadata: 
        +      labels: 
        +        app: obs-deployment-example 
        +    spec: 
        +      containers: 
        +      - image: nginx
        +        name: container-0 
        +        volumeMounts: 
        +        - mountPath: /tmp                       # Mount path
        +          name: pvc-obs-example 
        +      restartPolicy: Always
        +      imagePullSecrets:
        +        - name: default-secret
        +      volumes: 
        +      - name: pvc-obs-example  
        +        persistentVolumeClaim: 
        +          claimName: pvc-obs-auto-example       # PVC name
        +
        +

        spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be the same, because a volumeMount references the volume it mounts by name.

        +
        +

      3. Run the following command to create the workload:

        kubectl create -f obs-deployment-example.yaml

        +
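        Optionally, verify the mount in the same way as for the OBS StatefulSet. This is a hedged sketch; the pod name suffix is generated and will differ in your cluster:

        kubectl get pod | grep obs-deployment-example

        kubectl exec <pod-name> -- mount | grep /tmp

        An s3fs (fuse.s3fs) mount on /tmp is expected in the output.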

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_01_0275.html b/docs/cce/umn/cce_10_0275.html similarity index 61% rename from docs/cce/umn/cce_01_0275.html rename to docs/cce/umn/cce_10_0275.html index e58eacc3..8e4ab6e0 100644 --- a/docs/cce/umn/cce_01_0275.html +++ b/docs/cce/umn/cce_10_0275.html @@ -1,25 +1,25 @@ - + -

      Pod Security Policies

      -

      A pod security policy (PSP) is a cluster-level resource that controls sensitive security aspects of the pod specification. The PodSecurityPolicy object in Kubernetes defines a group of conditions that a pod must comply with to be accepted by the system, as well as the default values of related fields.

      -

      By default, the PSP access control component is enabled for clusters of v1.17.17 and a global default PSP named psp-global is created. You can modify the default policy (but not delete it). You can also create a PSP and bind it to the RBAC configuration.

      -

      In addition to the global default PSP, the system configures independent PSPs for system components in namespace kube-system. Modifying the psp-global configuration does not affect pod creation in namespace kube-system.

      +

      Configuring a Pod Security Policy

      +

      A pod security policy (PSP) is a cluster-level resource that controls sensitive security aspects of the pod specification. The PodSecurityPolicy object in Kubernetes defines a group of conditions that a pod must comply with to be accepted by the system, as well as the default values of related fields.

      +

      By default, the PSP access control component is enabled for clusters of v1.17.17 and a global default PSP named psp-global is created. You can modify the default policy (but not delete it). You can also create a PSP and bind it to the RBAC configuration.

      +
      • In addition to the global default PSP, the system configures independent PSPs for system components in namespace kube-system. Modifying the psp-global configuration does not affect pod creation in namespace kube-system.
      • In Kubernetes 1.25, PSP has been removed and replaced by Pod Security Admission. For details, see Configuring Pod Security Admission.
      -

      Modifying the Global Default PSP

      Before modifying the global default PSP, ensure that a CCE cluster has been created and connected by using kubectl.

      -
      1. Run the following command:

        kubectl edit psp psp-global

        -

      2. Modify the parameters as required. For details, see PodSecurityPolicy.
      +

      Modifying the Global Default PSP

      Before modifying the global default PSP, ensure that a CCE cluster has been created and connected by using kubectl.

      +
      1. Run the following command:

        kubectl edit psp psp-global

        +

      2. Modify the parameters as required. For details, see PodSecurityPolicy.
      -

      Example of Enabling Unsafe Sysctls in Pod Security Policy

      You can configure allowed-unsafe-sysctls for a node pool. For CCE v1.17.17 and later versions, add configurations in allowedUnsafeSysctls of the pod security policy to make the configuration take effect. For details, see PodSecurityPolicy.

      -

      In addition to modifying the global pod security policy, you can add new pod security policies. For example, enable the net.core.somaxconn unsafe sysctls. The following is an example of adding a pod security policy:

      -
      apiVersion: policy/v1beta1
      +

      Example of Enabling Unsafe Sysctls in Pod Security Policy

      You can configure allowed-unsafe-sysctls for a node pool. For CCE v1.17.17 and later versions, add configurations in allowedUnsafeSysctls of the pod security policy to make the configuration take effect. For details, see PodSecurityPolicy.

      +

      In addition to modifying the global pod security policy, you can add new pod security policies. For example, enable the net.core.somaxconn unsafe sysctls. The following is an example of adding a pod security policy:

      +
      apiVersion: policy/v1beta1
       kind: PodSecurityPolicy
       metadata:
         annotations:
           seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
         name: sysctl-psp
       spec:
      -  allowedUnsafeSysctls:
      -  - net.core.somaxconn
      +  allowedUnsafeSysctls:
      +  - net.core.somaxconn
         allowPrivilegeEscalation: true
         allowedCapabilities:
         - '*'
      @@ -71,10 +71,10 @@ subjects:
         name: system:authenticated
         apiGroup: rbac.authorization.k8s.io
      -
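      A PSP only permits the unsafe sysctl; a pod must still request it in its security context. The following is a minimal, hedged sketch of such a pod (the image and sysctl value are illustrative, and net.core.somaxconn must be listed in the node's allowed-unsafe-sysctls):

      apiVersion: v1
      kind: Pod
      metadata:
        name: sysctl-example
      spec:
        securityContext:
          sysctls:
          - name: net.core.somaxconn      # Unsafe sysctl permitted by the PSP above
            value: "3000"
        containers:
        - name: container-0
          image: nginx:latest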

      Restoring the Original PSP

      If you have modified the default pod security policy and want to restore the original pod security policy, perform the following operations.

      -
      1. Create a policy description file named policy.yaml. policy.yaml is an example file name. You can rename it as required.

        vi policy.yaml

        -

        The content of the description file is as follows:

        -
        apiVersion: policy/v1beta1
        +

        Restoring the Original PSP

        If you have modified the default pod security policy and want to restore the original pod security policy, perform the following operations.

        +
        1. Create a policy description file named policy.yaml. policy.yaml is an example file name. You can rename it as required.

          vi policy.yaml

          +

          The content of the description file is as follows:

          +
          apiVersion: policy/v1beta1
           kind: PodSecurityPolicy
           metadata:
             name: psp-global
          @@ -130,13 +130,13 @@ subjects:
           - kind: Group
             name: system:authenticated
             apiGroup: rbac.authorization.k8s.io
          -

        2. Run the following commands:

          kubectl apply -f policy.yaml

          +

        3. Run the following command:

          kubectl apply -f policy.yaml

      diff --git a/docs/cce/umn/cce_10_0276.html b/docs/cce/umn/cce_10_0276.html new file mode 100644 index 00000000..90f83f0e --- /dev/null +++ b/docs/cce/umn/cce_10_0276.html @@ -0,0 +1,39 @@ + + +

      Performing Rolling Upgrade for Nodes

      +

      Scenario

      In a rolling upgrade, a new node is created, existing workloads are migrated to the new node, and then the old node is deleted. Figure 1 shows the migration process.

      +
      Figure 1 Workload migration
      +
      +

      Notes and Constraints

      • The original node and the target node to which the workload is to be migrated must be in the same cluster.
      • The cluster must be of v1.13.10 or later.
      • The default node pool DefaultPool does not support this configuration.
      +
      +

      Scenario 1: The Original Node Is in DefaultPool

      1. Create a node pool. For details, see Creating a Node Pool.
      2. Click the name of the node pool. The IP address of the new node is displayed in the node list.
      3. Install and configure kubectl. For details, see Connecting to a Cluster Using kubectl.
      4. Migrate the workload.

        1. Add a taint to the node where the workload needs to be migrated out.

          kubectl taint node [node] key=value:[effect]

          +

          In the preceding command, [node] indicates the IP address of the node where the workload to be migrated is located. The value of [effect] can be NoSchedule, PreferNoSchedule, or NoExecute. In this example, set this parameter to NoSchedule.

          +
          • NoSchedule: Pods that do not tolerate this taint are not scheduled on the node; existing pods are not evicted from the node.
          • PreferNoSchedule: Kubernetes tries to avoid scheduling pods that do not tolerate this taint onto the node.
          • NoExecute: A pod is evicted from the node if it is already running on the node, and is not scheduled onto the node if it is not yet running on the node.
          +

          To remove a taint, run the kubectl taint node [node] key:[effect]- command. (A complete example command sequence is provided at the end of this scenario.)

          +
          +
        2. Safely evict the workloads from the node.

          kubectl drain [node]

          +

          In the preceding command, [node] indicates the IP address of the node where the workload to be migrated is located.

          +
        3. In the navigation pane of the CCE console, choose Workloads > Deployments. In the workload list, the status of the workload to be migrated changes from Running to Unready. If the workload status changes to Running again, the migration is successful.
        +

        During workload migration, if node affinity is configured for the workload, the workload keeps displaying a message indicating that the workload is not ready. In this case, click the workload name to go to the workload details page. On the Scheduling Policies tab page, delete the affinity configuration of the original node and configure the affinity and anti-affinity policies of the new node. For details, see Scheduling Policy (Affinity/Anti-affinity).

        +
        +

        After the workload is successfully migrated, you can view that the workload is migrated to the node created in 1 on the Pods tab page of the workload details page.

        +

      5. Delete the original node.

        After the workload is successfully migrated and runs properly, delete the original node.

        +

      +
      +
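      For reference, a complete, hedged command sequence for the migration steps above (the node IP address, taint key, and taint value are illustrative):

      kubectl taint node 192.168.0.212 migrate=true:NoSchedule

      kubectl drain 192.168.0.212 --ignore-daemonsets

      After the pods have been rescheduled to the new node and are running properly, the taint can be removed if needed:

      kubectl taint node 192.168.0.212 migrate:NoSchedule-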

      Scenario 2: The Original Node Is Not in DefaultPool

      1. Copy the node pool and add nodes to it. For details, see Copying a Node Pool.
      2. Click View Node in the Operation column of the node pool. The IP address of the new node is displayed in the node list.
      3. Migrate the workload.

        1. Click Edit on the right of the original node pool and set Taints.
        2. Enter the key and value of the taint. The options of Effect are NoSchedule, PreferNoSchedule, and NoExecute. Select NoExecute and confirm to add the taint.
          • NoSchedule: Pods that do not tolerate this taint are not scheduled on the node; existing pods are not evicted from the node.
          • PreferNoSchedule: Kubernetes tries to avoid scheduling pods that do not tolerate this taint onto the node.
          • NoExecute: A pod is evicted from the node if it is already running on the node, and is not scheduled onto the node if it is not yet running on the node.
          +

          If you need to reset the taint, delete the configured taint.

          +
          +
        3. Click OK.
        4. In the navigation pane of the CCE console, choose Workloads > Deployments. In the workload list, the status of the workload to be migrated changes from Running to Unready. If the workload status changes to Running again, the migration is successful.
        +

        During workload migration, if node affinity is configured for the workload, the workload keeps displaying a message indicating that the workload is not ready. In this case, click the workload name to go to the workload details page. On the Scheduling Policies tab page, delete the affinity configuration of the original node and configure the affinity and anti-affinity policies of the new node. For details, see Scheduling Policy (Affinity/Anti-affinity).

        +
        +

        After the workload is successfully migrated, you can view that the workload is migrated to the node created in 1 on the Pods tab page of the workload details page.

        +

      4. Delete the original node.

        After the workload is successfully migrated and runs properly, delete the original node.

        +

      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0277.html b/docs/cce/umn/cce_10_0277.html new file mode 100644 index 00000000..e2fea603 --- /dev/null +++ b/docs/cce/umn/cce_10_0277.html @@ -0,0 +1,61 @@ + + +

      Overview

      +

      CCE provides multiple types of add-ons to extend cluster functions and meet feature requirements. You can install add-ons as required.

      + +
      Table 1 Add-on list

      Add-on Name

      +

      Introduction

      +

      coredns (System Resource Add-On, Mandatory)

      +

      The coredns add-on is a DNS server that provides domain name resolution services for Kubernetes clusters. coredns chains plug-ins to provide additional features.

      +

      storage-driver (System Resource Add-On, Discarded)

      +

      storage-driver is a FlexVolume driver used to support IaaS storage services such as EVS, SFS, and OBS.

      +

      everest (System Resource Add-On, Mandatory)

      +

      Everest is a cloud native container storage system. Based on the Container Storage Interface (CSI), it enables clusters of Kubernetes v1.15.6 or later to access cloud storage services.

      +

      npd

      +

      node-problem-detector (npd for short) is an add-on that monitors abnormal events of cluster nodes and connects to a third-party monitoring platform. It is a daemon running on each node. It collects node issues from different daemons and reports them to the API server. The npd add-on can run as a DaemonSet or a daemon.

      +

      autoscaler

      +

      The autoscaler add-on resizes a cluster based on pod scheduling status and resource usage.

      +

      metrics-server

      +

      metrics-server is an aggregator for monitoring data of core cluster resources.

      +

      gpu-beta

      +

      gpu-beta is a device management add-on that supports GPUs in containers. It supports only NVIDIA drivers.

      +

      volcano

      +

      Volcano provides general-purpose, high-performance computing capabilities, such as job scheduling, heterogeneous chip management, and job running management, serving end users through computing frameworks for different industries, such as AI, big data, gene sequencing, and rendering.

      +
      +
      +
      +
      + +
      + diff --git a/docs/cce/umn/cce_10_0278.html b/docs/cce/umn/cce_10_0278.html new file mode 100644 index 00000000..9b66158d --- /dev/null +++ b/docs/cce/umn/cce_10_0278.html @@ -0,0 +1,65 @@ + + +

      Creating a Namespace

      +

      When to Use Namespaces

      A namespace is a collection of resources and objects. Multiple namespaces can be created inside a cluster and isolated from each other. This enables namespaces to share the same cluster Services without affecting each other.

      +

      For example, you can deploy workloads in a development environment into one namespace, and deploy workloads in a testing environment into another namespace.

      +
      +

      Prerequisites

      At least one cluster has been created.

      +
      +

      Notes and Constraints

      A maximum of 6,000 Services can be created in each namespace. The Services mentioned here indicate the Kubernetes Service resources added for workloads.

      +
      +

      Namespace Types

      Namespaces can be created in either of the following ways:

      +
      • Created automatically: When a cluster is up, the default, kube-public, kube-system, and kube-node-lease namespaces are created by default.
        • default: All objects for which no namespace is specified are allocated to this namespace.
        • kube-public: Resources in this namespace can be accessed by all users (including unauthenticated users), such as public add-ons and container charts.
        • kube-system: All resources created by Kubernetes are in this namespace.
        • kube-node-lease: Each node has an associated Lease object in this namespace. The object is periodically updated by the node. Both NodeStatus and NodeLease are considered as heartbeats from a node. In versions earlier than v1.13, only NodeStatus is available. The NodeLease feature is introduced in v1.13. NodeLease is more lightweight than NodeStatus. This feature significantly improves the cluster scalability and performance.
        +
      • Created manually: You can create namespaces to serve separate purposes. For example, you can create three namespaces, one for a development environment, one for joint debugging environment, and one for test environment. You can also create one namespace for login services and one for game services.
      +
      +

      Creating a Namespace

      1. Log in to the CCE console and access the cluster console.
      2. Choose Namespaces in the navigation pane and click Create Namespace in the upper right corner.
      3. Set namespace parameters based on Table 1.

        +

        Table 1 Parameters for creating a namespace

        Parameter

        +

        Description

        +

        Name

        +

        Unique name of the created namespace.

        +

        Description

        +

        Description about the namespace.

        +

        Quota Management

        +

        Resource quotas can limit the amount of resources available in namespaces, achieving resource allocation by namespace.

        +
        NOTICE:

        You are advised to set resource quotas in the namespace as required to prevent cluster or node exceptions caused by resource overload.

        +

        For example, the default number of pods that can be created on each node in a cluster is 110. If you create a cluster with 50 nodes, you can create a maximum of 5,500 pods. Therefore, you can set a resource quota to ensure that the total number of pods in all namespaces does not exceed 5,500. (A kubectl sketch of such a ResourceQuota is provided at the end of this section.)

        +
        +

        Enter an integer. If the quota of a resource is not specified, no limit is imposed on that resource.

        +

        If you want to limit the CPU or memory quota, you must specify the CPU or memory request value when creating a workload.

        +
        +
        +

      4. When the configuration is complete, click OK.
      +
      +

      Using kubectl to Create a Namespace

      Define a namespace.

      +
      apiVersion: v1 
      +kind: Namespace 
      +metadata: 
      +  name: custom-namespace 
      +

      Run the kubectl command to create it.

      +
      $ kubectl create -f custom-namespace.yaml
      +namespace/custom-namespace created 
      +

      You can also run the kubectl create namespace command to create a namespace.

      +
      $ kubectl create namespace custom-namespace 
      +namespace/custom-namespace created 
      +
      +
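      The Quota Management setting described in Table 1 corresponds to a Kubernetes ResourceQuota object. A minimal, hedged sketch that caps the number of pods in the namespace (the object name, file name, and limit are illustrative):

      apiVersion: v1
      kind: ResourceQuota
      metadata:
        name: pod-quota
        namespace: custom-namespace
      spec:
        hard:
          pods: "100"

      $ kubectl create -f pod-quota.yaml
      resourcequota/pod-quota created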
      +
      + +
      + diff --git a/docs/cce/umn/cce_01_0279.html b/docs/cce/umn/cce_10_0279.html similarity index 53% rename from docs/cce/umn/cce_01_0279.html rename to docs/cce/umn/cce_10_0279.html index 44f1ee9e..55cecbab 100644 --- a/docs/cce/umn/cce_01_0279.html +++ b/docs/cce/umn/cce_10_0279.html @@ -1,59 +1,59 @@ - +

      Overview

      -

      Auto scaling is a service that automatically and economically adjusts service resources based on your service requirements and configured policies.

      -

      Context

      More and more applications are developed based on Kubernetes. It becomes increasingly important to quickly scale out applications on Kubernetes to cope with service peaks and to scale in applications during off-peak hours to save resources and reduce costs.

      -

      In a Kubernetes cluster, auto scaling involves pods and nodes. A pod is an application instance. Each pod contains one or more containers and runs on a node (VM). If a cluster does not have sufficient nodes to run new pods, you need to add nodes to the cluster to ensure service running.

      -

      In CCE, auto scaling is used for online services, large-scale computing and training, deep learning GPU or shared GPU training and inference, periodic load changes, and many other scenarios.

      +

      Auto scaling is a service that automatically and economically adjusts service resources based on your service requirements and configured policies.

      +

      Context

      More and more applications are developed based on Kubernetes. It becomes increasingly important to quickly scale out applications on Kubernetes to cope with service peaks and to scale in applications during off-peak hours to save resources and reduce costs.

      +

      In a Kubernetes cluster, auto scaling involves pods and nodes. A pod is an application instance. Each pod contains one or more containers and runs on a node (VM or bare-metal server). If a cluster does not have sufficient nodes to run new pods, you need to add nodes to the cluster to ensure service running.

      +

      In CCE, auto scaling is used for online services, large-scale computing and training, deep learning GPU or shared GPU training and inference, periodic load changes, and many other scenarios.

      -

      Auto Scaling in CCE

      CCE supportsauto scaling for workloads and nodes.

      -
      • Workload scaling: Auto scaling at the scheduling layer to change the scheduling capacity of workloads. For example, you can use the HPA, a scaling component at the scheduling layer, to adjust the number of replicas of an application. Adjusting the number of replicas changes the scheduling capacity occupied by the current workload, thereby enabling scaling at the scheduling layer.
      • Node scaling: Auto scaling at the resource layer. When the planned cluster nodes cannot allow workload scheduling, ECS resources are provided to support scheduling.
      +

      Auto Scaling in CCE

      CCE supports auto scaling for workloads and nodes.

      +
      • Workload scaling: Auto scaling at the scheduling layer to change the scheduling capacity of workloads. For example, you can use the HPA, a scaling component at the scheduling layer, to adjust the number of replicas of an application. Adjusting the number of replicas changes the scheduling capacity occupied by the current workload, thereby enabling scaling at the scheduling layer. (A minimal HPA sketch is shown after this list.)
      • Node scaling: Auto scaling at the resource layer. When the planned cluster nodes cannot allow workload scheduling, ECS resources are provided to support scheduling.
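      As a minimal illustration of workload scaling (a plain Kubernetes HorizontalPodAutoscaler, not the CCE HPA policy itself, which additionally provides cooldown windows and scaling thresholds), the following hedged sketch keeps a Deployment named example between 2 and 10 replicas at 80% average CPU utilization. It assumes metrics-server is installed; apiVersion autoscaling/v2 requires Kubernetes 1.23 or later (earlier clusters use autoscaling/v2beta2):

      apiVersion: autoscaling/v2
      kind: HorizontalPodAutoscaler
      metadata:
        name: example-hpa
        namespace: default
      spec:
        scaleTargetRef:
          apiVersion: apps/v1
          kind: Deployment
          name: example
        minReplicas: 2
        maxReplicas: 10
        metrics:
        - type: Resource
          resource:
            name: cpu
            target:
              type: Utilization
              averageUtilization: 80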
      -

      Components

      -

      -

      Workload scaling components are described as follows:

      +

      Components

      +

      +

      Workload scaling components are described as follows:

      -
      Table 1 Workload scaling components

      Type

      +
      - - - - - - -
      Table 1 Workload scaling components

      Type

      Component Name

      +

      Component Name

      Component Description

      +

      Component Description

      Reference

      +

      Reference

      HPA

      +

      HPA

      metrics-server

      +

      metrics-server

      A built-in component of Kubernetes, which enables horizontal scaling of pods. It adds the application-level cooldown time window and scaling threshold functions based on the HPA.

      +

      A built-in component of Kubernetes, which enables horizontal scaling of pods. It adds the application-level cooldown time window and scaling threshold functions based on the HPA.

      Creating an HPA Policy for Workload Auto Scaling

      +

      Creating an HPA Policy for Workload Auto Scaling

      -

      Node scaling components are described as follows:

      +

      Node scaling components are described as follows:

      -
      Table 2 Node scaling components

      Component Name

      +
      - - - - - - - @@ -63,7 +63,7 @@
      diff --git a/docs/cce/umn/cce_01_0280.html b/docs/cce/umn/cce_10_0280.html similarity index 51% rename from docs/cce/umn/cce_01_0280.html rename to docs/cce/umn/cce_10_0280.html index d5d09e54..069f55c7 100644 --- a/docs/cce/umn/cce_01_0280.html +++ b/docs/cce/umn/cce_10_0280.html @@ -1,21 +1,21 @@ - +

      Container Network Models

      diff --git a/docs/cce/umn/cce_10_0281.html b/docs/cce/umn/cce_10_0281.html new file mode 100644 index 00000000..473f5557 --- /dev/null +++ b/docs/cce/umn/cce_10_0281.html @@ -0,0 +1,105 @@ + + +

      Overview

      +

      The container network assigns IP addresses to pods in a cluster and provides networking services. In CCE, you can select the following network models for your cluster:

      + +

      Network Model Comparison

      Table 1 describes the differences of network models supported by CCE.

      +

      After a cluster is created, the network model cannot be changed.

      +
      +
      + +
      Table 2 Node scaling components

      Component Name

      Component Description

      +

      Component Description

      Application Scenario

      +

      Application Scenario

      Reference

      +

      Reference

      autoscaler

      +

      autoscaler

      An open-source Kubernetes component for horizontal scaling of nodes, which is optimized in terms of scheduling and auto scaling capabilities.

      +

      An open source Kubernetes component for horizontal scaling of nodes, which is optimized in terms of scheduling and auto scaling capabilities.

      Online services, deep learning, and large-scale computing with limited resource budgets

      +

      Online services, deep learning, and large-scale computing with limited resource budgets

      Creating a Node Scaling Policy

      +

      Creating a Node Scaling Policy

      Table 1 Network model comparison

      Dimension

      +

      Tunnel Network

      +

      VPC Network

      +

      Cloud Native Network 2.0

      +

      Application scenarios

      +
      • Common container service scenarios
      • Scenarios that do not have high requirements on network latency and bandwidth
      +
      • Scenarios that have high requirements on network latency and bandwidth
      • Containers can communicate with VMs using a microservice registration framework, such as Dubbo and CSE.
      +
      • Scenarios that have high requirements on network latency, bandwidth, and performance
      • Containers can communicate with VMs using a microservice registration framework, such as Dubbo and CSE.
      +

      Core technology

      +

      OVS

      +

      IPvlan and VPC route

      +

      VPC ENI/sub-ENI

      +

      Applicable clusters

      +

      CCE cluster

      +

      CCE cluster

      +

      CCE Turbo cluster

      +

      Network isolation

      +

      Kubernetes native NetworkPolicy for pods

      +

      No

      +

      Pods support security group isolation.

      +

      Passthrough networking

      +

      No

      +

      No

      +

      Yes

      +

      IP address management

      +
      • The container CIDR block is allocated separately.
      • CIDR blocks are divided by node and can be dynamically allocated (CIDR blocks can be dynamically added after being allocated.)
      +
      • The container CIDR block is allocated separately.
      • CIDR blocks are divided by node and statically allocated (the CIDR block cannot be changed after a node is created).
      +

      The container CIDR block is divided from the VPC subnet and does not need to be allocated separately.

      +

      Network performance

      +

      Performance loss due to VXLAN encapsulation

      +

      No tunnel encapsulation. Cross-node packets are forwarded through VPC routers, delivering performance equivalent to that of the host network.

      +

      The container network is integrated with the VPC network, eliminating performance loss.

      +

      Networking scale

      +

      A maximum of 2,000 nodes are supported.

      +

      By default, 200 nodes are supported.

      +

      Each time a node is added to the cluster, a route is added to the VPC route tables. Therefore, the cluster scale is limited by the VPC route tables.

      +

      A maximum of 2,000 nodes are supported.

      +
      +
      +
      1. The scale of a cluster that uses the VPC network model is limited by the custom routes of the VPC. Therefore, you need to estimate the number of required nodes before creating a cluster.
      2. The scale of a cluster that uses the Cloud Native Network 2.0 model depends on the size of the VPC subnet CIDR block selected for the network attachment definition. Before creating a cluster, evaluate the scale of your cluster.
      3. By default, VPC routing network supports direct communication between containers and hosts in the same VPC. If a peering connection policy is configured between the VPC and another VPC, the containers can directly communicate with hosts on the peer VPC. In addition, in hybrid networking scenarios such as Direct Connect and VPN, communication between containers and hosts on the peer end can also be achieved with proper planning.
      4. Do not change the mask of the primary CIDR block on the VPC after a cluster is created. Otherwise, the network will be abnormal.
      +
      + +
      + +
      + diff --git a/docs/cce/umn/cce_01_0282.html b/docs/cce/umn/cce_10_0282.html similarity index 60% rename from docs/cce/umn/cce_01_0282.html rename to docs/cce/umn/cce_10_0282.html index 44a93805..3ff4cca1 100644 --- a/docs/cce/umn/cce_01_0282.html +++ b/docs/cce/umn/cce_10_0282.html @@ -1,35 +1,30 @@ - +

      Container Tunnel Network

      -

      Container Tunnel Network Model

      The container tunnel network is constructed on but independent of the node network through tunnel encapsulation. This network model uses VXLAN to encapsulate Ethernet packets into UDP packets and transmits them in tunnels. Open vSwitch serves as the backend virtual switch. Though at some costs of performance, packet encapsulation and tunnel transmission enable higher interoperability and compatibility with advanced features (such as network policy-based isolation) for most common scenarios.
      Figure 1 Container tunnel network
      +

      Container Tunnel Network Model

      The container tunnel network is constructed on but independent of the node network through tunnel encapsulation. This network model uses VXLAN to encapsulate Ethernet packets into UDP packets and transmits them in tunnels. Open vSwitch serves as the backend virtual switch. Though at some costs of performance, packet encapsulation and tunnel transmission enable higher interoperability and compatibility with advanced features (such as network policy-based isolation) for most common scenarios.
      Figure 1 Container tunnel network
      -

      Pod-to-pod communication

      -
      • On the same node: Packets are directly forwarded via the OVS bridge on the node.
      • Across nodes: Packets are encapsulated in the OVS bridge and then forwarded to the peer node.
      +

      Pod-to-pod communication

      +
      • On the same node: Packets are directly forwarded via the OVS bridge on the node.
      • Across nodes: Packets are encapsulated in the OVS bridge and then forwarded to the peer node.
      -

      Advantages and Disadvantages

      Advantages

      -
      • The container network is decoupled from the node network and is not limited by the VPC quotas and response speed (such as the number of VPC routes, number of elastic ENIs, and creation speed).
      • Network isolation is supported. For details, see Network Policies.
      • Bandwidth limits are supported.
      • Large-scale networking is supported.
      -

      Disadvantages

      -
      • High encapsulation overhead, complex networking, and low performance
      • Failure to use the load balancing and security group capabilities provided by the VPC
      • External networks cannot be directly connected to container IP addresses.
      +

      Advantages and Disadvantages

      Advantages

      +
      • The container network is decoupled from the node network and is not limited by the VPC quotas and response speed (such as the number of VPC routes, number of elastic ENIs, and creation speed).
      • Network isolation is supported. For details, see Network Policies.
      • Bandwidth limits are supported.
      • Large-scale networking is supported.
      +

      Disadvantages

      +
      • High encapsulation overhead, complex networking, and low performance
      • Failure to use the load balancing and security group capabilities provided by the VPC
      • External networks cannot be directly connected to container IP addresses.
      -

      Applicable Scenarios

      • Low requirements on performance: As the container tunnel network requires additional VXLAN tunnel encapsulation, it has about 5% to 15% of performance loss when compared with the other two container network models. Therefore, the container tunnel network is applicable to the scenarios that do not have high performance requirements, such as web applications, and middle-end and back-end services with a small number of access requests.
      • Large-scale networking: Different from the VPC network that is limited by the VPC route quota, the container tunnel network does not have any restriction on the infrastructure. In addition, the container tunnel network controls the broadcast domain to the node level. The container tunnel network supports a maximum of 2000 nodes.
      +

      Applicable Scenarios

      • Low requirements on performance: As the container tunnel network requires additional VXLAN tunnel encapsulation, it has about 5% to 15% of performance loss when compared with the other two container network models. Therefore, the container tunnel network is applicable to the scenarios that do not have high performance requirements, such as web applications, and middle-end and back-end services with a small number of access requests.
      • Large-scale networking: Different from the VPC network that is limited by the VPC route quota, the container tunnel network does not have any restriction on the infrastructure. In addition, the container tunnel network controls the broadcast domain to the node level. The container tunnel network supports a maximum of 2000 nodes.
      -

      Container IP Address Management

      The container tunnel network allocates container IP addresses according to the following rules:

      -
      • The container CIDR block is allocated separately, which is irrelevant to the node CIDR block.
      • IP addresses are allocated by node. One or more CIDR blocks with a fixed size (16 by default) are allocated to each node in a cluster from the container CIDR block.
      • When the IP addresses on a node are used up, you can apply for a new CIDR block.
      • The container CIDR block cyclically allocates CIDR blocks to new nodes or existing nodes in sequence.
      • Pods scheduled to a node are cyclically allocated IP addresses from one or more CIDR blocks allocated to the node.
      -
      Figure 2 IP address allocation of the container tunnel network
      -

      Maximum number of nodes that can be created in the cluster using the container tunnel network = Number of IP addresses in the container CIDR block / Size of the IP CIDR block allocated to the node by the container CIDR block at a time (16 by default)

      -

      For example, if the container CIDR block is 172.16.0.0/16, the number of IP addresses is 65536. If 16 IP addresses are allocated to a node at a time, a maximum of 4096 (65536/16) nodes can be created in the cluster. This is an extreme case. If 4096 nodes are created, a maximum of 16 pods can be created for each node because only 16 IP CIDR block\s are allocated to each node. In addition, the number of nodes that can be created in a cluster also depends on the node network and cluster scale.

      +

      Container IP Address Management

      The container tunnel network allocates container IP addresses according to the following rules:

      +
      • The container CIDR block is allocated separately, which is irrelevant to the node CIDR block.
      • IP addresses are allocated by node. One or more CIDR blocks with a fixed size (16 by default) are allocated to each node in a cluster from the container CIDR block.
      • When the IP addresses on a node are used up, you can apply for a new CIDR block.
      • The container CIDR block cyclically allocates CIDR blocks to new nodes or existing nodes in sequence.
      • Pods scheduled to a node are cyclically allocated IP addresses from one or more CIDR blocks allocated to the node.
      +
      Figure 2 IP address allocation of the container tunnel network
      +

      Maximum number of nodes that can be created in the cluster using the container tunnel network = Number of IP addresses in the container CIDR block / Size of the IP CIDR block allocated to the node by the container CIDR block at a time (16 by default)

      +

      For example, if the container CIDR block is 172.16.0.0/16, the number of IP addresses is 65536. If 16 IP addresses are allocated to a node at a time, a maximum of 4096 (65536/16) nodes can be created in the cluster. This is an extreme case. If 4096 nodes are created, each node is allocated only one CIDR block of 16 IP addresses, so a maximum of 16 pods can run on each node. In addition, the number of nodes that can be created in a cluster also depends on the node network and cluster scale.

      -

      Recommendation for CIDR Block Planning

      As described in Cluster Network Structure, network addresses in a cluster can be divided into three parts: node network, container network, and service network. When planning network addresses, consider the following aspects:

      -
      • The three CIDR blocks cannot overlap. Otherwise, a conflict occurs.
      • Ensure that each CIDR block has sufficient IP addresses.
        • The IP addresses in the node CIDR block must match the cluster scale. Otherwise, nodes cannot be created due to insufficient IP addresses.
        • The IP addresses in the container CIDR block must match the service scale. Otherwise, pods cannot be created due to insufficient IP addresses. The number of pods that can be created on each node also depends on other parameter settings.
        +

        Recommendation for CIDR Block Planning

        As described in Cluster Network Structure, network addresses in a cluster can be divided into three parts: node network, container network, and service network. When planning network addresses, consider the following aspects:

        +
        • The three CIDR blocks cannot overlap. Otherwise, a conflict occurs.
        • Ensure that each CIDR block has sufficient IP addresses.
          • The IP addresses in the node CIDR block must match the cluster scale. Otherwise, nodes cannot be created due to insufficient IP addresses.
          • The IP addresses in the container CIDR block must match the service scale. Otherwise, pods cannot be created due to insufficient IP addresses. The number of pods that can be created on each node also depends on other parameter settings.
        -

        In the following configuration, the cluster has 200 nodes, and the network model is the container tunnel network.

        -

        In this case, the number of available IP addresses in the selected node subnet must be greater than 200. Otherwise, nodes cannot be created due to insufficient IP addresses.

        -

        The container CIDR block is 10.0.0.0/16, and the number of available IP addresses is 65533. These IP addresses can be allocated to a maximum of 4096 nodes. (16 IP addresses are allocated to each node at a time. For details, see Container IP Address Management.)

        -

        -

        Example of Container Tunnel Network Access

        Create a cluster that uses the container tunnel network model.

        -

        Create a Deployment on the cluster.

        -
        kind: Deployment
        +

        Example of Container Tunnel Network Access

        Create a cluster that uses the container tunnel network model. Create a Deployment in the cluster.

        +
        kind: Deployment
         apiVersion: apps/v1
         metadata:
           name: example
        @@ -56,16 +51,16 @@ spec:
                       memory: 512Mi
               imagePullSecrets:
                 - name: default-secret
        -

        View the created pod.

        -
        $ kubectl get pod -owide
        +

        View the created pod.

        +
        $ kubectl get pod -owide
         NAME                       READY   STATUS    RESTARTS   AGE     IP          NODE           NOMINATED NODE   READINESS GATES
         example-5bdc5699b7-5rvq4   1/1     Running   0          3m28s   10.0.0.20   192.168.0.42   <none>           <none>
         example-5bdc5699b7-984j9   1/1     Running   0          3m28s   10.0.0.21   192.168.0.42   <none>           <none>
         example-5bdc5699b7-lfxkm   1/1     Running   0          3m28s   10.0.0.22   192.168.0.42   <none>           <none>
         example-5bdc5699b7-wjcmg   1/1     Running   0          3m28s   10.0.0.52   192.168.0.64   <none>           <none>
        -

        In this case, the IP address of the pod cannot be directly accessed outside the cluster in the same VPC. This is a feature of the container tunnel network.

        -

        However, the pod can be accessed from a node in the cluster or in the pod. As shown in the following figure, the pod can be accessed directly from the container.

        -
        $ kubectl exec -it example-5bdc5699b7-5rvq4 -- curl 10.0.0.21
        +

        In this case, the IP address of the pod cannot be directly accessed outside the cluster in the same VPC. This is a feature of the container tunnel network.

        +

        However, the pod can be accessed from a node in the cluster or in the pod. As shown in the following figure, the pod can be accessed directly from the container.

        +
        $ kubectl exec -it example-5bdc5699b7-5rvq4 -- curl 10.0.0.21
         <!DOCTYPE html>
         <html>
         <head>
        @@ -95,7 +90,7 @@ Commercial support is available at
         
        diff --git a/docs/cce/umn/cce_01_0283.html b/docs/cce/umn/cce_10_0283.html similarity index 63% rename from docs/cce/umn/cce_01_0283.html rename to docs/cce/umn/cce_10_0283.html index 0e5c7d46..3794d3e5 100644 --- a/docs/cce/umn/cce_01_0283.html +++ b/docs/cce/umn/cce_10_0283.html @@ -1,41 +1,38 @@ - +

        VPC Network

        -

        Model Definition

        The VPC network uses VPC routing to integrate with the underlying network. This network model is suitable for performance-intensive scenarios. The maximum number of nodes allowed in a cluster depends on the VPC route quota. Each node is assigned a CIDR block of a fixed size. This networking model is free from tunnel encapsulation overhead and outperforms the container tunnel network model. In addition, as VPC routing includes routes to node IP addresses and the container CIDR block, container pods in a cluster can be directly accessed from outside the cluster.
        Figure 1 VPC network model
        +

        Model Definition

        The VPC network uses VPC routing to integrate with the underlying network. This network model is suitable for performance-intensive scenarios. The maximum number of nodes allowed in a cluster depends on the VPC route quota. Each node is assigned a CIDR block of a fixed size. This networking model is free from tunnel encapsulation overhead and outperforms the container tunnel network model. In addition, as VPC routing includes routes to node IP addresses and the container CIDR block, container pods in a cluster can be directly accessed from outside the cluster.
        Figure 1 VPC network model
        -

        Pod-to-pod communication

        -
        • On the same node: Packets are directly forwarded through IPVlan.
        • Across nodes: Packets are forwarded to the default gateway through default routes, and then to the peer node via the VPC routes.
        +

        Pod-to-pod communication

        +
        • On the same node: Packets are directly forwarded through IPVlan.
        • Across nodes: Packets are forwarded to the default gateway through default routes, and then to the peer node via the VPC routes.
        -

        Advantages and Disadvantages

        Advantages

        -
        • No tunnel encapsulation is required, so network problems are easy to locate and the performance is high.
        • External networks in a VPC can be directly connected to container IP addresses.
        -

        Disadvantages

        -
        • The number of nodes is limited by the VPC route quota.
        • Each node is assigned a CIDR block of a fixed size, which leads to a waste of IP addresses in the container CIDR block.
        • Pods cannot directly use functionalities such as EIPs and security groups.
        +

        Advantages and Disadvantages

        Advantages

        +
        • No tunnel encapsulation is required, so network problems are easy to locate and the performance is high.
        • External networks in a VPC can be directly connected to container IP addresses.
        +

        Disadvantages

        +
        • The number of nodes is limited by the VPC route quota.
        • Each node is assigned a CIDR block of a fixed size, which leads to a waste of IP addresses in the container CIDR block.
        • Pods cannot directly use functionalities such as EIPs and security groups.
        -

        Applicable Scenarios

        • High performance requirements: As no tunnel encapsulation is required, the VPC network model delivers the performance close to that of a VPC network when compared with the container tunnel network model. Therefore, the VPC network model is applicable to scenarios that have high requirements on performance, such as AI computing and big data computing.
        • Small- and medium-scale networking: The VPC network is limited by the VPC route quota. Currently, a maximum of 200 nodes are supported by default. If there are large-scale networking requirements, you can increase the VPC route quota.
        +

        Applicable Scenarios

        • High performance requirements: As no tunnel encapsulation is required, the VPC network model delivers performance close to that of the underlying VPC network, outperforming the container tunnel network model. Therefore, it is suitable for scenarios with high performance requirements, such as AI computing and big data computing.
        • Small- and medium-scale networking: The VPC network is limited by the VPC route quota. Currently, a maximum of 200 nodes are supported by default. If you have larger-scale networking requirements, you can increase the VPC route quota.
        -

        Container IP Address Management

        The VPC network allocates container IP addresses according to the following rules:

        -
        • The container CIDR block is allocated separately.
        • IP addresses are allocated by node. One CIDR block with a fixed size (which is configurable) is allocated to each node in a cluster from the container CIDR block.
        • The container CIDR block cyclically allocates CIDR blocks to new nodes in sequence.
        • Pods scheduled to a node are cyclically allocated IP addresses from CIDR blocks allocated to the node.
        -
        Figure 2 IP address management of the VPC network
        -

        Maximum number of nodes that can be created in the cluster using the VPC network = Number of IP addresses in the container CIDR block /Number of IP addresses in the CIDR block allocated to the node by the container CIDR block

        -

        For example, if the container CIDR block is 172.16.0.0/16, the number of IP addresses is 65536. The mask of the container CIDR block allocated to the node is 25. That is, the number of container IP addresses on each node is 128. Therefore, a maximum of 512 (65536/128) nodes can be created. The number of nodes that can be created in a cluster also depends on the node network and cluster scale.

        +

        Container IP Address Management

        The VPC network allocates container IP addresses according to the following rules:

        +
        • The container CIDR block is allocated separately.
        • IP addresses are allocated by node. One CIDR block with a fixed size (which is configurable) is allocated to each node in a cluster from the container CIDR block.
        • The container CIDR block cyclically allocates CIDR blocks to new nodes in sequence.
        • Pods scheduled to a node are cyclically allocated IP addresses from CIDR blocks allocated to the node.
        +
        Figure 2 IP address management of the VPC network
        +

        Maximum number of nodes that can be created in a cluster using the VPC network = Number of IP addresses in the container CIDR block/Number of IP addresses in the CIDR block allocated to each node from the container CIDR block

        +

        For example, if the container CIDR block is 172.16.0.0/16, it contains 65536 IP addresses. If the CIDR block allocated to each node has a 25-bit mask, each node gets 128 container IP addresses, so a maximum of 512 (65536/128) nodes can be created. In addition, the number of nodes that can be created in a cluster also depends on the node network and cluster scale.

        -

        Recommendation for CIDR Block Planning

        As described in Cluster Network Structure, network addresses in a cluster can be divided into three parts: node network, container network, and service network. When planning network addresses, consider the following aspects:

        -
        • The three CIDR blocks cannot overlap. Otherwise, a conflict occurs.
        • Ensure that each CIDR block has sufficient IP addresses.
          • The IP addresses in the node CIDR block must match the cluster scale. Otherwise, nodes cannot be created due to insufficient IP addresses.
          • The IP addresses in the container CIDR block must match the service scale. Otherwise, pods cannot be created due to insufficient IP addresses. The number of pods that can be created on each node also depends on other parameter settings.
          +

          Recommendation for CIDR Block Planning

          As described in Cluster Network Structure, network addresses in a cluster can be divided into three parts: node network, container network, and service network. When planning network addresses, consider the following aspects:

          +
          • The three CIDR blocks cannot overlap. Otherwise, a conflict occurs.
          • Ensure that each CIDR block has sufficient IP addresses.
            • The IP addresses in the node CIDR block must match the cluster scale. Otherwise, nodes cannot be created due to insufficient IP addresses.
            • The IP addresses in the container CIDR block must match the service scale. Otherwise, pods cannot be created due to insufficient IP addresses. The number of pods that can be created on each node also depends on other parameter settings.
          -

          Assume that a cluster contains 200 nodes and the network model is VPC network.

          -

          In this case, the number of available IP addresses in the selected node subnet must be greater than 200. Otherwise, nodes cannot be created due to insufficient IP addresses.

          -

          The container CIDR block is 10.0.0.0/16, and the number of available IP addresses is 65536. As described in Container IP Address Management, the VPC network is allocated a CIDR block with the fixed size (using the mask to determine the maximum number of container IP addresses allocated to each node). For example, if the upper limit is 128, the cluster supports a maximum of 512 (65536/128) nodes, including the three master nodes.

          -

          +

          Assume that a cluster contains 200 nodes and the network model is VPC network.

          +

          In this case, the number of available IP addresses in the selected node subnet must be greater than 200. Otherwise, nodes cannot be created due to insufficient IP addresses.

          +

          The container CIDR block is 10.0.0.0/16, providing 65536 available IP addresses. As described in Container IP Address Management, the VPC network allocates a fixed-size CIDR block to each node (the mask determines the maximum number of container IP addresses per node). For example, if the per-node upper limit is 128, the cluster supports a maximum of 512 (65536/128) nodes, including the three master nodes.

          -

          Example of VPC Network Access

          Create a cluster using the VPC network model.

          -
          Figure 3 Cluster network
          -

          The cluster contains one node.

          -
          $ kubectl get node
          +

          Example of VPC Network Access

          Create a cluster using the VPC network model. The cluster contains one node.

          +
          $ kubectl get node
           NAME           STATUS   ROLES    AGE   VERSION
           192.168.0.99   Ready    <none>   9d    v1.17.17-r0-CCE21.6.1.B004-17.37.5
          -

          Check the VPC routing table. The destination address 172.16.0.0/25 is the container CIDR block allocated to the node, and the next hop is the corresponding node. When the container IP address is accessed, the VPC route forwards the access request to the next-hop node. This indicates that the VPC network model uses VPC routes.

          -

          Create a Deployment on the cluster.

          -
          kind: Deployment
          +

          Check the VPC routing table. The destination address 172.16.0.0/25 is the container CIDR block allocated to the node, and the next hop is the corresponding node. When the container IP address is accessed, the VPC route forwards the access request to the next-hop node. This indicates that the VPC network model uses VPC routes.

          +

          Create a Deployment in the cluster.

          +
          kind: Deployment
           apiVersion: apps/v1
           metadata:
             name: example
          @@ -55,16 +52,16 @@ spec:
                     image: 'nginx:perl'
                 imagePullSecrets:
                   - name: default-secret
          -

          Check the pod.

          -
          $ kubectl get pod -owide
          +

          Check the pod.

          +
          $ kubectl get pod -owide
           NAME                       READY   STATUS    RESTARTS   AGE   IP           NODE           NOMINATED NODE   READINESS GATES
           example-86b9779494-l8qrw   1/1     Running   0          14s   172.16.0.6   192.168.0.99   <none>           <none>
           example-86b9779494-svs8t   1/1     Running   0          14s   172.16.0.7   192.168.0.99   <none>           <none>
           example-86b9779494-x8kl5   1/1     Running   0          14s   172.16.0.5   192.168.0.99   <none>           <none>
           example-86b9779494-zt627   1/1     Running   0          14s   172.16.0.8   192.168.0.99   <none>           <none>
          -

          In this case, the IP address of the pod can be directly accessed from a node outside the cluster in the same VPC. This is a feature of the VPC network feature.

          -

          The pod can also be accessed from a node in the same cluster or in the pod. As shown in the following figure, the pod can be accessed directly from the container.

          -
          $ kubectl exec -it example-86b9779494-l8qrw -- curl 172.16.0.7
          +

          In this case, the pod IP address can be directly accessed from a node outside the cluster in the same VPC. This is a feature of the VPC network.

          +

          The pod can also be accessed from a node in the same cluster or from another pod. As shown in the following example, the pod can be accessed directly from a container.

          +
          $ kubectl exec -it example-86b9779494-l8qrw -- curl 172.16.0.7
           <!DOCTYPE html>
           <html>
           <head>
          @@ -94,7 +91,7 @@ Commercial support is available at
           
          diff --git a/docs/cce/umn/cce_10_0284.html b/docs/cce/umn/cce_10_0284.html new file mode 100644 index 00000000..cf1b4c20 --- /dev/null +++ b/docs/cce/umn/cce_10_0284.html @@ -0,0 +1,72 @@ + + +

          Cloud Native Network 2.0

          +

          Model Definition

          Developed by CCE, Cloud Native Network 2.0 deeply integrates Elastic Network Interfaces (ENIs) and sub-ENIs of Virtual Private Cloud (VPC). Container IP addresses are allocated from the VPC CIDR block. ELB passthrough networking is supported to direct access requests to containers. Security groups and elastic IPs (EIPs) are bound to deliver high performance.

          +
          Figure 1 Cloud Native Network 2.0
          +

          Pod-to-pod communication

          +
          • Pods on BMS nodes use ENIs, whereas pods on ECS nodes use Sub-ENIs. Sub-ENIs are attached to ENIs through VLAN sub-interfaces.
          • On the same node: Packets are forwarded through the VPC ENI or sub-ENI.
          • Across nodes: Packets are forwarded through the VPC ENI or sub-ENI.
          +
          +

          Advantages and Disadvantages

          Advantages

          +
          • As the container network directly uses the VPC, network problems are easy to locate and the performance is the highest among the models.
          • External networks in a VPC can be directly connected to container IP addresses.
          • The load balancing, security group, and EIP capabilities provided by VPC can be used directly.
          +

          Disadvantages

          +

          The container network directly uses VPC, which occupies the VPC address space. Therefore, you must properly plan the container CIDR block before creating a cluster.

          +
          +

          Application Scenarios

          • High performance requirements and use of other VPC network capabilities: Cloud Native Network 2.0 directly uses the VPC, which delivers almost the same performance as the VPC network. Therefore, it is applicable to scenarios that have high requirements on bandwidth and latency, such as live streaming and e-commerce flash sales.
          • Large-scale networking: Cloud Native Network 2.0 supports a maximum of 2000 ECS nodes and 100,000 containers.
          +
          +

          Recommendation for CIDR Block Planning

          As described in Cluster Network Structure, network addresses in a cluster can be divided into three parts: node network, container network, and service network. When planning network addresses, consider the following aspects:

          +
          • The three CIDR blocks cannot overlap. Otherwise, a conflict occurs. All subnets (including those created from the secondary CIDR block) in the VPC where the cluster resides cannot conflict with the container and Service CIDR blocks.
          • Ensure that each CIDR block has sufficient IP addresses.
            • The IP addresses in the node CIDR block must match the cluster scale. Otherwise, nodes cannot be created due to insufficient IP addresses.
            • The IP addresses in the container CIDR block must match the service scale. Otherwise, pods cannot be created due to insufficient IP addresses.
            +
          +

          In the Cloud Native Network 2.0 model, the container CIDR block and node CIDR block share the network addresses in a VPC. You are advised not to use the same subnet for containers and nodes. Otherwise, containers or nodes may fail to be created due to insufficient IP resources.

          +

          In addition, a subnet can be added to the container CIDR block after a cluster is created to increase the number of available IP addresses. In this case, ensure that the added subnet does not conflict with other subnets in the container CIDR block.

          +
          Figure 2 Configuring CIDR blocks
          +
          +

          Example of Cloud Native Network 2.0 Access

          Create a CCE Turbo cluster, which contains three ECS nodes.

          +

          Access the details page of one node. You can see that the node has one primary NIC and one extended NIC, and both of them are ENIs. The extended NIC belongs to the container CIDR block and is used to attach sub-ENIs to pods.

          +

          Create a Deployment in the cluster.

          +
          kind: Deployment
          +apiVersion: apps/v1
          +metadata:
          +  name: example
          +  namespace: default
          +spec:
          +  replicas: 6
          +  selector:
          +    matchLabels:
          +      app: example
          +  template:
          +    metadata:
          +      labels:
          +        app: example
          +    spec:
          +      containers:
          +        - name: container-0
          +          image: 'nginx:perl'
          +          resources:
          +            limits:
          +              cpu: 250m
          +              memory: 512Mi
          +            requests:
          +              cpu: 250m
          +              memory: 512Mi
          +      imagePullSecrets:
          +        - name: default-secret
          +

          View the created pod.

          +
          $ kubectl get pod -owide
          +NAME                       READY   STATUS    RESTARTS   AGE   IP            NODE         NOMINATED NODE   READINESS GATES
          +example-5bdc5699b7-54v7g   1/1     Running   0          7s    10.1.18.2     10.1.0.167   <none>           <none>
          +example-5bdc5699b7-6dzx5   1/1     Running   0          7s    10.1.18.216   10.1.0.186   <none>           <none>
          +example-5bdc5699b7-gq7xs   1/1     Running   0          7s    10.1.16.63    10.1.0.144   <none>           <none>
          +example-5bdc5699b7-h9rvb   1/1     Running   0          7s    10.1.16.125   10.1.0.167   <none>           <none>
          +example-5bdc5699b7-s9fts   1/1     Running   0          7s    10.1.16.89    10.1.0.144   <none>           <none>
          +example-5bdc5699b7-swq6q   1/1     Running   0          7s    10.1.17.111   10.1.0.167   <none>           <none>
          +

          The IP addresses of all pods belong to sub-ENIs, which are attached to the ENI (extended NIC) of the node.

          +

          For example, the extended NIC of node 10.1.0.167 is 10.1.17.172. On the Network Interfaces page of the Network Console, you can see that three sub-ENIs are attached to the extended NIC 10.1.17.172, and their IP addresses are the pod IP addresses.

          +

          In the VPC, the IP address of the pod can be successfully accessed.

          +
          +
          +
          + +
          + diff --git a/docs/cce/umn/cce_10_0285.html b/docs/cce/umn/cce_10_0285.html new file mode 100644 index 00000000..b9561a15 --- /dev/null +++ b/docs/cce/umn/cce_10_0285.html @@ -0,0 +1,26 @@ + + +

          Managing Namespaces

          +

          Using Namespaces

          • When creating a workload, you can select a namespace to isolate resources or users.
          • When querying workloads, you can select a namespace to view all workloads in the namespace.
          +
          +

          Isolating Namespaces

          • Isolating namespaces by environment

            An application generally goes through the development, joint debugging, and testing stages before it is launched. In this process, the workloads deployed in each environment (stage) are the same but need to be logically separated. There are two ways to separate them:

            +
            • Group them in different clusters for different environments.

              Resources cannot be shared among different clusters. In addition, services in different environments can access each other only through load balancing.

              +
            • Group them in different namespaces for different environments.

              Workloads in the same namespace can access each other by using the Service name. Cross-namespace access can be implemented by using the Service name together with the namespace name, as shown in the sketch after this list.

              +

              The following figure shows namespaces created for the development, joint debugging, and testing environments, respectively.

              +
              Figure 1 One namespace for one environment
              +
            +
          • Isolating namespaces by application

            You are advised to use this method if a large number of workloads are deployed in the same environment. For example, in the following figure, different namespaces (APP1 and APP2) are created to logically manage workloads as different groups. Workloads in the same namespace access each other using the Service name, and workloads in different namespaces access each other using the Service name together with the namespace name.

            +
            Figure 2 Grouping workloads into different namespaces
            +
          +
          +
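          As referenced above, the following is a minimal sketch of cross-namespace access by Service name and namespace name. It assumes a Service named nginx in a namespace named test (both names are illustrative); a workload in another namespace reaches it through the cluster DNS name nginx.test (or the fully qualified name nginx.test.svc.cluster.local).

          apiVersion: v1
          kind: Service
          metadata:
            name: nginx              # illustrative Service name
            namespace: test          # illustrative namespace
          spec:
            selector:
              app: nginx
            ports:
              - port: 80
                targetPort: 80
          # Access from a pod in the same namespace:   curl http://nginx
          # Access from a pod in another namespace:    curl http://nginx.test
          # Fully qualified form:                      curl http://nginx.test.svc.cluster.local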

          Deleting a Namespace

          If a namespace is deleted, all resources (such as workloads, jobs, and ConfigMaps) in this namespace will also be deleted. Exercise caution when deleting a namespace.

          +
          1. Log in to the CCE console and access the cluster console.
          2. In the navigation pane, choose Namespaces, select the target namespace, and choose More > Delete.

            Follow the prompts to delete the namespace. The default namespaces cannot be deleted.

            +

          +
          +
          +
          + +
          + diff --git a/docs/cce/umn/cce_10_0287.html b/docs/cce/umn/cce_10_0287.html new file mode 100644 index 00000000..089f9393 --- /dev/null +++ b/docs/cce/umn/cce_10_0287.html @@ -0,0 +1,124 @@ + + +

          Setting a Resource Quota

          +

          Namespace-level resource quotas limit the amount of resources available to teams or users when these teams or users use the same cluster. The quotas include the total number of a type of objects and the total amount of compute resources (CPU and memory) consumed by the objects.

          +

          Usage

          By default, running pods can use the CPUs and memory of a node without restrictions. This means the pods in a namespace may exhaust all resources of the cluster.

          +

          Kubernetes provides namespaces for you to group workloads in a cluster. By setting resource quotas for each namespace, you can prevent resource exhaustion and ensure cluster reliability.

          +

          You can configure quotas for resources such as CPU, memory, and the number of pods in a namespace. For more information, see Resource Quotas.

          +
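          As a concrete reference, the following is a minimal sketch of a namespace-level ResourceQuota object. The quota name and values are illustrative; adjust them to your service scale.

          apiVersion: v1
          kind: ResourceQuota
          metadata:
            name: compute-quota      # illustrative name
            namespace: default
          spec:
            hard:
              pods: "100"            # maximum number of pods in the namespace
              requests.cpu: "20"     # total CPU requests allowed in the namespace
              requests.memory: 50Gi  # total memory requests allowed in the namespace
              limits.cpu: "40"       # total CPU limits allowed in the namespace
              limits.memory: 100Gi   # total memory limits allowed in the namespace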

          The following table lists the recommended maximum number of pods for clusters of different sizes.

          + +
          + + + + + + + + + + + + + + + + +

          Cluster Scale

          +

          Recommended Number of Pods

          +

          50 nodes

          +

          2,500 pods

          +

          200 nodes

          +

          10,000 pods

          +

          1,000 nodes

          +

          30,000 pods

          +

          2,000 nodes

          +

          50,000 pods

          +
          +
          +

          For clusters of v1.21 and later, default resource quotas are created when a namespace is created if enable-resource-quota is enabled in Managing Cluster Components. Table 1 lists the default resource quotas based on cluster specifications. You can modify them according to your service requirements.

          + +
          + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          Table 1 Default resource quotas

          Cluster Scale

          +

          Pod

          +

          Deployment

          +

          Secret

          +

          ConfigMap

          +

          Service

          +

          50 nodes

          +

          2000

          +

          1000

          +

          1000

          +

          1000

          +

          1000

          +

          200 nodes

          +

          2000

          +

          1000

          +

          1000

          +

          1000

          +

          1000

          +

          1,000 nodes

          +

          5000

          +

          2000

          +

          2000

          +

          2000

          +

          2000

          +

          2,000 nodes

          +

          5000

          +

          2000

          +

          2000

          +

          2000

          +

          2000

          +
          +
          +
          +

          Notes and Constraints

          Kubernetes provides optimistic concurrency control (OCC), also known as optimistic locking, for frequent data updates. Optimistic locking is implemented through the resourceVersion field in the object metadata, which identifies the internal version number of the object and changes whenever the object is modified. kube-apiserver uses this field to check whether an object has been modified: when it receives an update request containing resourceVersion, it compares the requested version with the version stored on the server. If they differ, the object has been modified since the client read it, and the API server returns a conflict error (409). The client then needs to fetch the latest object, reapply the change, and submit it again. A resource quota limits the total resource consumption of each namespace and records the resource information in the cluster. Therefore, after the enable-resource-quota option is enabled, the probability of such conflicts increases in large-scale concurrency scenarios, which affects the performance of batch resource creation.

          +
          +
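          For illustration only, the following sketch shows where the resourceVersion field appears in object metadata; the values are hypothetical. An update submitted with a stale resourceVersion is rejected with a 409 conflict, as described above.

          apiVersion: v1
          kind: ResourceQuota
          metadata:
            name: default
            namespace: default
            resourceVersion: "123456"   # hypothetical internal version number; changes on every update
          spec:
            hard:
              pods: "100"
          # Submitting an update that still carries an older resourceVersion (for example "123450")
          # causes kube-apiserver to return a 409 conflict; fetch the latest object and retry.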

          Procedure

          1. Log in to the CCE console and access the cluster console.
          2. In the navigation pane, click Namespaces.
          3. Click Quota Management next to the target namespace.

            This operation cannot be performed on system namespaces kube-system and kube-public.

            +

          4. Set the resource quotas and click OK.

            • After setting CPU and memory quotas for a namespace, you must specify the request and limit values of CPU and memory resources when creating a workload. Otherwise, the workload cannot be created. If the quota of a resource is set to 0, the resource usage is not limited.
            • Accumulated quota usage includes the resources used by CCE to create default components, such as the Kubernetes Services (which can be viewed using kubectl) created under the default namespace. Therefore, you are advised to set a resource quota greater than expected to reserve resources for creating default components.
            +
            +

          +
          +
          +
          + +
          + diff --git a/docs/cce/umn/cce_10_0288.html b/docs/cce/umn/cce_10_0288.html new file mode 100644 index 00000000..302830b6 --- /dev/null +++ b/docs/cce/umn/cce_10_0288.html @@ -0,0 +1,142 @@ + + +

          Security Group Policies

          +

          When the Cloud Native Network 2.0 model is used, pods use VPC ENIs or sub-ENIs for networking, so you can directly bind security groups and EIPs to pods. CCE provides a custom resource object named SecurityGroup for associating security groups with pods. You can use SecurityGroups for workloads that have specific security isolation requirements.

          +

          Notes and Constraints

          • This function is supported for CCE Turbo clusters of v1.19 and later. Upgrade your CCE Turbo clusters if their versions are earlier than v1.19.
          • A workload can be bound to a maximum of five security groups.
          +
          +

          Using the Console

          1. Log in to the CCE console and access the cluster console.
          2. In the navigation pane, choose Workloads. On the displayed page, click the name of the target workload.
          3. Switch to the Security Group Policy tab page and click Create.

            +

          4. Set the parameters as described in Table 1.

            +

            + + + + + + + + + + + + + +
            Table 1 Configuration parameters

            Parameter

            +

            Description

            +

            Example Value

            +

            Security Group Policy Name

            +

            Enter a security policy name.

            +

            Enter 1 to 63 characters. The value must start with a lowercase letter and cannot end with a hyphen (-). Only lowercase letters, digits, and hyphens (-) are allowed.

            +

            security-group

            +

            Associate Security Group

            +

            The selected security group will be bound to the ENI or supplementary ENI of the selected workload. A maximum of five security groups can be selected from the drop-down list. You must select one or more security groups to create a SecurityGroup.

            +

            If no security group has been created yet, click Create Security Group. After the security group is created, click the refresh button.

            +
            NOTICE:
            • A maximum of 5 security groups can be selected.
            • Hover the cursor over the icon next to the security group name to view details about the security group.
            +
            +

            64566556-bd6f-48fb-b2c6-df8f44617953

            +

            5451f1b0-bd6f-48fb-b2c6-df8f44617953

            +
            +
            +

          5. After setting the parameters, click OK.

            After the security group policy is created, the system automatically returns to the security group policy list page, where you can see the new policy.

            +

          +
          +

          Using kubectl

          1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
          2. Create a description file named securitygroup-demo.yaml.

            vi securitygroup-demo.yaml

            +

            The following example creates a SecurityGroup that binds all workloads with the label app: nginx to two security groups (64566556-bd6f-48fb-b2c6-df8f44617953 and 5451f1b0-bd6f-48fb-b2c6-df8f44617953) that have been created in advance:

            +
            apiVersion: crd.yangtse.cni/v1
            +kind: SecurityGroup
            +metadata:
            +  name: demo
            +  namespace: default
            +spec:
            +  podSelector:
            +    matchLabels:
            +      app: nginx    
            +  securityGroups:
            +  - id: 64566556-bd6f-48fb-b2c6-df8f44617953
            +  - id: 5451f1b0-bd6f-48fb-b2c6-df8f44617953
            +
            Table 2 describes the parameters in the YAML file. +
            + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            Table 2 Description

            Field

            +

            Description

            +

            Mandatory

            +

            apiVersion

            +

            API version. The value is crd.yangtse.cni/v1.

            +

            Yes

            +

            kind

            +

            Type of the object to be created.

            +

            Yes

            +

            metadata

            +

            Metadata definition of the resource object.

            +

            Yes

            +

            name

            +

            Name of the SecurityGroup.

            +

            Yes

            +

            namespace

            +

            Name of the namespace.

            +

            Yes

            +

            spec

            +

            Detailed description of the SecurityGroup.

            +

            Yes

            +

            podSelector

            +

            Used to define the workload to be associated with security groups in the SecurityGroup.

            +

            Yes

            +

            securityGroups

            +

            Security group ID.

            +

            Yes

            +
            +
            +
            +

          3. Run the following command to create the SecurityGroup:

            kubectl create -f securitygroup-demo.yaml

            +

            If the following information is displayed, the SecurityGroup has been created:

            +
            securitygroup.crd.yangtse.cni/demo created
            +

          4. Run the following command to view the SecurityGroup:

            kubectl get sg

            +

            If demo appears in the command output, the SecurityGroup has been created successfully.

            +
            NAME                       POD-SELECTOR                      AGE
            +all-no                     map[matchLabels:map[app:nginx]]   4h1m
            +s001test                   map[matchLabels:map[app:nginx]]   19m
            +demo                       map[matchLabels:map[app:nginx]]   2m9s
            +

          +
          +
          +
          + +
          + diff --git a/docs/cce/umn/cce_01_0290.html b/docs/cce/umn/cce_10_0290.html similarity index 57% rename from docs/cce/umn/cce_01_0290.html rename to docs/cce/umn/cce_10_0290.html index b29087b3..58175b9e 100644 --- a/docs/cce/umn/cce_01_0290.html +++ b/docs/cce/umn/cce_10_0290.html @@ -1,30 +1,28 @@ - +

          Workload Scaling Mechanisms

          -

          Scaling policy priority: If you do not manually adjust the number of pods, auto scaling policies will take effect for resource scheduling. If manual scaling is triggered, auto scaling policies will be temporarily invalid.

          -
          -

          How HPA Works

          HPA is a controller that controls horizontal pod scaling. HPA periodically checks the pod metrics, calculates the number of replicas required to meet the target values configured for HPA resources, and then adjusts the value of the replicas field in the target resource object (such as a Deployment).

          -

          A prerequisite for auto scaling is that your container running data can be collected, such as number of cluster nodes/pods, and CPU and memory usage of containers. Kubernetes does not provide such monitoring capabilities itself. You can use extensions to monitor and collect your data. CCE integrates Metrics Server to realize such capabilities:

          -
          • Metrics Server is a cluster-wide aggregator of resource utilization data. Metrics Server collects metrics from the Summary API exposed by kubelet. These metrics are set for core Kubernetes resources, such as pods, nodes, containers, and Services. Metrics Server provides a set of standard APIs for external systems to collect these metrics.
          -

          HPA can work with Metrics Server to implement auto scaling based on the CPU and memory usage.

          +

          How HPA Works

          HPA is a controller that controls horizontal pod scaling. HPA periodically checks the pod metrics, calculates the number of replicas required to meet the target values configured for HPA resources, and then adjusts the value of the replicas field in the target resource object (such as a Deployment).

          +

          A prerequisite for auto scaling is that your container running data can be collected, such as number of cluster nodes/pods, and CPU and memory usage of containers. Kubernetes does not provide such monitoring capabilities itself. You can use extensions to monitor and collect your data. CCE integrates Metrics Server to realize such capabilities:

          +
          • Metrics Server is a cluster-wide aggregator of resource utilization data. Metrics Server collects metrics from the Summary API exposed by kubelet. These metrics are set for core Kubernetes resources, such as pods, nodes, containers, and Services. Metrics Server provides a set of standard APIs for external systems to collect these metrics.
          +

          HPA can work with Metrics Server to implement auto scaling based on the CPU and memory usage.

          -

          Two core modules of HPA:

          -
          • Data Source Monitoring

            The community provided only CPU- and memory-based HPA at the early stage. With the population of Kubernetes, developers need more custom metrics or monitoring information at the access layer for their own applications, for example, the QPS of the load balancer and the number of online users of the website. In response, the community defines a set of standard metric APIs to provide services externally through these aggregated APIs.

            -
            • metrics.k8s.io provides monitoring metrics related to the CPU and memory of pods and nodes.
            • custom.metrics.k8s.io provides custom monitoring metrics related to Kubernetes objects.
            • external.metrics.k8s.io provides metrics that come from external systems and are irrelevant to any Kubernetes resource metrics.
            -
          • Scaling Decision-Making Algorithms

            The HPA controller calculates the scaling ratio based on the current metric values and desired metric values using the following formula:

            -

            desiredReplicas = ceil[currentReplicas x (currentMetricValue/desiredMetricValue)]

            -

            For example, if the current metric value is 200m and the target value is 100m, the desired number of pods will be doubled according to the formula. In practice, pods may be constantly added or reduced. To ensure stability, the HPA controller is optimized from the following aspects:

            -
            • Cooldown interval: In v1.11 and earlier versions, Kubernetes introduced the startup parameters horizontal-pod-autoscaler-downscale-stabilization-window and horizontal-pod-autoScaler-upscale-stabilization-window to indicate the cooldown intervals after a scale-in and scale-out, respectively, in which no scaling operation will not be performed. In versions later than v1.14, the scheduling queue is introduced to store all decision-making suggestions detected within a period of time. Then, the system makes decisions based on all valid decision-making suggestions to minimize changes of the desired number of replicas to ensure stability.
            • Tolerance: It can be considered as a buffer zone. If the pod number changes can be tolerated, the number of pods remains unchanged.

              Use the formula: ratio = currentMetricValue/desiredMetricValue

              -

              When |ratio – 1.0| ≤ tolerance, scaling will not be performed.

              -

              When |ratio – 1.0| > tolerance, the desired value is calculated using the formula mentioned above.

              -

              The default value is 0.1 in the current community version.

              +

              Two core modules of HPA:

              +
              • Data Source Monitoring

                In the early stage, the community provided only CPU- and memory-based HPA. As Kubernetes gained popularity, developers needed more custom metrics or access-layer monitoring information for their applications, for example, the QPS of the load balancer or the number of online users of a website. In response, the community defined a set of standard metric APIs that expose these metrics externally through aggregated APIs.

                +
                • metrics.k8s.io provides monitoring metrics related to the CPU and memory of pods and nodes.
                • custom.metrics.k8s.io provides custom monitoring metrics related to Kubernetes objects.
                • external.metrics.k8s.io provides metrics that come from external systems and are irrelevant to any Kubernetes resource metrics.
                +
              • Scaling Decision-Making Algorithms

                The HPA controller calculates the scaling ratio based on the current metric values and desired metric values using the following formula:

                +

                desiredReplicas = ceil[currentReplicas x (currentMetricValue/desiredMetricValue)]

                +

                For example, if the current metric value is 200m and the target value is 100m, the desired number of pods will be doubled according to the formula. In practice, pods may be constantly added or reduced. To ensure stability, the HPA controller is optimized from the following aspects:

                +
                • Cooldown interval: In v1.11 and earlier versions, Kubernetes introduced the startup parameters horizontal-pod-autoscaler-downscale-stabilization-window and horizontal-pod-autoscaler-upscale-stabilization-window to indicate the cooldown intervals after a scale-in and scale-out, respectively, in which no scaling operation is performed. In versions later than v1.14, a scheduling queue is introduced to store all decision-making suggestions detected within a period of time. The system then makes decisions based on all valid suggestions to minimize changes of the desired number of replicas and ensure stability.
                • Tolerance: It can be considered as a buffer zone. If the pod number changes can be tolerated, the number of pods remains unchanged.

                  Use the formula: ratio = currentMetricValue/desiredMetricValue

                  +

                  When |ratio – 1.0| ≤ tolerance, scaling will not be performed.

                  +

                  When |ratio – 1.0| > tolerance, the desired value is calculated using the formula mentioned above.

                  +

                  The default value is 0.1 in the current community version.

              -

              The HPA performs scaling based on metric thresholds. Common metrics include the CPU and memory usage. You can also set custom metrics, such as the QPS and number of connections, to trigger scaling. However, metric-based scaling brings in latency of minutes generated during data collection, determination, and scaling phases. Such latency may cause high CPU usage and slow response. To solve this problem, CCE allows you to configure scheduled policies to scale resources regularly for applications with periodic changes.

              +

              The HPA performs scaling based on metric thresholds. Common metrics include the CPU and memory usage. You can also set custom metrics, such as the QPS and number of connections, to trigger scaling. However, metric-based scaling brings in latency of minutes generated during data collection, determination, and scaling phases. Such latency may cause high CPU usage and slow response. To solve this problem, CCE allows you to configure scheduled policies to scale resources regularly for applications with periodic changes.
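              To make the mechanism concrete, the following is a minimal sketch of a CPU-based HPA that works with Metrics Server. The target Deployment name (example) and the 50% utilization target are illustrative, and the autoscaling/v2 API assumes a cluster version that serves it (older clusters may use autoscaling/v2beta2).

              apiVersion: autoscaling/v2
              kind: HorizontalPodAutoscaler
              metadata:
                name: example-hpa
                namespace: default
              spec:
                scaleTargetRef:            # workload whose replicas field is adjusted
                  apiVersion: apps/v1
                  kind: Deployment
                  name: example
                minReplicas: 2
                maxReplicas: 10
                metrics:
                  - type: Resource
                    resource:
                      name: cpu
                      target:
                        type: Utilization
                        averageUtilization: 50   # desired average CPU utilization across pods

              With this configuration, the controller applies the formula above: if the measured average utilization is 100% against the 50% target, the desired replica count doubles, up to maxReplicas.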

          diff --git a/docs/cce/umn/cce_10_0291.html b/docs/cce/umn/cce_10_0291.html new file mode 100644 index 00000000..5341dde3 --- /dev/null +++ b/docs/cce/umn/cce_10_0291.html @@ -0,0 +1,20 @@ + + +

          Scaling a Node

          +

          +
          + + diff --git a/docs/cce/umn/cce_10_0293.html b/docs/cce/umn/cce_10_0293.html new file mode 100644 index 00000000..dc8e0b53 --- /dev/null +++ b/docs/cce/umn/cce_10_0293.html @@ -0,0 +1,20 @@ + + +

          Scaling a Workload

          +

          +
          + + diff --git a/docs/cce/umn/cce_10_0296.html b/docs/cce/umn/cce_10_0296.html new file mode 100644 index 00000000..26efeae2 --- /dev/null +++ b/docs/cce/umn/cce_10_0296.html @@ -0,0 +1,27 @@ + + +

          Node Scaling Mechanisms

          +

          Kubernetes HPA is designed for pods. However, if cluster resources are insufficient, the only option is to add nodes. Scaling cluster nodes used to be laborious, but on the cloud you can add or delete nodes by simply calling APIs.

          +

          autoscaler is a component provided by Kubernetes for auto scaling of cluster nodes based on the pod scheduling status and resource usage.

          +

          Prerequisites

          Before using the node scaling function, you must install the autoscaler add-on of v1.13.8 or later.

          +
          +

          How autoscaler Works

          The cluster autoscaler (CA) goes through two processes.

          +
          • Scale-out: The CA checks all unschedulable pods every 10 seconds and selects a node group that meets the requirements for scale-out based on the policy you set.
          • Scale-in: The CA scans all nodes every 10 seconds. If the resource requests of the pods on a node are lower than the user-defined scale-in threshold (percentage), the CA simulates whether the pods on the node can be migrated to other nodes. If yes, the node will be removed after an idle time window.
          +

          As described above, if a cluster node is idle for a period of time (10 minutes by default), scale-in is triggered, and the idle node is removed.

          +

          However, a node cannot be removed from a cluster if the following pods exist:

          +
          1. Pods that do not meet specific requirements set in PodDisruptionBudget
          2. Pods that cannot be scheduled to other nodes due to constraints such as affinity and anti-affinity policies
          3. Pods that have the "cluster-autoscaler.kubernetes.io/safe-to-evict": "false" annotation
          4. Pods (except those created by kube-system DaemonSet) that exist in the kube-system namespace on the node
          5. Pods that are not created by the controller (Deployment/ReplicaSet/job/StatefulSet)
          +
          +
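          As referenced in item 3 above, the following is a minimal sketch of a Deployment whose pod template carries the safe-to-evict annotation set to "false", which prevents the autoscaler from removing the node hosting such pods during scale-in. The workload name and image are illustrative.

          kind: Deployment
          apiVersion: apps/v1
          metadata:
            name: important-app        # illustrative name
            namespace: default
          spec:
            replicas: 1
            selector:
              matchLabels:
                app: important-app
            template:
              metadata:
                labels:
                  app: important-app
                annotations:
                  cluster-autoscaler.kubernetes.io/safe-to-evict: "false"   # blocks eviction of this pod during scale-in
              spec:
                containers:
                  - name: container-0
                    image: 'nginx:perl'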

          autoscaler Architecture

          Figure 1 shows the autoscaler architecture and its core modules:

          +
          Figure 1 autoscaler architecture
          +

          Description

          +
          • Estimator: Evaluates the number of nodes to be added to each node pool to host unschedulable pods.
          • Simulator: Finds the nodes that meet the scale-in conditions in the scale-in scenario.
          • Expander: Selects an optimal node from the node pool picked out by the Estimator based on the user-defined policy in the scale-out scenario. Currently, the Expander has the following policies:
            • Random: Selects a node pool randomly. If you have not specified a policy, Random is set by default.
            • most-pods: Selects the node pool that can host the largest number of unschedulable pods after the scale-out. If multiple node pools meet the requirement, a random node pool will be selected.
            • least-waste: Selects the node pool that has the least CPU or memory resource waste after scale-out.
            • price: Selects the node pool in which the to-be-added nodes cost least for scale-out.
            • priority: Selects the node pool with the highest weight. The weights are user-defined.
            +
          +

          Currently, CCE supports all policies except price. By default, CCE add-ons use the least-waste policy.

          +
          +
          +
          + +
          + diff --git a/docs/cce/umn/cce_10_0298.html b/docs/cce/umn/cce_10_0298.html new file mode 100644 index 00000000..350af8eb --- /dev/null +++ b/docs/cce/umn/cce_10_0298.html @@ -0,0 +1,40 @@ + + +

          Creating a CCE Turbo Cluster

          +

          CCE Turbo clusters run on a cloud native infrastructure that features software-hardware synergy to support passthrough networking, high security and reliability, and intelligent scheduling.

          +

          CCE Turbo clusters are paired with the Cloud Native Network 2.0 model for large-scale, high-performance container deployment. Containers are assigned IP addresses from the VPC CIDR block. Containers and nodes can belong to different subnets. Access requests from external networks in a VPC can be directly routed to container IP addresses, which greatly improves networking performance. It is recommended that you go through Cloud Native Network 2.0 to understand the features and network planning of each CIDR block of Cloud Native Network 2.0.

          +

          Notes and Constraints

          • During the node creation, software packages are downloaded from OBS using the domain name. You need to use a private DNS server to resolve the OBS domain name, and configure the subnet where the node resides with a private DNS server address. When you create a subnet, the private DNS server is used by default. If you change the subnet DNS, ensure that the DNS server in use can resolve the OBS domain name.
          • You can create a maximum of 50 clusters in a single region.
          • CCE Turbo clusters support only Cloud Native Network 2.0. For details about this network model, see Cloud Native Network 2.0.
          • After a cluster is created, the following items cannot be changed:
            • Cluster type
            • Number of master nodes in the cluster
            • AZ of a master node
            • Network configuration of the cluster, such as the VPC, subnet, container CIDR block, Service CIDR block, and kube-proxy (forwarding) settings.
            • Network model. For example, you cannot change a tunnel network to a VPC network.
            +
          +
          +

          Procedure

          1. Log in to the CCE console. Choose Clusters. On the displayed page, click Create next to CCE Turbo cluster.
          2. Specify cluster parameters.

            Basic Settings
            • Cluster Name
            • Cluster Version: Select the Kubernetes version used by the cluster.
            • Cluster Scale: Select the maximum number of nodes that can be managed by the cluster. After the cluster is created, the scale can only be increased, not decreased.
            • HA: distribution mode of master nodes. By default, master nodes are randomly distributed in different AZs to improve DR capabilities.
              You can also expand advanced settings and customize the master node distribution mode. The following modes are supported:
              • Host: Master nodes are created on different hosts in the same AZ.
              • Custom: You can determine the location of each master node.
              +
              +
            +
            +

            Network Settings

            +

            The cluster network settings cover nodes, containers, and Services. For details about the cluster networking and container network models, see Overview.

            +
            • Network Model: CCE Turbo clusters support only Cloud Native Network 2.0. For details, see Cloud Native Network 2.0.
            • VPC: Select the VPC to which the cluster belongs. If no VPC is available, click Create VPC to create one. The value cannot be changed after creation.
            • Master Node Subnet: Select the subnet where the master node is deployed. If no subnet is available, click Create Subnet to create one. A master node requires at least four IP addresses, which cannot be changed after creation.
            • Pod Subnet: Select the subnet where the container is located. If no subnet is available, click Create Subnet to create one. The pod subnet determines the maximum number of containers in the cluster. You can add pod subnets after creating the cluster.
            • Service CIDR Block: CIDR block for Services used by containers in the same cluster to access each other. The value determines the maximum number of Services you can create. The value cannot be changed after creation.
            +

            Advanced Settings

            +
            • Request Forwarding: The IPVS and iptables modes are supported. For details, see Comparing iptables and IPVS.
            • CPU Manager: For details, see Binding CPU Cores.
            • Resource Tag:

              You can add resource tags to classify resources.

              +

              You can create predefined tags in Tag Management Service (TMS). Predefined tags are visible to all service resources that support the tagging function. You can use predefined tags to improve tag creation and resource migration efficiency.

              +
            • Certificate Authentication:
              • Default: The X509-based authentication mode is enabled by default. X509 is a commonly used certificate format.
              • Custom: The cluster can identify users based on the header in the request body for authentication.

                You need to upload your CA root certificate, client certificate, and private key of the client certificate.

                +
                • Upload a file smaller than 1 MB. The CA certificate and client certificate can be in .crt or .cer format. The private key of the client certificate can only be uploaded unencrypted.
                • The validity period of the client certificate must be longer than five years.
                • The uploaded CA certificate is used for both the authentication proxy and the kube-apiserver aggregation layer configuration. If the certificate is invalid, the cluster cannot be created.
                • Starting from v1.25, Kubernetes no longer supports certificate authentication generated using the SHA1WithRSA or ECDSAWithSHA1 algorithm. You are advised to use the SHA256 algorithm.
                +
                +
              +
            • Description: The value can contain a maximum of 200 English characters.
            +

          3. Click Next: Add-on Configuration.

            By default, the coredns and everest add-ons are installed.

            +
            Service log
            • ICAgent:

              A log collector provided by Application Operations Management (AOM), reporting logs to AOM and Log Tank Service (LTS) according to the log collection rules you configured.

              +

              You can collect stdout logs as required.

              +
            +
            +

          4. After configuring the parameters, click Next: Confirm.

            It takes about 6 to 10 minutes to create a cluster. You can click Back to Cluster List to perform other operations on the cluster or click Go to Cluster Events to view the cluster details.

            +

          +
          +

          Related Operations

          +
          +
          +
          + +
          + diff --git a/docs/cce/umn/cce_10_0300.html b/docs/cce/umn/cce_10_0300.html new file mode 100644 index 00000000..d21f0ee9 --- /dev/null +++ b/docs/cce/umn/cce_10_0300.html @@ -0,0 +1,227 @@ + + +

          Using HPA and CA for Auto Scaling of Workloads and Nodes

          +

          Scenario

          The best way to handle surging traffic is to automatically adjust the number of machines based on the traffic volume or resource usage, which is called scaling.

          +

          In CCE, the resources that can be used by containers are fixed during application deployment. Therefore, in auto scaling, pods are scaled first. The node resource usage increases only after the number of pods increases, and then nodes can be scaled based on the node resource usage. This section describes how to configure such auto scaling in CCE.

          +
          +

          Solution

          Two major auto scaling policies are HPA (Horizontal Pod Autoscaling) and CA (Cluster AutoScaling). HPA is for workload auto scaling and CA is for node auto scaling.

          +

          HPA and CA work with each other. HPA requires sufficient cluster resources for successful scaling. When the cluster resources are insufficient, CA is needed to add nodes. If HPA reduces workloads, the cluster will have a large number of idle resources. In this case, CA needs to release nodes to avoid resource waste.

          +
          As shown in Figure 1, HPA performs scale-out based on the monitoring metrics. When cluster resources are insufficient, newly created pods are in Pending state. CA then checks these pending pods and selects the most appropriate node pool based on the configured scaling policy to scale out the node pool.
          Figure 1 HPA and CA working flows
          +
          +

          Using HPA and CA can easily implement auto scaling in most scenarios. In addition, the scaling process of nodes and pods can be easily observed.

          +

          This section uses an example to describe the auto scaling process using HPA and CA policies together.

          +
          +

          Preparations

          1. Create a cluster with one node. The node should have 2 cores of CPU and 4 GB of memory, or a higher specification, as well as an EIP to allow external access. If no EIP is bound to the node during node creation, you can manually bind one on the ECS console after creating the node.

            +

          2. Install add-ons for the cluster.

            • autoscaler: node scaling add-on
            • metrics-server: an aggregator of resource usage data in a Kubernetes cluster. It collects measurement data of major Kubernetes resources, such as pods, nodes, containers, and Services. After installation, you can verify that metrics are being served (see the check at the end of this section).
            +

          3. Log in to the cluster node and run a computing-intensive application. When a user sends a request, the result needs to be calculated before being returned to the user.

            1. Create a PHP file named index.php. For each request, it calculates the square root 1,000,000 times before returning OK!.
              vi index.php
              +
              Example file content:
              <?php
              +  $x = 0.0001;
              +  for ($i = 0; $i <= 1000000; $i++) {
              +    $x += sqrt($x);
              +  }
              +  echo "OK!";
              +?>
              +
              +
            2. Compile a Dockerfile to build an image.
              vi Dockerfile
              +
              Example Dockerfile:
              FROM php:5-apache
              +COPY index.php /var/www/html/index.php
              +RUN chmod a+rx index.php
              +
              +
            3. Run the following command to build an image named hpa-example with the tag latest.
              docker build -t hpa-example:latest .
              +
            4. (Optional) Log in to the SWR console, choose Organization Management in the navigation pane, and click Create Organization in the upper right corner to create an organization.

              Skip this step if you already have an organization.

              +
            5. In the navigation pane, choose My Images and then click Upload Through Client. On the page displayed, click Generate a temporary login command and click to copy the command.
            6. Run the login command copied in the previous step on the cluster node. If the login is successful, the message "Login Succeeded" is displayed.
            7. Tag the hpa-example image.

              docker tag [Image name 1:Tag 1] [Image repository address]/[Organization name]/[Image name 2:Tag 2]

              +
              • [Image name 1:Tag 1]: name and tag of the local image to be uploaded.
              • [Image repository address]: The domain name at the end of the login command in 5 is the image repository address, which can be obtained on the SWR console.
              • [Organization name]: name of the organization created in 4.
              • [Image name 2:Tag 2]: desired image name and tag to be displayed on the SWR console.
              +

              Example:

              +

              docker tag hpa-example:latest swr.eu-de.otc.t-systems.com/group/hpa-example:latest

              +
            8. Push the image to the image repository.

              docker push [Image repository address]/[Organization name]/[Image name 2:Tag 2]

              +

              Example:

              +

              docker push swr.eu-de.otc.t-systems.com/group/hpa-example:latest

              +

              The following information will be returned upon a successful push:

              +
              6d6b9812c8ae: Pushed 
              +... 
              +fe4c16cbf7a4: Pushed 
              +latest: digest: sha256:eb7e3bbd*** size: **
              +

              To view the pushed image, go to the SWR console and refresh the My Images page.

              +
            +

          +
          +
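          Before moving on, you can confirm that metrics-server is serving resource metrics (the check referenced in 2 above). If the following commands return CPU and memory figures, the HPA policy created later will be able to read them:

            # Resource usage of nodes, aggregated by metrics-server
            kubectl top node
            # Resource usage of pods in all namespaces
            kubectl top pod --all-namespaces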

          Creating a Node Pool and a Node Scaling Policy

          1. Log in to the CCE console, access the created cluster, click Nodes on the left, click the Node Pools tab, and click Create Node Pool in the upper right corner.
          2. Set node pool parameters, add a node with 2 vCPUs and 4 GB memory, and enable auto scaling.

            • Nodes: Set it to 1, indicating that one node is created by default when a node pool is created.
            • Auto Scaling: Enable the option, meaning that nodes will be automatically created or deleted in the node pool based on the cluster loads.
            • Max. Nodes: Set it to 5, indicating the maximum number of nodes in a node pool.
            • Specifications: 2 vCPUs | 4 GiB
            +

            Retain the defaults for other parameters. For details, see Creating a Node Pool.

            +

            +

          3. Click Add-ons on the left of the cluster console, click Edit under the autoscaler add-on, modify the add-on configuration, enable Auto node scale-in, and configure scale-in parameters. For example, trigger scale-in when the node resource utilization is less than 50%.

            +

            +

            After the preceding configurations, scale-out is performed based on the pending status of the pod and scale-in is triggered when the node resource utilization decreases.

            +

          4. Click Node Scaling on the left of the cluster console and click Create Node Scaling Policy in the upper right corner. Node scaling policies added here trigger scale-out based on the CPU/memory allocation rate or periodically.

            As shown in the following figure, when the cluster CPU allocation rate is greater than 70%, one node will be added. A node scaling policy must be associated with a node pool, and multiple node pools can be associated. When nodes need to be scaled, nodes with proper specifications will be added to or removed from the node pool based on the minimum waste principle. For details, see Creating a Node Scaling Policy.

            +

            +

            +

          +
          +

          Creating a Workload

          Use the hpa-example image to create a Deployment with one replica. The image path depends on the organization you uploaded the image to in the SWR repository and must be replaced with the actual value.

          +
          kind: Deployment
          +apiVersion: apps/v1
          +metadata:
          +  name: hpa-example
          +spec:
          +  replicas: 1
          +  selector:
          +    matchLabels:
          +      app: hpa-example
          +  template:
          +    metadata:
          +      labels:
          +        app: hpa-example
          +    spec:
          +      containers:
          +      - name: container-1
           +        image: 'hpa-example:latest'  # Replace it with the address of the image you uploaded to SWR.
          +        resources:
          +          limits:                  # The value of limits must be the same as that of requests to prevent flapping during scaling.
          +            cpu: 500m
          +            memory: 200Mi
          +          requests:
          +            cpu: 500m
          +            memory: 200Mi
          +      imagePullSecrets:
          +      - name: default-secret
          +

          Then, create a NodePort Service for the workload so that the workload can be accessed from external networks.

          +
          kind: Service
          +apiVersion: v1
          +metadata:
          +  name: hpa-example
          +spec:
          +  ports:
          +    - name: cce-service-0
          +      protocol: TCP
          +      port: 80
          +      targetPort: 80
          +      nodePort: 31144
          +  selector:
          +    app: hpa-example
          +  type: NodePort
          +
          +
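          The two manifests above can be created with kubectl and verified before configuring auto scaling. A minimal sketch, assuming they are saved as deployment.yaml and service.yaml (placeholder file names) and <node-EIP> is the EIP bound in Preparations:

            kubectl apply -f deployment.yaml
            kubectl apply -f service.yaml
            # Check that the Deployment and the Service are ready
            kubectl get deploy hpa-example
            kubectl get svc hpa-example
            # The NodePort Service exposes port 31144 on the node
            curl http://<node-EIP>:31144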

          Creating an HPA Policy

          Create an HPA policy. As shown below, the policy is associated with the hpa-example workload, and the target CPU usage is 50%.

          +

          There are two other annotations. One defines the CPU thresholds, indicating that scaling is not performed when the CPU usage is between 30% and 70%, to prevent the impact of slight fluctuations. The other defines the scaling time windows, indicating that after the policy is executed, another scaling operation will not be triggered within the cooldown window, to prevent the impact of short-term fluctuations.

          +
          apiVersion: autoscaling/v2
          +kind: HorizontalPodAutoscaler
          +metadata:
          +  name: hpa-policy
          +  annotations:
          +    extendedhpa.metrics: '[{"type":"Resource","name":"cpu","targetType":"Utilization","targetRange":{"low":"30","high":"70"}}]'
          +    extendedhpa.option: '{"downscaleWindow":"5m","upscaleWindow":"3m"}'
          +spec:
          +  scaleTargetRef:
          +    kind: Deployment
          +    name: hpa-example
          +    apiVersion: apps/v1
          +  minReplicas: 1
          +  maxReplicas: 100
           +  metrics:
           +    - type: Resource
           +      resource:
           +        name: cpu
           +        target:
           +          type: Utilization
           +          averageUtilization: 50
          +
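          If you are working from kubectl rather than the console, the policy above can be created and checked as follows (assuming the manifest is saved as hpa-policy.yaml, a placeholder file name):

            kubectl apply -f hpa-policy.yaml
            # The TARGETS column shows the current and target CPU usage
            kubectl get hpa hpa-policy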

          Set the parameters as follows if you are using the console.

          +

          +

          +
          +

          Observing the Auto Scaling Process

          1. Check the cluster node status. In the following example, there are two nodes.

            # kubectl get node
            +NAME            STATUS   ROLES    AGE     VERSION
            +192.168.0.183   Ready    <none>   2m20s   v1.17.9-r0-CCE21.1.1.3.B001-17.36.8
            +192.168.0.26    Ready    <none>   55m     v1.17.9-r0-CCE21.1.1.3.B001-17.36.8
            +

            Check the HPA policy. The CPU usage of the target workload is 0%.

            +
            # kubectl get hpa hpa-policy
            +NAME         REFERENCE                TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
            +hpa-policy   Deployment/hpa-example   0%/50%    1         100       1          4m
            +

          2. Run the following command to access the workload. In the following command, {ip:port} indicates the access address of the workload, which can be queried on the workload details page.

            while true;do wget -q -O- http://{ip:port}; done

            +

            If no EIP is displayed, the cluster node has not been assigned any EIP. You need to create one, bind it to the node, and synchronize node data.

            +
            +

            Observe the scaling process of the workload.

            +
            # kubectl get hpa hpa-policy --watch
            +NAME         REFERENCE                TARGETS    MINPODS   MAXPODS   REPLICAS   AGE
            +hpa-policy   Deployment/hpa-example   0%/50%     1         100       1          4m
            +hpa-policy   Deployment/hpa-example   190%/50%   1         100       1          4m23s
            +hpa-policy   Deployment/hpa-example   190%/50%   1         100       4          4m31s
            +hpa-policy   Deployment/hpa-example   200%/50%   1         100       4          5m16s
            +hpa-policy   Deployment/hpa-example   200%/50%   1         100       4          6m16s
            +hpa-policy   Deployment/hpa-example   85%/50%    1         100       4          7m16s
            +hpa-policy   Deployment/hpa-example   81%/50%    1         100       4          8m16s
            +hpa-policy   Deployment/hpa-example   81%/50%    1         100       7          8m31s
            +hpa-policy   Deployment/hpa-example   57%/50%    1         100       7          9m16s
            +hpa-policy   Deployment/hpa-example   51%/50%    1         100       7          10m
            +hpa-policy   Deployment/hpa-example   58%/50%    1         100       7          11m
            +

            You can see that the CPU usage of the workload is 190% at 4m23s, which exceeds the target value. In this case, scaling is triggered to expand the workload to four replicas/pods. In the subsequent several minutes, the CPU usage does not decrease until 7m16s. This is because the new pods may not be successfully created. The possible cause is that resources are insufficient and the pods are in Pending state. During this period, nodes are added.

            +
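            You can confirm this from another terminal while the load generator is running; pods that cannot be scheduled are shown as Pending until autoscaler adds a node:

              # Pods of the workload and the nodes they are scheduled to (the label app=hpa-example comes from the Deployment)
              kubectl get pod -l app=hpa-example -o wide
              # Watch nodes being added by autoscaler
              kubectl get node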

            At 7m16s, the CPU usage decreases, indicating that the pods are successfully created and start to bear traffic. The CPU usage decreases to 81% at 8m16s, which is still greater than the target value (50%) and the high threshold (70%). Therefore, the workload is scaled to 7 pods at 8m31s, and the CPU usage drops to 51% at 10m, which is within the range of 30% to 70%. From then on, the number of pods remains 7.

            +

            In the following output, you can see the workload scaling process and the time when the HPA policy takes effect.

            +
            # kubectl describe deploy hpa-example
            +...
            +Events:
            +  Type    Reason             Age    From                   Message
            +  ----    ------             ----   ----                   -------
            +  Normal  ScalingReplicaSet  25m    deployment-controller  Scaled up replica set hpa-example-79dd795485 to 1
            +  Normal  ScalingReplicaSet  20m    deployment-controller  Scaled up replica set hpa-example-79dd795485 to 4
            +  Normal  ScalingReplicaSet  16m    deployment-controller  Scaled up replica set hpa-example-79dd795485 to 7
            +# kubectl describe hpa hpa-policy
            +...
            +Events:
            +  Type    Reason             Age    From                       Message
            +  ----    ------             ----   ----                       -------
            +  Normal  SuccessfulRescale  20m    horizontal-pod-autoscaler  New size: 4; reason: cpu resource utilization (percentage of request) above target
            +  Normal  SuccessfulRescale  16m    horizontal-pod-autoscaler  New size: 7; reason: cpu resource utilization (percentage of request) above target
            +

            Check the number of nodes. The following output shows that two nodes are added.

            +
            # kubectl get node
            +NAME            STATUS   ROLES    AGE     VERSION
            +192.168.0.120   Ready    <none>   3m5s    v1.17.9-r0-CCE21.1.1.3.B001-17.36.8
            +192.168.0.136   Ready    <none>   6m58s   v1.17.9-r0-CCE21.1.1.3.B001-17.36.8
            +192.168.0.183   Ready    <none>   18m     v1.17.9-r0-CCE21.1.1.3.B001-17.36.8
            +192.168.0.26    Ready    <none>   71m     v1.17.9-r0-CCE21.1.1.3.B001-17.36.8
            +

            You can also view the scaling history on the console. For example, the CA policy is executed once when the CPU allocation rate in the cluster is greater than 70%, and the number of nodes in the node pool is increased from 2 to 3. The new node is automatically added by autoscaler based on the pending state of pods in the initial phase of HPA.

            +

            The node scaling process is as follows:

            +
            1. After the number of pods changes to 4, the pods are in Pending state due to insufficient resources. As a result, the default scale-out policy of the autoscaler add-on is triggered, and the number of nodes is increased by one.
            2. The second node scale-out is triggered because the CPU allocation rate in the cluster is greater than 70%. As a result, the number of nodes is increased by one, which is recorded in the scaling history on the console. Scaling based on the allocation rate ensures that the cluster has sufficient resources.
            +

          3. Stop accessing the workload and check the number of pods.

            # kubectl get hpa hpa-policy --watch
            +NAME         REFERENCE                TARGETS    MINPODS   MAXPODS   REPLICAS   AGE
            +hpa-policy   Deployment/hpa-example   50%/50%    1         100       7          12m
            +hpa-policy   Deployment/hpa-example   21%/50%    1         100       7          13m
            +hpa-policy   Deployment/hpa-example   0%/50%     1         100       7          14m
            +hpa-policy   Deployment/hpa-example   0%/50%     1         100       7          18m
            +hpa-policy   Deployment/hpa-example   0%/50%     1         100       3          18m
            +hpa-policy   Deployment/hpa-example   0%/50%     1         100       3          19m
            +hpa-policy   Deployment/hpa-example   0%/50%     1         100       3          19m
            +hpa-policy   Deployment/hpa-example   0%/50%     1         100       3          19m
            +hpa-policy   Deployment/hpa-example   0%/50%     1         100       3          19m
            +hpa-policy   Deployment/hpa-example   0%/50%     1         100       3          23m
            +hpa-policy   Deployment/hpa-example   0%/50%     1         100       3          23m
            +hpa-policy   Deployment/hpa-example   0%/50%     1         100       1          23m
            +

            You can see that the CPU usage is 21% at 13m. The number of pods is reduced to 3 at 18m, and then reduced to 1 at 23m.

            +

            In the following output, you can see the workload scaling process and the time when the HPA policy takes effect.

            +
            # kubectl describe deploy hpa-example
            +...
            +Events:
            +  Type    Reason             Age    From                   Message
            +  ----    ------             ----   ----                   -------
            +  Normal  ScalingReplicaSet  25m    deployment-controller  Scaled up replica set hpa-example-79dd795485 to 1
            +  Normal  ScalingReplicaSet  20m    deployment-controller  Scaled up replica set hpa-example-79dd795485 to 4
            +  Normal  ScalingReplicaSet  16m    deployment-controller  Scaled up replica set hpa-example-79dd795485 to 7
            +  Normal  ScalingReplicaSet  6m28s  deployment-controller  Scaled down replica set hpa-example-79dd795485 to 3
            +  Normal  ScalingReplicaSet  72s    deployment-controller  Scaled down replica set hpa-example-79dd795485 to 1
            +# kubectl describe hpa hpa-policy
            +...
            +Events:
            +  Type    Reason             Age    From                       Message
            +  ----    ------             ----   ----                       -------
            +  Normal  SuccessfulRescale  20m    horizontal-pod-autoscaler  New size: 4; reason: cpu resource utilization (percentage of request) above target
            +  Normal  SuccessfulRescale  16m    horizontal-pod-autoscaler  New size: 7; reason: cpu resource utilization (percentage of request) above target
            +  Normal  SuccessfulRescale  6m45s  horizontal-pod-autoscaler  New size: 3; reason: All metrics below target
            +  Normal  SuccessfulRescale  90s    horizontal-pod-autoscaler  New size: 1; reason: All metrics below target
            +

            You can also view the HPA policy execution history on the console. Wait until one of the nodes is removed.

            +

            The reason why the other two nodes in the node pool are not reduced is that they both have pods in the kube-system namespace (and these pods are not created by DaemonSets). For details, see Node Scaling Mechanisms.

            +

          +
          +

          Summary

          Using HPA and CA can easily implement auto scaling in most scenarios. In addition, the scaling process of nodes and pods can be easily observed.

          +
          +
          +
          + +
          + diff --git a/docs/cce/umn/cce_10_0301.html b/docs/cce/umn/cce_10_0301.html new file mode 100644 index 00000000..4b761d90 --- /dev/null +++ b/docs/cce/umn/cce_10_0301.html @@ -0,0 +1,141 @@ + + +

          Performing In-place Upgrade

          +
          +
          +

          Scenario

          +

          You can upgrade your clusters to a newer version on the CCE console.

          +

          Before the upgrade, learn about the target versions to which each CCE cluster can be upgraded, the available upgrade methods, and the upgrade impacts. For details, see Upgrade Overview and Before You Start.

          +
          +
          +

          Description

          +
            +
          • An in-place upgrade updates the Kubernetes components on cluster nodes, without changing their OS version.
          • Data plane nodes are upgraded in batches. By default, they are prioritized based on their CPU, memory, and PodDisruptionBudgets (PDBs). You can also set the priorities according to your service requirements.
          +
          +
          +

          Precautions

          +
            +
          • During the cluster upgrade, the system will automatically upgrade add-ons to a version compatible with the target cluster version. Do not uninstall or reinstall add-ons during the cluster upgrade.
          • Before the upgrade, ensure that all add-ons are running. If an add-on fails to be upgraded, rectify the fault and try again.
          • During the upgrade, CCE checks the add-on running status. Some add-ons (such as coredns) require at least two nodes to run normally. In this case, at least two nodes must be available for the upgrade.
          +

          For more information, see Before You Start.

          +
          +
          +

          Procedure

          +

          This section describes how to upgrade a CCE cluster of v1.15 or later. For other versions, see Performing Replace/Rolling Upgrade.

          +
            +
          1. Log in to the CCE console and click the cluster name to access the cluster.
          2. In the navigation pane, choose Cluster Upgrade. You can view the new version available for upgrade on the right.

            +

            Check the version information, last update/upgrade time, available upgrade version, and upgrade history of the current cluster.

            +

            The cluster upgrade goes through pre-upgrade check, add-on upgrade/uninstallation, master node upgrade, worker node upgrade, and post-upgrade processing.

            +
            +
            +
              +
            • If your cluster version is up-to-date, the Upgrade button is grayed out.
            • If your cluster status is abnormal or there are abnormal add-ons, the Upgrade button is dimmed. Perform a check by referring to Before You Start.
            +
            +
            +

            +
          3. Click Upgrade or Install Patch on the right. Set the upgrade parameters.

            +

              +
            • New Version: Select the Kubernetes version to which the cluster can be upgraded.
            • (Optional) Cluster Backup: Determine whether to back up the entire master node. This backup mode is recommended.

              A manual confirmation is required for backing up the entire master node. The backup process uses the Cloud Backup and Recovery (CBR) service and takes about 20 minutes. If there are many cloud backup tasks at the current site, the backup time may be prolonged. You are advised to back up the master node.

              +
            • Add-on Upgrade Configuration: Add-ons that have been installed in your cluster are listed. During the cluster upgrade, the system automatically upgrades the add-ons to be compatible with the target cluster version. You can click Set to re-define the add-on parameters.
              +
              +

              If a red dot is displayed on the right of an add-on, the add-on is incompatible with the target cluster version. During the upgrade, the add-on will be uninstalled and then re-installed. Ensure that the add-on parameters are correctly configured.

              +
              +
              +
            • Node Upgrade Configuration: Before setting the node upgrade priority, you need to select a node pool. Nodes and node pools will be upgraded according to the priorities you specify. You can set the maximum number of nodes to be upgraded in a batch, or set priorities for the nodes to be upgraded. If you do not set this parameter, the system will determine the nodes to upgrade in batches based on specific conditions.
                +
              • Add Upgrade Priority: Add upgrade priorities for node pools.
              • Add Node Priority: After adding a node pool priority, you can set the upgrade sequence of nodes in the node pool. The system upgrades nodes in the sequence you specify. If you skip this setting, the system upgrades nodes based on the default policy.
              +
            • +
            +

            +
          4. Read the upgrade instructions carefully, and select I have read and agree to Upgrade Precautions. Click Upgrade.
          5. After you click Upgrade, the cluster upgrade starts. You can view the upgrade process in the lower part of the page.

            +

            During the upgrade, you can click Suspend on the right to suspend the cluster upgrade. To continue the upgrade, click Continue.

            +

            If an upgrade failure message is displayed during the cluster upgrade, rectify the fault as prompted and try again.

            +

            +
          6. When the upgrade progress reaches 100%, the cluster is upgraded. The version information will be properly displayed, and no further upgrade is required.
          7. After the upgrade is complete, verify the cluster Kubernetes version on the Clusters page (or from kubectl, as shown after this procedure).
          +
          +
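          Besides the Clusters page, the version can also be confirmed from kubectl after the upgrade, for example:

            # Kubelet version of each node (VERSION column)
            kubectl get node
            # Client and API server versions
            kubectl version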
          +
          + +
          \ No newline at end of file diff --git a/docs/cce/umn/cce_10_0302.html b/docs/cce/umn/cce_10_0302.html new file mode 100644 index 00000000..acbcbe16 --- /dev/null +++ b/docs/cce/umn/cce_10_0302.html @@ -0,0 +1,114 @@ + + +

          Before You Start

          +

          Before the upgrade, you can check whether your cluster can be upgraded and which versions are available on the CCE console. For details, see Upgrade Overview.

          +

          Precautions

          • Upgraded clusters cannot be rolled back. Therefore, perform the upgrade during off-peak hours to minimize the impact on your services.
          • Do not shut down, restart, or delete nodes during cluster upgrade. Otherwise, the upgrade fails.
          • Before upgrading a cluster, disable auto scaling policies to prevent node scaling during the upgrade. Otherwise, the upgrade fails.
          • If you locally modify the configuration of a cluster node, the cluster upgrade may fail or the configuration may be lost after the upgrade. Therefore, modify the configurations on the CCE console (cluster or node pool list page) so that they will be automatically inherited during the upgrade.
          • During the cluster upgrade, the running workload services will not be interrupted, but access to the API server will be temporarily interrupted.
          • Before upgrading the cluster, check whether the cluster is healthy.
          • To ensure data security, you are advised to back up data before upgrading the cluster. During the upgrade, you are not advised to perform any operations on the cluster.
          • During the cluster upgrade, the node.kubernetes.io/upgrade taint (the effect is NoSchedule) is added to the node. After the cluster upgrade is complete, the taint is removed. Do not add taints with the same key name on the node. Even if the taints have different effects, they may be deleted by the system by mistake after the upgrade.
          +
          +

          Notes and Constraints

          • Currently, only CCE clusters consisting of VM nodes can be upgraded.
          • After the cluster is upgraded, if the Cluster Version Release Notes state that a containerd vulnerability of the container engine has been fixed, manually restart containerd for the fix to take effect. The same applies to the existing pods.
          • If initContainer or Istio is used in the in-place upgrade of a cluster of v1.15, pay attention to the following restrictions:

            In kubelet 1.16 and later versions, QoS classes are different from those in earlier versions. In kubelet 1.15 and earlier versions, only containers in spec.containers are counted. In kubelet 1.16 and later versions, containers in both spec.containers and spec.initContainers are counted. The QoS class of a pod will change after the upgrade. As a result, the container in the pod restarts. You are advised to modify the QoS class of the service container before the upgrade to avoid this problem. For details, see Table 1.

            + +
            Table 1 QoS class changes before and after the upgrade

            Init Container (Calculated Based on spec.initContainers) | Service Container (Calculated Based on spec.containers) | Pod (Calculated Based on spec.containers and spec.initContainers) | Impacted or Not
            Guaranteed | Besteffort | Burstable | Yes
            Guaranteed | Burstable | Burstable | No
            Guaranteed | Guaranteed | Guaranteed | No
            Besteffort | Besteffort | Besteffort | No
            Besteffort | Burstable | Burstable | No
            Besteffort | Guaranteed | Burstable | Yes
            Burstable | Besteffort | Burstable | Yes
            Burstable | Burstable | Burstable | No
            Burstable | Guaranteed | Burstable | Yes
          +
          +
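          The following pod is a minimal sketch of the first row of Table 1 (the names and images are placeholders). In kubelet 1.15 and earlier, only the service container is counted, so the pod is BestEffort; in kubelet 1.16 and later, the Guaranteed init container is also counted, so the pod becomes Burstable and its container restarts during the upgrade. Setting requests and limits on the service container before the upgrade keeps the QoS class stable.

            apiVersion: v1
            kind: Pod
            metadata:
              name: qos-example              # placeholder name, for illustration only
            spec:
              initContainers:
              - name: init
                image: busybox
                command: ["sh", "-c", "echo init done"]
                resources:                   # requests = limits -> this init container is Guaranteed
                  requests:
                    cpu: 100m
                    memory: 128Mi
                  limits:
                    cpu: 100m
                    memory: 128Mi
              containers:
              - name: app
                image: nginx
                # No resources here -> this service container is BestEffort.
                # kubelet <= 1.15: pod QoS = BestEffort (only spec.containers counted).
                # kubelet >= 1.16: pod QoS = Burstable (spec.initContainers also counted), so the pod is impacted.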

          Upgrade Backup

          How to back up a node:

          +
          • etcd database backup: CCE automatically backs up the etcd database during the cluster upgrade.
          • Master node backup (recommended, manual confirmation required): On the upgrade confirmation page, click Backup to back up the entire master node of the cluster. The backup process uses the Cloud Backup and Recovery (CBR) service and takes about 20 minutes. If there are many cloud backup tasks at the current site, the backup time may be prolonged.
          +
          +
          +
          + +
          + diff --git a/docs/cce/umn/cce_10_0305.html b/docs/cce/umn/cce_10_0305.html new file mode 100644 index 00000000..d236fb11 --- /dev/null +++ b/docs/cce/umn/cce_10_0305.html @@ -0,0 +1,22 @@ + + +

          Storage (FlexVolume)

          +

          +
          + + diff --git a/docs/cce/umn/cce_10_0306.html b/docs/cce/umn/cce_10_0306.html new file mode 100644 index 00000000..c3741037 --- /dev/null +++ b/docs/cce/umn/cce_10_0306.html @@ -0,0 +1,58 @@ + + +

          FlexVolume Overview

          +

          In container storage, you can use different types of volumes and mount as many of them as you need to containers in pods.

          +

          In CCE, container storage is backed both by Kubernetes-native objects, such as emptyDir, hostPath, secret, and ConfigMap, and by cloud storage services.

          +

          CCE clusters of 1.13 and earlier versions use the storage-driver add-on to connect to cloud storage services to support Kubernetes FlexVolume driver for container storage. The FlexVolume driver has been deprecated in favor of the Container Storage Interface (CSI). The everest add-on for CSI is installed in CCE clusters of 1.15 and later versions by default. For details, see Overview.

          +
          • In CCE clusters earlier than Kubernetes 1.13, end-to-end capacity expansion of container storage is not supported, and the PVC capacity is inconsistent with the storage capacity.
          • In a cluster of v1.13 or earlier, when an upgrade or bug fix is available for storage functionalities, you only need to install or upgrade the storage-driver add-on. Upgrading the cluster or creating a cluster is not required.
          +
          +

          Notes and Constraints

          • For clusters created in CCE, Kubernetes v1.15.11 is a transitional version in which the FlexVolume plug-in (storage-driver) is compatible with the CSI plug-in (everest). Clusters of v1.17 and later versions do not support FlexVolume anymore. You need to use the everest add-on.
          • The FlexVolume plug-in will be maintained by Kubernetes developers, but new functionality will only be added to CSI. You are advised not to create storage that connects to the FlexVolume plug-in (storage-driver) in CCE anymore. Otherwise, the storage resources may not function normally.
          +
          +

          Checking Storage Add-ons

          1. Log in to the CCE console.
          2. In the navigation tree on the left, click Add-ons.
          3. Click the Add-on Instance tab.
          4. Select a cluster in the upper right corner. The default storage add-on installed during cluster creation is displayed.
          +
          +
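          Alternatively, you can check which storage add-on is running from kubectl. A quick sketch, assuming the default everest add-on described in this section (pod names vary with the add-on version):

            # everest (CSI) components run in the kube-system namespace
            kubectl get pod -n kube-system | grep everest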

          Differences Between CSI and FlexVolume Plug-ins

          +
          + + + + + + + + + + + + + + + + +
          Table 1 CSI and FlexVolume

          Kubernetes Solution

          +

          CCE Add-on

          +

          Feature

          +

          Recommendation

          +

          CSI

          +

          Everest

          +

          CSI was developed as a standard for exposing arbitrary block and file storage systems to containerized workloads. Using CSI, third-party storage providers can deploy plugins exposing new storage systems in Kubernetes without having to touch the core Kubernetes code. In CCE, the everest add-on is installed by default in clusters of Kubernetes v1.15 and later to connect to storage services (EVS, OBS, SFS, and SFS Turbo).

          +

          The everest add-on consists of two parts:

          +
          • everest-csi-controller for storage volume creation, deletion, capacity expansion, and cloud disk snapshots
          • everest-csi-driver for mounting, unmounting, and formatting storage volumes on nodes
          +

          For details, see everest.

          +

          The everest add-on is installed by default in clusters of v1.15 and later. CCE will mirror the Kubernetes community by providing continuous support for updated CSI capabilities.

          +

          Flexvolume

          +

          storage-driver

          +

          FlexVolume is an out-of-tree plugin interface that has existed in Kubernetes since version 1.2 (before CSI). CCE provided FlexVolume volumes through the storage-driver add-on installed in clusters of Kubernetes v1.13 and earlier versions. This add-on connects clusters to storage services (EVS, OBS, SFS, and SFS Turbo).

          +

          For details, see storage-driver.

          +

          For the created clusters of v1.13 or earlier, the installed FlexVolume plug-in (CCE add-on storage-driver) can still be used. CCE stops providing update support for this add-on, and you are advised to upgrade these clusters.

          +
          +
          +
          • A cluster can use only one type of storage plug-in.
          • The FlexVolume plug-in cannot be replaced by the CSI plug-in in clusters of v1.13 or earlier. You can only upgrade these clusters. For details, see Cluster Upgrade.
          +
          +
          +
          +
          + +
          + diff --git a/docs/cce/umn/cce_10_0307.html b/docs/cce/umn/cce_10_0307.html new file mode 100644 index 00000000..4acf97a4 --- /dev/null +++ b/docs/cce/umn/cce_10_0307.html @@ -0,0 +1,234 @@ + + +

          Overview

          +

          Volume

          On-disk files in a container are ephemeral. They are lost when the container crashes, and they are difficult to share between containers running together in a pod. The Kubernetes volume abstraction solves both of these problems. Volumes cannot be created independently; they are defined in the pod spec. All containers in a pod can access its volumes, but a volume must be mounted to a directory in the container before it can be used.

          +

          The following figure shows how a storage volume is used between containers in a pod.

          +

          +

          A volume will no longer exist if the pod to which it is mounted does not exist. However, files in the volume may outlive the volume, depending on the volume type.

          +
          +
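          A minimal sketch of a volume shared by two containers in one pod (the names and images are placeholders): the volume is declared once in the pod spec and mounted into each container that needs it.

            apiVersion: v1
            kind: Pod
            metadata:
              name: volume-demo              # placeholder name
            spec:
              containers:
              - name: writer
                image: busybox
                command: ["sh", "-c", "echo hello > /data/hello.txt && sleep 3600"]
                volumeMounts:
                - name: shared-data
                  mountPath: /data           # the volume must be mounted to a directory in the container
              - name: reader
                image: busybox
                command: ["sh", "-c", "sleep 3600"]
                volumeMounts:
                - name: shared-data
                  mountPath: /data           # both containers see the same files
              volumes:
              - name: shared-data
                emptyDir: {}                 # ephemeral volume; deleted together with the pod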

          Volume Types

          Volumes can be classified into local volumes and cloud volumes.

          +
          • Local volumes
            CCE supports the following types of local volumes. For details about how to use them, see Using Local Disks as Storage Volumes.
            • emptyDir: an empty volume used for temporary storage
            • hostPath: mounts a directory on a host (node) to your container for reading data from the host.
            • ConfigMap: references the data stored in a ConfigMap for use by containers.
            • Secret: references the data stored in a secret for use by containers.
            +
            +
          • Cloud volumes

            CCE supports the following types of cloud volumes:

            +
            • EVS
            • SFS Turbo
            • OBS
            • SFS
            +
          +
          +

          CSI

          You can use Kubernetes Container Storage Interface (CSI) to develop plug-ins to support specific storage volumes.

          +

          CCE developed the storage add-on everest for you to use cloud storage services, such as EVS and OBS. You can install this add-on when creating a cluster.

          +
          +

          PV and PVC

          Kubernetes provides PersistentVolumes (PVs) and PersistentVolumeClaims (PVCs) to abstract details of how storage is provided from how it is consumed. You can request specific size of storage when needed, just like pods can request specific levels of resources (CPU and memory).

          +
          • PV: A PV is a persistent storage volume in a cluster. Same as a node, a PV is a cluster-level resource.
          • PVC: A PVC describes a workload's request for storage resources. This request consumes existing PVs in the cluster. If there is no PV available, underlying storage and PVs are dynamically created. When creating a PVC, you need to describe the attributes of the requested persistent storage, such as the size of the volume and the read/write permissions.
          +

          You can bind PVCs to PVs in a pod so that the pod can use storage resources. The following figure shows the relationship between PVs and PVCs.

          +
          Figure 1 PVC-to-PV binding
          +

          PVs describe storage resources in the cluster, and PVCs are requests for those resources. The following sections describe how to use kubectl to connect to storage resources.

          +

          If you do not want to create storage resources or PVs manually, you can use StorageClasses.

          +
          +

          StorageClass

          StorageClass describes the storage class used in the cluster. You need to specify StorageClass when creating a PVC or PV. As of now, CCE provides storage classes such as csi-disk, csi-nas, and csi-obs by default. When defining a PVC, you can use a StorageClassName to create a PV of the corresponding type and automatically create underlying storage resources.

          +

          You can run the following command to query the storage classes that CCE supports. You can also use the CSI plug-in provided by CCE to customize a storage class, which functions similarly to the default storage classes in CCE.

          +
          # kubectl get sc
          +NAME                PROVISIONER                     AGE
          +csi-disk            everest-csi-provisioner         17d          # Storage class for EVS disks
          +csi-disk-topology   everest-csi-provisioner         17d          # Storage class for EVS disks with delayed binding
          +csi-nas             everest-csi-provisioner         17d          # Storage class for SFS file systems
          +csi-obs             everest-csi-provisioner         17d          # Storage class for OBS buckets
          +

          After a StorageClass is set, PVs can be automatically created and maintained. You only need to specify the StorageClass when creating a PVC, which greatly reduces the workload.

          +
          +
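          For example, a PVC that dynamically provisions an EVS disk only needs to name one of the storage classes above. This is a minimal sketch; depending on the cluster version and disk type, the everest add-on may require additional annotations (such as the EVS disk type or AZ), as described in the EVS sections of this guide.

            apiVersion: v1
            kind: PersistentVolumeClaim
            metadata:
              name: pvc-evs-auto             # placeholder name
            spec:
              accessModes:
              - ReadWriteOnce                # EVS disks are non-shared storage
              resources:
                requests:
                  storage: 10Gi
              storageClassName: csi-disk     # one of the default storage classes listed above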

          Cloud Services for Container Storage

          CCE allows you to mount local and cloud storage volumes listed in Volume Types to your pods. Their features are described below.

          +
          Figure 2 Volume types supported by CCE
          + +
          + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          Table 1 Detailed description of cloud storage services

          Dimension

          +

          EVS

          +

          SFS

          +

          OBS

          +

          SFS Turbo

          +

          Definition

          +

          EVS offers scalable block storage for cloud servers. With high reliability, high performance, and rich specifications, EVS disks can be used for distributed file systems, dev/test environments, data warehouses, and high-performance computing (HPC) applications.

          +

          Expandable to petabytes, SFS provides fully hosted shared file storage, highly available and stable to handle data- and bandwidth-intensive applications in HPC, media processing, file sharing, content management, and web services.

          +

          +

          OBS is a stable, secure, and easy-to-use object storage service that lets you inexpensively store data of any format and size. You can use it in enterprise backup/archiving, video on demand (VoD), video surveillance, and many other scenarios.

          +

          Expandable to 320 TB, SFS Turbo provides a fully hosted shared file storage, highly available and stable to support small files and applications requiring low latency and high IOPS. You can use SFS Turbo in high-traffic websites, log storage, compression/decompression, DevOps, enterprise OA, and containerized applications.

          +

          Data storage logic

          +

          Stores binary data and cannot directly store files. To store files, you need to format the file system first.

          +

          Stores files and sorts and displays data in the hierarchy of files and folders.

          +

          Stores objects. Files directly stored automatically generate the system metadata, which can also be customized by users.

          +

          Stores files and sorts and displays data in the hierarchy of files and folders.

          +

          Services

          +

          Accessible only after being mounted to ECSs or BMSs and initialized.

          +

          Mounted to ECSs or BMSs using network protocols. A network address must be specified or mapped to a local directory for access.

          +

          Accessible through the Internet or Direct Connect (DC). You need to specify the bucket address and use transmission protocols such as HTTP and HTTPS.

          +

          Supports the Network File System (NFS) protocol (NFSv3 only). You can seamlessly integrate existing applications and tools with SFS Turbo.

          +

          Static provisioning

          +

          Supported

          +

          Supported

          +

          Supported

          +

          Supported

          +

          Dynamic provisioning

          +

          Supported

          +

          Supported

          +

          Supported

          +

          Not supported

          +

          Features

          +

          Non-shared storage. Each volume can be mounted to only one node.

          +

          Shared storage featuring high performance and throughput

          +

          Shared, user-mode file system

          +

          Shared storage featuring high performance and bandwidth

          +

          Usage

          +

          HPC, enterprise core cluster applications, enterprise application systems, and dev/test

          +
          NOTE:

          HPC apps here require high-speed and high-IOPS storage, such as industrial design and energy exploration.

          +
          +

          HPC, media processing, content management, web services, big data, and analysis applications

          +
          NOTE:

          HPC apps here require high bandwidth and shared file storage, such as gene sequencing and image rendering.

          +
          +

          Big data analysis, static website hosting, online video on demand (VoD), gene sequencing, intelligent video surveillance, backup and archiving, and enterprise cloud boxes (web disks)

          +

          High-traffic websites, log storage, DevOps, and enterprise OA

          +

          Capacity

          +

          TB

          +

          SFS 1.0: PB

          +

          EB

          +

          TB

          +

          Latency

          +

          1-2 ms

          +

          SFS 1.0: 3-20 ms

          +

          10 ms

          +

          1-2 ms

          +

          IOPS/TPS

          +

          33,000 for a single disk

          +

          SFS 1.0: 2K

          +

          Tens of millions

          +

          100K

          +

          Bandwidth

          +

          MB/s

          +

          SFS 1.0: GB/s

          +

          TB/s

          +

          GB/s

          +
          +
          +
          +

          Notes and Constraints

          Secure containers do not support OBS volumes.

          +
          • A single user can create a maximum of 100 OBS buckets on the console. If you have a large number of CCE workloads and you want to mount an OBS bucket to every workload, you may easily run out of buckets. In this scenario, you are advised to use OBS through the OBS API or SDK and do not mount OBS buckets to the workload on the console.
          • For clusters earlier than v1.19.10, if an HPA policy is used to scale out a workload with EVS volumes mounted, the existing pods cannot be read or written when a new pod is scheduled to another node.

            For clusters of v1.19.10 and later, if an HPA policy is used to scale out a workload with EVS volume mounted, a new pod cannot be started because EVS disks cannot be attached.

            +
          • When you uninstall a subpath in a cluster of v1.19 or earlier, all folders in the subpath are traversed. If there are a large number of folders, the traversal takes a long time, so does the volume unmount. You are advised not to create too many folders in the subpath.
          • The maximum size of a single file in OBS mounted to a CCE cluster is far smaller than that defined by obsfs.
          +
          +

          Notice on Using Add-ons

          • To use the CSI plug-in (the everest add-on in CCE), your cluster must be using Kubernetes 1.15 or later. This add-on is installed by default when you create a cluster of v1.15 or later. The FlexVolume plug-in (the storage-driver add-on in CCE) is installed by default when you create a cluster of v1.13 or earlier.
          • If your cluster is upgraded from v1.13 to v1.15, storage-driver is replaced by everest (v1.1.6 or later) for container storage. The takeover does not affect the original storage functions.
          • In version 1.2.0 of the everest add-on, key authentication is optimized when OBS is used. After the everest add-on is upgraded from a version earlier than 1.2.0, you need to restart all workloads that use OBS in the cluster. Otherwise, workloads may not be able to use OBS.
          +
          +

          Differences Between CSI and FlexVolume Plug-ins

          +
          + + + + + + + + + + + + + + + + +
          Table 2 CSI and FlexVolume

          Kubernetes Solution

          +

          CCE Add-on

          +

          Feature

          +

          Recommendation

          +

          CSI

          +

          Everest

          +

          CSI was developed as a standard for exposing arbitrary block and file storage systems to containerized workloads. Using CSI, third-party storage providers can deploy plugins exposing new storage systems in Kubernetes without having to touch the core Kubernetes code. In CCE, the everest add-on is installed by default in clusters of Kubernetes v1.15 and later to connect to storage services (EVS, OBS, SFS, and SFS Turbo).

          +

          The everest add-on consists of two parts:

          +
          • everest-csi-controller for storage volume creation, deletion, capacity expansion, and cloud disk snapshots
          • everest-csi-driver for mounting, unmounting, and formatting storage volumes on nodes
          +

          For details, see everest.

          +

          The everest add-on is installed by default in clusters of v1.15 and later. CCE will mirror the Kubernetes community by providing continuous support for updated CSI capabilities.

          +

          Flexvolume

          +

          storage-driver

          +

          FlexVolume is an out-of-tree plugin interface that has existed in Kubernetes since version 1.2 (before CSI). CCE provided FlexVolume volumes through the storage-driver add-on installed in clusters of Kubernetes v1.13 and earlier versions. This add-on connects clusters to storage services (EVS, OBS, SFS, and SFS Turbo).

          +

          For details, see storage-driver.

          +

          For the created clusters of v1.13 or earlier, the installed FlexVolume plug-in (CCE add-on storage-driver) can still be used. CCE stops providing update support for this add-on, and you are advised to upgrade these clusters.

          +
          +
          +
          • A cluster can use only one type of storage plug-in.
          • The FlexVolume plug-in cannot be replaced by the CSI plug-in in clusters of v1.13 or earlier. You can only upgrade these clusters. For details, see Cluster Upgrade.
          +
          +
          +

          Checking Storage Add-ons

          1. Log in to the CCE console.
          2. In the navigation tree on the left, click Add-ons.
          3. Click the Add-on Instance tab.
          4. Select a cluster in the upper right corner. The default storage add-on installed during cluster creation is displayed.
          +
          +
          +
          + +
          + diff --git a/docs/cce/umn/cce_01_0309.html b/docs/cce/umn/cce_10_0309.html similarity index 50% rename from docs/cce/umn/cce_01_0309.html rename to docs/cce/umn/cce_10_0309.html index 769ac489..162fc2e9 100644 --- a/docs/cce/umn/cce_01_0309.html +++ b/docs/cce/umn/cce_10_0309.html @@ -1,23 +1,21 @@ - +

          Using EVS Disks as Storage Volumes

          diff --git a/docs/cce/umn/cce_10_0310.html b/docs/cce/umn/cce_10_0310.html new file mode 100644 index 00000000..4c4ebe24 --- /dev/null +++ b/docs/cce/umn/cce_10_0310.html @@ -0,0 +1,14 @@ + + +

          Overview

          +

          To achieve persistent storage, CCE allows you to mount storage volumes created from Elastic Volume Service (EVS) disks to a path of a container. When the container is migrated, the mounted EVS volumes are migrated with it. By using EVS volumes, you can mount the remote file directory of a storage system into a container so that data in the volume is permanently preserved even when the container is deleted.

          +
          Figure 1 Mounting EVS volumes to CCE
          +

          Description

          • User-friendly: Similar to formatting disks for on-site servers in traditional layouts, you can format block storage (disks) mounted to cloud servers, and create file systems on them.
          • Data isolation: Each server uses an independent block storage device (disk).
          • Private network: Data can be accessed only in private networks of data centers.
          • Capacity and performance: The capacity of a single volume is limited (TB-level), but the performance is excellent (ms-level read/write I/O latency).
          • Restriction: EVS disks that have partitions or have non-ext4 file systems cannot be imported.
          • Applications: HPC, enterprise core applications running in clusters, enterprise application systems, and development and testing. These volumes are often used by single-pod Deployments and jobs, or exclusively by each pod in a StatefulSet. EVS disks are non-shared storage and cannot be attached to multiple nodes at the same time. If two pods are configured to use the same EVS disk and the two pods are scheduled to different nodes, one pod cannot be started because the EVS disk cannot be attached to it.
          +
          +
          + + diff --git a/docs/cce/umn/cce_10_0312.html b/docs/cce/umn/cce_10_0312.html new file mode 100644 index 00000000..39bb7f80 --- /dev/null +++ b/docs/cce/umn/cce_10_0312.html @@ -0,0 +1,71 @@ + + +

          (kubectl) Automatically Creating an EVS Disk

          +

          Notes and Constraints

          The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

          +
          +

          Procedure

          1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
          2. Run the following commands to configure the pvc-evs-auto-example.yaml file, which is used to create a PVC.

            touch pvc-evs-auto-example.yaml

            +

            vi pvc-evs-auto-example.yaml

            +
            Example YAML file for clusters of v1.9, v1.11, and v1.13:
            apiVersion: v1
            +kind: PersistentVolumeClaim
            +metadata:
            +  name: pvc-evs-auto-example
            +  namespace: default
            +  annotations:
            +    volume.beta.kubernetes.io/storage-class: sas
            +  labels:
            +    failure-domain.beta.kubernetes.io/region: eu-de
            +    failure-domain.beta.kubernetes.io/zone: eu-de-01
            +spec:
            +  accessModes:
            +  - ReadWriteOnce
            +  resources:
            +    requests:
            +      storage: 10Gi
            + +
            + + + + + + + + + + + + + + + + + + + +
            Table 1 Key parameters

            Parameter

            +

            Description

            +

            volume.beta.kubernetes.io/storage-class

            +

            EVS disk type. The value is in lowercase.

            +

            failure-domain.beta.kubernetes.io/region

            +

            Region where the cluster is located.

            +

            failure-domain.beta.kubernetes.io/zone

            +

            AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

            +

            storage

            +

            Storage capacity in the unit of Gi.

            +

            accessModes

            +

            Read/write mode of the volume.

            +

            You can set this parameter to ReadWriteMany (shared volume) or ReadWriteOnce (non-shared volume).

            +
            +
            +
            +

          3. Run the following command to create a PVC.

            kubectl create -f pvc-evs-auto-example.yaml

            +

            After the command is executed, an EVS disk is created in the AZ where the cluster is located. Choose Storage > EVS to view the EVS disk. Alternatively, you can view the EVS disk by its volume name on the EVS console.
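            You can also confirm the binding from kubectl. This is a hedged sketch, not part of the original procedure, and assumes the PVC name and namespace used above.

            # The STATUS column changes from Pending to Bound once the EVS disk is provisioned.
            kubectl get pvc pvc-evs-auto-example -n default
            # Events, the storage class, and the bound volume name are shown in the details.
            kubectl describe pvc pvc-evs-auto-example -n default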

            +

          +
          +
          + + diff --git a/docs/cce/umn/cce_10_0313.html b/docs/cce/umn/cce_10_0313.html new file mode 100644 index 00000000..4aca363c --- /dev/null +++ b/docs/cce/umn/cce_10_0313.html @@ -0,0 +1,539 @@ + + +

          (kubectl) Creating a PV from an Existing EVS Disk

          +

          Notes and Constraints

          The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

          +
          +

          Procedure

          1. Log in to the EVS console, create an EVS disk, and record the volume ID, capacity, and disk type of the EVS disk.
          2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
          3. Create two YAML files for creating the PersistentVolume (PV) and PersistentVolumeClaim (PVC). Assume that the file names are pv-evs-example.yaml and pvc-evs-example.yaml.

            touch pv-evs-example.yaml pvc-evs-example.yaml

            + +
            + + + + + + + + + + + + + + + + + +

            Kubernetes Cluster Version

            +

            Description

            +

            YAML Example

            +

            1.11.7 ≤ K8s version ≤ 1.13

            +

            Clusters from v1.11.7 to v1.13

            +

            Example YAML

            +

            1.11 ≤ K8s version < 1.11.7

            +

            Clusters from v1.11 to v1.11.7

            +

            Example YAML

            +

            K8s version = 1.9

            +

            Clusters of v1.9

            +

            Example YAML

            +
            +
            +

            Clusters from v1.11.7 to v1.13

            +
            • Example YAML file for the PV:
              apiVersion: v1 
              +kind: PersistentVolume 
              +metadata: 
              +  labels: 
              +    failure-domain.beta.kubernetes.io/region: eu-de
              +    failure-domain.beta.kubernetes.io/zone:  eu-de-01
              +  annotations:
              +    pv.kubernetes.io/provisioned-by: flexvolume-huawei.com/fuxivol
              +  name: pv-evs-example 
              +spec: 
              +  accessModes: 
              +  - ReadWriteOnce 
              +  capacity: 
              +    storage: 10Gi 
              +  claimRef:
              +    apiVersion: v1
              +    kind: PersistentVolumeClaim
              +    name: pvc-evs-example
              +    namespace: default
              +  flexVolume: 
              +    driver: huawei.com/fuxivol 
              +    fsType: ext4 
              +    options:
              +      disk-mode: SCSI
              +      fsType: ext4 
              +      volumeID: 0992dbda-6340-470e-a74e-4f0db288ed82 
              +  persistentVolumeReclaimPolicy: Delete 
              +  storageClassName: sas
              + +
              + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
              Table 1 Key parameters

              Parameter

              +

              Description

              +

              failure-domain.beta.kubernetes.io/region

              +

              Region where the cluster is located.

              +

              failure-domain.beta.kubernetes.io/zone

              +

              AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

              +

              storage

              +

              EVS volume capacity in the unit of Gi.

              +

              storageClassName

              +

              EVS disk type. Supported values: Common I/O (SATA), High I/O (SAS), and Ultra-high I/O (SSD)

              +

              driver

              +

              Storage driver.

              +

              For EVS disks, set this parameter to huawei.com/fuxivol.

              +

              volumeID

              +

              Volume ID of the EVS disk.

              +

              To obtain the volume ID, log in to the CCE console, choose Resource Management > Storage, click the PVC name in the EVS tab page, and copy the PVC ID on the PVC details page.

              +

              disk-mode

              +

              Device type of the EVS disk. The value is VBD or SCSI.

              +

              For CCE clusters earlier than v1.11.7, you do not need to set this field. The value defaults to VBD.

              +

              This field is mandatory for CCE clusters from v1.11.7 to v1.13 that use Linux x86. As the EVS volumes dynamically provisioned by a PVC are created from SCSI EVS disks, you are advised to choose SCSI when manually creating volumes (static PVs). Volumes in the VBD mode can still be used after cluster upgrades.

              +

              spec.claimRef.apiVersion

              +

              The value is fixed at v1.

              +

              spec.claimRef.kind

              +

              The value is fixed at PersistentVolumeClaim.

              +

              spec.claimRef.name

              +

              PVC name. The value is the same as the name of the PVC created in the next step.

              +

              spec.claimRef.namespace

              +

              Namespace of the PVC. The value is the same as the namespace of the PVC created in the next step.

              +
              +
              +
            • Example YAML file for the PVC:
              apiVersion: v1  
              +kind: PersistentVolumeClaim  
              +metadata:  
              +  annotations:  
              +    volume.beta.kubernetes.io/storage-class: sas
              +    volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxivol 
              +  labels: 
              +    failure-domain.beta.kubernetes.io/region: eu-de
              +    failure-domain.beta.kubernetes.io/zone: eu-de-01     
              +  name: pvc-evs-example 
              +  namespace: default  
              +spec:  
              +  accessModes:  
              +  - ReadWriteOnce  
              +  resources:  
              +    requests:  
              +      storage: 10Gi
              +  volumeName: pv-evs-example
              + +
              + + + + + + + + + + + + + + + + + + + + + + +
              Table 2 Key parameters

              Parameter

              +

              Description

              +

              volume.beta.kubernetes.io/storage-class

              +

              Storage class, which must be the same as that of the existing PV.

              +

              volume.beta.kubernetes.io/storage-provisioner

              +

              The field must be set to flexvolume-huawei.com/fuxivol.

              +

              failure-domain.beta.kubernetes.io/region

              +

              Region where the cluster is located.

              +

              failure-domain.beta.kubernetes.io/zone

              +

              AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

              +

              storage

              +

              Requested capacity in the PVC, in Gi.

              +

              The value must be the same as the storage size of the existing PV.

              +

              volumeName

              +

              Name of the PV.

              +
              +
              +
            +

            Clusters from v1.11 to v1.11.7

            +
            • Example YAML file for the PV:
              apiVersion: v1 
              +kind: PersistentVolume 
              +metadata: 
              +  labels: 
              +    failure-domain.beta.kubernetes.io/region: eu-de
              +    failure-domain.beta.kubernetes.io/zone:  
              +  name: pv-evs-example 
              +spec: 
              +  accessModes: 
              +  - ReadWriteOnce
              +  capacity: 
              +    storage: 10Gi 
              +  flexVolume: 
              +    driver: huawei.com/fuxivol 
              +    fsType: ext4 
              +    options:
              +      fsType: ext4 
              +      volumeID: 0992dbda-6340-470e-a74e-4f0db288ed82 
              +  persistentVolumeReclaimPolicy: Delete 
              +  storageClassName: sas
              + +
              + + + + + + + + + + + + + + + + + + + + + + + + + +
              Table 3 Key parameters

              Parameter

              +

              Description

              +

              failure-domain.beta.kubernetes.io/region

              +

              Region where the cluster is located.

              +

              failure-domain.beta.kubernetes.io/zone

              +

              AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

              +

              storage

              +

              EVS volume capacity in the unit of Gi.

              +

              storageClassName

              +

              EVS disk type. Supported values: Common I/O (SATA), High I/O (SAS), and Ultra-high I/O (SSD)

              +

              driver

              +

              Storage driver.

              +

              For EVS disks, set this parameter to huawei.com/fuxivol.

              +

              volumeID

              +

              Volume ID of the EVS disk.

              +

              To obtain the volume ID, log in to the CCE console, choose Resource Management > Storage, click the PVC name in the EVS tab page, and copy the PVC ID on the PVC details page.

              +

              disk-mode

              +

              Device type of the EVS disk. The value is VBD or SCSI.

              +

              For CCE clusters earlier than v1.11.7, you do not need to set this field. The default value is VBD.

              +

              This field is mandatory for CCE clusters from v1.11.7 to v1.13 that use Linux x86. As the EVS volumes dynamically provisioned by a PVC are created from SCSI EVS disks, you are advised to choose SCSI when manually creating volumes (static PVs). Volumes in the VBD mode can still be used after cluster upgrades.

              +
              +
              +
            • Example YAML file for the PVC:
              apiVersion: v1  
              +kind: PersistentVolumeClaim  
              +metadata:  
              +  annotations:  
              +    volume.beta.kubernetes.io/storage-class: sas
              +    volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxivol 
              +  labels: 
              +    failure-domain.beta.kubernetes.io/region: eu-de
              +    failure-domain.beta.kubernetes.io/zone: eu-de-01     
              +  name: pvc-evs-example 
              +  namespace: default  
              +spec:  
              +  accessModes:  
              +  - ReadWriteOnce
              +  resources:  
              +    requests:  
              +      storage: 10Gi
              +  volumeName: pv-evs-example
              + +
              + + + + + + + + + + + + + + + + + + + + + + +
              Table 4 Key parameters

              Parameter

              +

              Description

              +

              volume.beta.kubernetes.io/storage-class

              +

              Storage class. The value can be sas or ssd. The value must be the same as that of the existing PV.

              +

              volume.beta.kubernetes.io/storage-provisioner

              +

              The field must be set to flexvolume-huawei.com/fuxivol.

              +

              failure-domain.beta.kubernetes.io/region

              +

              Region where the cluster is located.

              +

              failure-domain.beta.kubernetes.io/zone

              +

              AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

              +

              storage

              +

              Requested capacity in the PVC, in Gi.

              +

              The value must be the same as the storage size of the existing PV.

              +

              volumeName

              +

              Name of the PV.

              +
              +
              +
            +

            Clusters of v1.9

            +
            • Example YAML file for the PV:
              apiVersion: v1 
              +kind: PersistentVolume 
              +metadata: 
              +  labels: 
              +    failure-domain.beta.kubernetes.io/region: eu-de
              +    failure-domain.beta.kubernetes.io/zone:  
              +  name: pv-evs-example 
              +  namespace: default 
              +spec: 
              +  accessModes: 
              +  - ReadWriteOnce
              +  capacity: 
              +    storage: 10Gi 
              +  flexVolume: 
              +    driver: huawei.com/fuxivol 
              +    fsType: ext4 
              +    options: 
              +      fsType: ext4 
              +      kubernetes.io/namespace: default 
              +      volumeID: 0992dbda-6340-470e-a74e-4f0db288ed82 
              +  persistentVolumeReclaimPolicy: Delete 
              +  storageClassName: sas
              + +
              + + + + + + + + + + + + + + + + + + + + + + + + + +
              Table 5 Key parameters

              Parameter

              +

              Description

              +

              failure-domain.beta.kubernetes.io/region

              +

              Region where the cluster is located.

              +

              failure-domain.beta.kubernetes.io/zone

              +

              AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

              +

              storage

              +

              EVS volume capacity in the unit of Gi.

              +

              storageClassName

              +

              EVS disk type. Supported values: Common I/O (SATA), High I/O (SAS), and Ultra-high I/O (SSD)

              +

              driver

              +

              Storage driver.

              +

              For EVS disks, set this parameter to huawei.com/fuxivol.

              +

              volumeID

              +

              Volume ID of the EVS disk.

              +

              To obtain the volume ID, log in to the CCE console, choose Resource Management > Storage, click the PVC name in the EVS tab page, and copy the PVC ID on the PVC details page.

              +

              disk-mode

              +

              Device type of the EVS disk. The value is VBD or SCSI.

              +

              For CCE clusters earlier than v1.11.7, you do not need to set this field. The default value is VBD.

              +

              This field is mandatory for CCE clusters from v1.11.7 to v1.13 that use Linux x86. As the EVS volumes dynamically provisioned by a PVC are created from SCSI EVS disks, you are advised to choose SCSI when manually creating volumes (static PVs). Volumes in the VBD mode can still be used after cluster upgrades.

              +
              +
              +
            • Example YAML file for the PVC:
              apiVersion: v1  
              +kind: PersistentVolumeClaim  
              +metadata:  
              +  annotations:  
              +    volume.beta.kubernetes.io/storage-class: sas
              +    volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxivol 
              +  labels: 
              +    failure-domain.beta.kubernetes.io/region: eu-de
              +    failure-domain.beta.kubernetes.io/zone: 
              +  name: pvc-evs-example 
              +  namespace: default  
              +spec:  
              +  accessModes:  
              +  - ReadWriteOnce 
              +  resources:  
              +    requests:  
              +      storage: 10Gi
              +  volumeName: pv-evs-example
              +  volumeNamespace: default
              + +
              + + + + + + + + + + + + + + + + + + + + + + +
              Table 6 Key parameters

              Parameter

              +

              Description

              +

              volume.beta.kubernetes.io/storage-class

              +

              Storage class, which must be the same as that of the existing PV.

              +

              volume.beta.kubernetes.io/storage-provisioner

              +

              The field must be set to flexvolume-huawei.com/fuxivol.

              +

              failure-domain.beta.kubernetes.io/region

              +

              Region where the cluster is located.

              +

              failure-domain.beta.kubernetes.io/zone

              +

              AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

              +

              storage

              +

              Requested capacity in the PVC, in Gi.

              +

              The value must be the same as the storage size of the existing PV.

              +

              volumeName

              +

              Name of the PV.

              +
              +
              +
            +

          4. Create the PV.

            kubectl create -f pv-evs-example.yaml

            +

          5. Create the PVC.

            kubectl create -f pvc-evs-example.yaml

            +

            After the operation is successful, choose Resource Management > Storage to view the created PVC. You can also view the EVS disk by name on the EVS console.
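            You can additionally verify the static binding from kubectl. A minimal sketch assuming the PV and PVC names used in this example:

            # The PV should show STATUS Bound and CLAIM default/pvc-evs-example.
            kubectl get pv pv-evs-example
            # The PVC's VOLUME column should be pv-evs-example.
            kubectl get pvc pvc-evs-example -n default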

            +

          6. (Optional) Add the metadata associated with the cluster to ensure that the EVS disk associated with the mounted static PV is not deleted when the node or cluster is deleted.

            If you skip this step here or when creating other static PVs or PVCs, ensure that the EVS disk associated with the static PV has been detached from the node before you delete that node.

            +
            +
            1. Obtain the tenant token. For details, see Obtaining a User Token.
            2. Obtain the EVS access address EVS_ENDPOINT. For details, see Regions and Endpoints.

              +
            3. Add the metadata associated with the cluster to the EVS disk backing the static PV.
              curl -X POST ${EVS_ENDPOINT}/v2/${project_id}/volumes/${volume_id}/metadata --insecure \
              +    -d '{"metadata":{"cluster_id": "${cluster_id}", "namespace": "${pvc_namespace}"}}' \
              +    -H 'Accept:application/json' -H 'Content-Type:application/json;charset=utf8' \
              +    -H 'X-Auth-Token:${TOKEN}'
              + +
              + + + + + + + + + + + + + + + + + + + + + + +
              Table 7 Key parameters

              Parameter

              +

              Description

              +

              EVS_ENDPOINT

              +

              EVS access address. Set this parameter to the value obtained in 6.b.

              +

              project_id

              +

              Project ID.

              +

              volume_id

              +

              ID of the associated EVS disk. Set this parameter to volume_id of the static PV to be created. You can also log in to the EVS console, click the name of the EVS disk to be imported, and obtain the ID from Summary on the disk details page.

              +

              cluster_id

              +

              ID of the cluster where the EVS PV is to be created. On the CCE console, choose Resource Management > Clusters. Click the name of the cluster to be associated. On the cluster details page, obtain the cluster ID.

              +

              pvc_namespace

              +

              Namespace where the PVC is to be bound.

              +

              TOKEN

              +

              User token. Set this parameter to the value obtained in 6.a.

              +
              +
              +

              For example, run the following command:

              +
              curl -X POST https://evs.eu-de.otc.t-systems.com:443/v2/060576866680d5762f52c0150e726aa7/volumes/69c9619d-174c-4c41-837e-31b892604e14/metadata --insecure \
              +    -d '{"metadata":{"cluster_id": "71e8277e-80c7-11ea-925c-0255ac100442", "namespace": "default"}}' \
              +    -H 'Accept:application/json' -H 'Content-Type:application/json;charset=utf8' \
              +    -H 'X-Auth-Token:MIIPe******IsIm1ldG'
              +

              After the request is executed, run the following commands to check whether the EVS disk has been associated with the metadata of the cluster:

              +
              curl -X GET ${EVS_ENDPOINT}/v2/${project_id}/volumes/${volume_id}/metadata --insecure \
              +    -H 'X-Auth-Token:${TOKEN}'
              +

              For example, run the following command:

              +
              curl -X GET https://evs.eu-de.otc.t-systems.com/v2/060576866680d5762f52c0150e726aa7/volumes/69c9619d-174c-4c41-837e-31b892604e14/metadata --insecure \
              +    -H 'X-Auth-Token:MIIPeAYJ***9t1c31ASaQ=='
              +

              The command output displays the current metadata of the EVS disk.

              +
              {
              +    "metadata": {
              +        "namespace": "default",
              +        "cluster_id": "71e8277e-80c7-11ea-925c-0255ac100442",
              +        "hw:passthrough": "true"
              +    }
              +}
              +
            +

          +
          +
          + + diff --git a/docs/cce/umn/cce_10_0314.html b/docs/cce/umn/cce_10_0314.html new file mode 100644 index 00000000..eb8d01df --- /dev/null +++ b/docs/cce/umn/cce_10_0314.html @@ -0,0 +1,174 @@ + + +

          (kubectl) Creating a Pod Mounted with an EVS Volume

          +

          Scenario

          After an EVS volume is created or imported to CCE, you can mount it to a workload.

          +

          EVS disks cannot be attached across AZs. Before mounting a volume, you can run the kubectl get pvc command to query the available PVCs in the AZ where the current cluster is located.
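          For example, assuming the default namespace (a sketch, not part of the original note):

          # List PVCs and their labels; the failure-domain.beta.kubernetes.io/zone label shows the AZ.
          kubectl get pvc -n default --show-labels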

          +
          +
          +

          Notes and Constraints

          The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

          +
          +

          Procedure

          1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
          2. Run the following commands to configure the evs-deployment-example.yaml file, which is used to create a Deployment.

            touch evs-deployment-example.yaml

            +

            vi evs-deployment-example.yaml

            +
            Example of mounting an EVS volume to a Deployment (PVC-based, shared volume):
            apiVersion: apps/v1 
            +kind: Deployment 
            +metadata: 
            +  name: evs-deployment-example 
            +  namespace: default 
            +spec: 
            +  replicas: 1 
            +  selector: 
            +    matchLabels: 
            +      app: evs-deployment-example 
            +  template: 
            +    metadata: 
            +      labels: 
            +        app: evs-deployment-example 
            +    spec: 
            +      containers: 
            +      - image: nginx
            +        name: container-0 
            +        volumeMounts: 
            +        - mountPath: /tmp 
            +          name: pvc-evs-example 
            +      imagePullSecrets:
            +        - name: default-secret
            +      restartPolicy: Always 
            +      volumes: 
            +      - name: pvc-evs-example 
            +        persistentVolumeClaim: 
            +          claimName: pvc-evs-auto-example
            +
            + +
            + + + + + + + + + + + + + + + + + + + + + +
            Table 1 Key parameters

            Parent Parameter

            +

            Parameter

            +

            Description

            +

            spec.template.spec.containers.volumeMounts

            +

            name

            +

            Name of the volume mounted to the container.

            +

            spec.template.spec.containers.volumeMounts

            +

            mountPath

            +

            Mount path of the container. In this example, the volume is mounted to the /tmp directory.

            +

            spec.template.spec.volumes

            +

            name

            +

            Name of the volume.

            +

            spec.template.spec.volumes.persistentVolumeClaim

            +

            claimName

            +

            Name of an existing PVC.

            +
            +
            +

            spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

            +
            +

            Mounting an EVS volume to a StatefulSet (PVC template-based, non-shared volume):

            +
            Example YAML:
            apiVersion: apps/v1
            +kind: StatefulSet
            +metadata:
            +  name: deploy-evs-sas-in
            +spec:
            +  replicas: 1
            +  selector:
            +    matchLabels:
            +      app: deploy-evs-sata-in
            +  template:
            +    metadata:
            +      labels:
            +        app: deploy-evs-sata-in
            +        failure-domain.beta.kubernetes.io/region: eu-de
            +        failure-domain.beta.kubernetes.io/zone: eu-de-01
            +    spec:
            +      containers:
            +        - name: container-0
            +          image: 'nginx:1.12-alpine-perl'
            +          volumeMounts:
            +            - name: bs-sas-mountoptionpvc
            +              mountPath: /tmp
            +      imagePullSecrets:
            +        - name: default-secret
            +  volumeClaimTemplates:
            +    - metadata:
            +        name: bs-sas-mountoptionpvc
            +        annotations:
            +          volume.beta.kubernetes.io/storage-class: sas
            +          volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxivol
            +      spec:
            +        accessModes:
            +          - ReadWriteOnce
            +        resources:
            +          requests:
            +            storage: 10Gi
            +  serviceName: wwww
            + +
            + + + + + + + + + + + + + + + + + + + + + +
            Table 2 Key parameters

            Parent Parameter

            +

            Parameter

            +

            Description

            +

            metadata

            +

            name

            +

            Name of the created workload.

            +

            spec.template.spec.containers

            +

            image

            +

            Image of the workload.

            +

            spec.template.spec.containers.volumeMounts

            +

            mountPath

            +

            Mount path of the container. In this example, the volume is mounted to the /tmp directory.

            +

            spec

            +

            serviceName

            +

            Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.

            +
            +
            +

            spec.template.spec.containers.volumeMounts.name and spec.volumeClaimTemplates.metadata.name must be consistent because they have a mapping relationship.

            +
            +
            +

          3. Run the following command to create the pod:

            kubectl create -f evs-deployment-example.yaml

            +

            After the creation is complete, log in to the CCE console. In the navigation pane, choose Resource Management > Storage > EVS. Then, click the PVC name. On the PVC details page, you can view the binding relationship between the EVS volume and the PVC.
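            You can also verify the mount from kubectl. A minimal sketch assuming the Deployment label and namespace used above:

            # Confirm the pod is running.
            kubectl get pod -l app=evs-deployment-example -n default
            # Check that the EVS volume is mounted at /tmp inside the container.
            POD=$(kubectl get pod -n default -l app=evs-deployment-example -o jsonpath='{.items[0].metadata.name}')
            kubectl exec -n default "$POD" -- df -h /tmp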

            +

          +
          +
          + + diff --git a/docs/cce/umn/cce_01_0315.html b/docs/cce/umn/cce_10_0315.html similarity index 52% rename from docs/cce/umn/cce_01_0315.html rename to docs/cce/umn/cce_10_0315.html index 12ea529f..135337a9 100644 --- a/docs/cce/umn/cce_01_0315.html +++ b/docs/cce/umn/cce_10_0315.html @@ -1,25 +1,23 @@ - +

          Using SFS File Systems as Storage Volumes

          diff --git a/docs/cce/umn/cce_10_0316.html b/docs/cce/umn/cce_10_0316.html new file mode 100644 index 00000000..7004742a --- /dev/null +++ b/docs/cce/umn/cce_10_0316.html @@ -0,0 +1,14 @@ + + +

          Overview

          +

          CCE allows you to mount a volume created from a Scalable File Service (SFS) file system to a container to store data persistently. SFS volumes are commonly used in ReadWriteMany scenarios, such as media processing, content management, big data analysis, and workload process analysis.

          +
          Figure 1 Mounting SFS volumes to CCE
          +

          Description

          • Standard file protocols: You can mount file systems as volumes to servers, the same as using local directories.
          • Data sharing: The same file system can be mounted to multiple servers, so that data can be shared.
          • Private network: Data can be accessed only over private networks within data centers.
          • Capacity and performance: The capacity of a single file system is high (PB level) and the performance is excellent (ms-level read/write I/O latency).
          • Use cases: Deployments/StatefulSets in the ReadWriteMany mode and jobs created for high-performance computing (HPC), media processing, content management, web services, big data analysis, and workload process analysis
          +
          +
          + + diff --git a/docs/cce/umn/cce_10_0318.html b/docs/cce/umn/cce_10_0318.html new file mode 100644 index 00000000..f2957fa0 --- /dev/null +++ b/docs/cce/umn/cce_10_0318.html @@ -0,0 +1,62 @@ + + +

          (kubectl) Automatically Creating an SFS Volume

          +

          Notes and Constraints

          The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

          +
          +

          Procedure

          1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
          2. Run the following commands to configure the pvc-sfs-auto-example.yaml file, which is used to create a PVC.

            touch pvc-sfs-auto-example.yaml

            +

            vi pvc-sfs-auto-example.yaml

            +
            Example YAML file:
            apiVersion: v1 
            +kind: PersistentVolumeClaim 
            +metadata: 
            +  annotations: 
            +    volume.beta.kubernetes.io/storage-class: nfs-rw
            +  name: pvc-sfs-auto-example 
            +  namespace: default 
            +spec: 
            +  accessModes: 
            +  - ReadWriteMany 
            +  resources: 
            +    requests: 
            +      storage: 10Gi
            + +
            + + + + + + + + + + + + + + + + +
            Table 1 Key parameters

            Parameter

            +

            Description

            +

            volume.beta.kubernetes.io/storage-class

            +

            File storage class. Currently, the standard file protocol type (nfs-rw) is supported.

            +

            name

            +

            Name of the PVC to be created.

            +

            accessModes

            +

            Only ReadWriteMany is supported. ReadWriteOnce is not supported.

            +

            storage

            +

            Storage capacity in the unit of Gi.

            +
            +
            +
            +

          3. Run the following command to create a PVC:

            kubectl create -f pvc-sfs-auto-example.yaml

            +

            After the command is executed, a file system is created in the VPC to which the cluster belongs. Choose Storage > SFS on the CCE console or log in to the SFS console to view the file system.
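            You can also watch the provisioning from kubectl. A hedged sketch assuming the PVC name used above:

            # The Events section shows provisioning progress or errors; STATUS becomes Bound when the file system is ready.
            kubectl describe pvc pvc-sfs-auto-example -n default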

            +

          +
          +
          + + diff --git a/docs/cce/umn/cce_10_0319.html b/docs/cce/umn/cce_10_0319.html new file mode 100644 index 00000000..8069d6cd --- /dev/null +++ b/docs/cce/umn/cce_10_0319.html @@ -0,0 +1,279 @@ + + +

          (kubectl) Creating a PV from an Existing SFS File System

          +

          Notes and Constraints

          The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

          +
          +

          Procedure

          1. Log in to the SFS console, create a file system, and record the file system ID, shared path, and capacity.
          2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
          3. Create two YAML files for creating the PV and PVC. Assume that the file names are pv-sfs-example.yaml and pvc-sfs-example.yaml.

            touch pv-sfs-example.yaml pvc-sfs-example.yaml

            + +
            + + + + + + + + + + + + + +

            Kubernetes Cluster Version

            +

            Description

            +

            YAML Example

            +

            1.11 ≤ K8s version < 1.13

            +

            Clusters from v1.11 to v1.13

            +

            Example YAML

            +

            K8s version = 1.9

            +

            Clusters of v1.9

            +

            Example YAML

            +
            +
            +

            Clusters from v1.11 to v1.13

            +
            • Example YAML file for the PV:
              apiVersion: v1 
              +kind: PersistentVolume 
              +metadata: 
              +  name: pv-sfs-example 
              +  annotations:
              +    pv.kubernetes.io/provisioned-by: flexvolume-huawei.com/fuxinfs
              +spec: 
              +  accessModes: 
              +  - ReadWriteMany 
              +  capacity: 
              +    storage: 10Gi 
              +  claimRef:
              +    apiVersion: v1
              +    kind: PersistentVolumeClaim
              +    name: pvc-sfs-example
              +    namespace: default
              +  flexVolume: 
              +    driver: huawei.com/fuxinfs 
              +    fsType: nfs 
              +    options: 
              +      deviceMountPath: <your_deviceMountPath>  # Shared path of your file system.
              +      fsType: nfs 
              +      volumeID: f6976f9e-2493-419b-97ca-d7816008d91c 
              +  persistentVolumeReclaimPolicy: Delete 
              +  storageClassName: nfs-rw
              + +
              + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
              Table 1 Key parameters

              Parameter

              +

              Description

              +

              driver

              +

              Storage driver used to mount the volume. Set the driver to huawei.com/fuxinfs for the file system.

              +

              deviceMountPath

              +

              Shared path of the file system.

              +

              On the management console, choose Service List > Storage > Scalable File Service. You can obtain the shared path of the file system from the Mount Address column.

              +

              volumeID

              +

              File system ID.

              +

              To obtain the ID, log in to the CCE console, choose Resource Management > Storage, click the PVC name in the SFS tab page, and copy the PVC ID on the PVC details page.

              +

              storage

              +

              File system size.

              +

              storageClassName

              +

              Read/write mode supported by the file system. Currently, nfs-rw and nfs-ro are supported.

              +

              spec.claimRef.apiVersion

              +

              The value is fixed at v1.

              +

              spec.claimRef.kind

              +

              The value is fixed at PersistentVolumeClaim.

              +

              spec.claimRef.name

              +

              The value is the same as the name of the PVC created in the next step.

              +

              spec.claimRef.namespace

              +

              Namespace of the PVC. The value is the same as the namespace of the PVC created in the next step.

              +
              +
              +
            • Example YAML file for the PVC:
              apiVersion: v1
              +kind: PersistentVolumeClaim
              +metadata:
              +  annotations:
              +    volume.beta.kubernetes.io/storage-class: nfs-rw
              +    volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxinfs
              +  name: pvc-sfs-example
              +  namespace: default
              +spec:
              +  accessModes:
              +  - ReadWriteMany
              +  resources:
              +    requests:
              +      storage: 10Gi
              +  volumeName: pv-sfs-example
              + +
              + + + + + + + + + + + + + + + + +
              Table 2 Key parameters

              Parameter

              +

              Description

              +

              volume.beta.kubernetes.io/storage-class

              +

              Read/write mode supported by the file system. nfs-rw and nfs-ro are supported. The value must be the same as that of the existing PV.

              +

              volume.beta.kubernetes.io/storage-provisioner

              +

              Must be set to flexvolume-huawei.com/fuxinfs.

              +

              storage

              +

              Storage capacity, in the unit of Gi. The value must be the same as the storage size of the existing PV.

              +

              volumeName

              +

              Name of the PV.

              +
              +
              +
            +

            Clusters of v1.9

            +
            • Example YAML file for the PV:
              apiVersion: v1 
              +kind: PersistentVolume 
              +metadata: 
              +  name: pv-sfs-example 
              +  namespace: default 
              +spec: 
              +  accessModes: 
              +  - ReadWriteMany 
              +  capacity: 
              +    storage: 10Gi 
              +  flexVolume: 
              +    driver: huawei.com/fuxinfs 
              +    fsType: nfs 
              +    options: 
              +      deviceMountPath: <your_deviceMountPath>  # Shared path of your file system.
              +      fsType: nfs 
              +      kubernetes.io/namespace: default 
              +      volumeID: f6976f9e-2493-419b-97ca-d7816008d91c 
              +  persistentVolumeReclaimPolicy: Delete 
              +  storageClassName: nfs-rw
              + +
              + + + + + + + + + + + + + + + + + + + +
              Table 3 Key parameters

              Parameter

              +

              Description

              +

              driver

              +

              Storage driver used to mount the volume. Set the driver to huawei.com/fuxinfs for the file system.

              +

              deviceMountPath

              +

              Shared path of the file system.

              +

              On the management console, choose Service List > Storage > Scalable File Service. You can obtain the shared path of the file system from the Mount Address column.

              +

              volumeID

              +

              File system ID.

              +

              To obtain the ID, log in to the CCE console, choose Resource Management > Storage, click the PVC name in the SFS tab page, and copy the PVC ID on the PVC details page.

              +

              storage

              +

              File system size.

              +

              storageClassName

              +

              Read/write mode supported by the file system. Currently, nfs-rw and nfs-ro are supported.

              +
              +
              +
            • Example YAML file for the PVC:
              apiVersion: v1
              +kind: PersistentVolumeClaim
              +metadata:
              +  annotations:
              +    volume.beta.kubernetes.io/storage-class: nfs-rw
              +    volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxinfs
              +  name: pvc-sfs-example
              +  namespace: default
              +spec:
              +  accessModes:
              +  - ReadWriteMany
              +  resources:
              +    requests:
              +      storage: 10Gi
              +  volumeName: pv-sfs-example
              +  volumeNamespace: default
              + +
              + + + + + + + + + + + + + + + + +
              Table 4 Key parameters

              Parameter

              +

              Description

              +

              volume.beta.kubernetes.io/storage-class

              +

              Read/write mode supported by the file system. nfs-rw and nfs-ro are supported. The value must be the same as that of the existing PV.

              +

              volume.beta.kubernetes.io/storage-provisioner

              +

              The field must be set to flexvolume-huawei.com/fuxinfs.

              +

              storage

              +

              Storage capacity, in the unit of Gi. The value must be the same as the storage size of the existing PV.

              +

              volumeName

              +

              Name of the PV.

              +
              +
              +
            +

            The VPC to which the file system belongs must be the same as the VPC of the ECS where the workload is planned to run.

            +
            +

          4. Create the PV.

            kubectl create -f pv-sfs-example.yaml

            +

          5. Create the PVC.

            kubectl create -f pvc-sfs-example.yaml

            +
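            A quick check that the static PV and PVC have bound to each other (a sketch assuming the names used in this example):

            # Both objects should show STATUS Bound; the PVC's VOLUME column should be pv-sfs-example.
            kubectl get pv pv-sfs-example
            kubectl get pvc pvc-sfs-example -n default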

          +
          +
          + + diff --git a/docs/cce/umn/cce_10_0320.html b/docs/cce/umn/cce_10_0320.html new file mode 100644 index 00000000..e8605db3 --- /dev/null +++ b/docs/cce/umn/cce_10_0320.html @@ -0,0 +1,166 @@ + + +

          (kubectl) Creating a Deployment Mounted with an SFS Volume

          +

          Scenario

          After an SFS volume is created or imported to CCE, you can mount the volume to a workload.

          +
          +

          Notes and Constraints

          The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

          +
          +

          Procedure

          1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
          2. Run the following commands to configure the sfs-deployment-example.yaml file, which is used to create a pod.

            touch sfs-deployment-example.yaml

            +

            vi sfs-deployment-example.yaml

            +
            Example of mounting an SFS volume to a Deployment (PVC-based, shared volume):
            apiVersion: apps/v1 
            +kind: Deployment 
            +metadata: 
            +  name: sfs-deployment-example                                # Workload name
            +  namespace: default 
            +spec: 
            +  replicas: 1 
            +  selector: 
            +    matchLabels: 
            +      app: sfs-deployment-example 
            +  template: 
            +    metadata: 
            +      labels: 
            +        app: sfs-deployment-example 
            +    spec: 
            +      containers: 
            +      - image: nginx 
            +        name: container-0 
            +        volumeMounts: 
            +        - mountPath: /tmp                                # Mount path 
            +          name: pvc-sfs-example 
            +      imagePullSecrets:
            +        - name: default-secret
            +      restartPolicy: Always 
            +      volumes: 
            +      - name: pvc-sfs-example 
            +        persistentVolumeClaim: 
            +          claimName: pvc-sfs-auto-example                # PVC name
            +
            + +
            + + + + + + + + + + + + + + + + + +
            Table 1 Key parameters

            Parent Parameter

            +

            Parameter

            +

            Description

            +

            metadata

            +

            name

            +

            Name of the workload to be created.

            +

            spec.template.spec.containers.volumeMounts

            +

            mountPath

            +

            Mount path in the container. In this example, the mount path is /tmp.

            +

            spec.template.spec.volumes.persistentVolumeClaim

            +

            claimName

            +

            Name of an existing PVC.

            +
            +
            +

            spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

            +
            +

            Example of mounting an SFS volume to a StatefulSet (PVC template-based, dedicated volume):

            +
            Example YAML:
            apiVersion: apps/v1
            +kind: StatefulSet
            +metadata:
            +  name: deploy-sfs-nfs-rw-in
            +  namespace: default
            +  labels:
            +    appgroup: ''
            +spec:
            +  replicas: 2
            +  selector:
            +    matchLabels:
            +      app: deploy-sfs-nfs-rw-in
            +  template:
            +    metadata:
            +      labels:
            +        app: deploy-sfs-nfs-rw-in
            +    spec:
            +      containers:
            +        - name: container-0
            +          image: 'nginx:1.12-alpine-perl'
            +          volumeMounts:
            +            - name: bs-nfs-rw-mountoptionpvc
            +              mountPath: /aaa
            +      imagePullSecrets:
            +        - name: default-secret
            +  volumeClaimTemplates:
            +    - metadata:
            +        name: bs-nfs-rw-mountoptionpvc
            +        annotations:
            +          volume.beta.kubernetes.io/storage-class: nfs-rw
            +          volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxinfs
            +      spec:
            +        accessModes:
            +          - ReadWriteMany
            +        resources:
            +          requests:
            +            storage: 1Gi
            +  serviceName: wwww
            +
            + +
            + + + + + + + + + + + + + + + + + + + + + +
            Table 2 Key parameters

            Parent Parameter

            +

            Parameter

            +

            Description

            +

            metadata

            +

            name

            +

            Name of the created workload.

            +

            spec.template.spec.containers

            +

            image

            +

            Image of the workload.

            +

            spec.template.spec.containers.volumeMounts

            +

            mountPath

            +

            Mount path in the container. In this example, the mount path is /aaa.

            +

            spec

            +

            serviceName

            +

            Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.

            +
            +
            +

            spec.template.spec.containers.volumeMounts.name and spec.volumeClaimTemplates.metadata.name must be consistent because they have a mapping relationship.

            +
            +

          3. Run the following command to create the pod:

            kubectl create -f sfs-deployment-example.yaml

            +

            After the creation is complete, log in to the CCE console. In the navigation pane, choose Resource Management > Storage > SFS. Click the PVC name. On the PVC details page, you can view the binding relationship between SFS and PVC.
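            To verify from kubectl that the SFS share is mounted, the following sketch (assuming the label and namespace used above) inspects the mounts inside the container:

            POD=$(kubectl get pod -n default -l app=sfs-deployment-example -o jsonpath='{.items[0].metadata.name}')
            # The SFS share should appear as an NFS mount on /tmp.
            kubectl exec -n default "$POD" -- sh -c 'mount | grep /tmp'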

            +

          +
          +
          + + diff --git a/docs/cce/umn/cce_10_0321.html b/docs/cce/umn/cce_10_0321.html new file mode 100644 index 00000000..cc566cc8 --- /dev/null +++ b/docs/cce/umn/cce_10_0321.html @@ -0,0 +1,108 @@ + + +

          (kubectl) Creating a StatefulSet Mounted with an SFS Volume

          +

          Scenario

          CCE allows you to use an existing SFS volume to create a StatefulSet through a PersistentVolumeClaim (PVC).

          +
          +

          Notes and Constraints

          The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

          +
          +

          Procedure

          1. Create an SFS volume by referring to (kubectl) Automatically Creating an SFS Volume and record the volume name.
          2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
          3. Create a YAML file for creating the workload. Assume that the file name is sfs-statefulset-example.yaml.

            touch sfs-statefulset-example.yaml

            +

            vi sfs-statefulset-example.yaml

            +

            Example YAML:

            +
            apiVersion: apps/v1
            +kind: StatefulSet
            +metadata:
            +  name: sfs-statefulset-example
            +  namespace: default
            +spec:
            +  replicas: 2
            +  selector:
            +    matchLabels:
            +      app: sfs-statefulset-example
            +  serviceName: qwqq
            +  template:
            +    metadata:
            +      annotations:
            +        metrics.alpha.kubernetes.io/custom-endpoints: '[{"api":"","path":"","port":"","names":""}]'
            +        pod.alpha.kubernetes.io/initialized: "true"
            +      labels:
            +        app: sfs-statefulset-example
            +    spec:
            +      affinity: {}
            +      containers:
            +      - image: nginx:latest
            +        name: container-0
            +        volumeMounts:
            +        - mountPath: /tmp
            +          name: pvc-sfs-example
            +      imagePullSecrets:
            +      - name: default-secret
            +      volumes:
            +        - name: pvc-sfs-example
            +          persistentVolumeClaim:
            +            claimName: cce-sfs-demo
            + +
            + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            Table 1 Key parameters

            Parent Parameter

            +

            Parameter

            +

            Description

            +

            spec

            +

            replicas

            +

            Number of pods.

            +

            metadata

            +

            name

            +

            Name of the created workload.

            +

            spec.template.spec.containers

            +

            image

            +

            Image used by the workload.

            +

            spec.template.spec.containers.volumeMounts

            +

            mountPath

            +

            Mount path in the container.

            +

            spec

            +

            serviceName

            +

            Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.

            +

            spec.template.spec.volumes.persistentVolumeClaim

            +

            claimName

            +

            Name of an existing PVC.

            +
            +
            +

            spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

            +
            +

          4. Create the StatefulSet.

            kubectl create -f sfs-statefulset-example.yaml

            +
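            You can then confirm that both replicas are running and share the same SFS-backed PVC. A minimal sketch assuming the names used above:

            # Both replicas should be reported as running/ready.
            kubectl get statefulset sfs-statefulset-example -n default
            # Both pods reference the same PVC (cce-sfs-demo), so they share the SFS file system.
            kubectl get pod -l app=sfs-statefulset-example -n default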

          +
          +
          + + diff --git a/docs/cce/umn/cce_01_0322.html b/docs/cce/umn/cce_10_0322.html similarity index 51% rename from docs/cce/umn/cce_01_0322.html rename to docs/cce/umn/cce_10_0322.html index e7faed46..9b022042 100644 --- a/docs/cce/umn/cce_01_0322.html +++ b/docs/cce/umn/cce_10_0322.html @@ -1,25 +1,23 @@ - +

          Using OBS Buckets as Storage Volumes

          diff --git a/docs/cce/umn/cce_10_0323.html b/docs/cce/umn/cce_10_0323.html new file mode 100644 index 00000000..ad8c21d3 --- /dev/null +++ b/docs/cce/umn/cce_10_0323.html @@ -0,0 +1,19 @@ + + +

          Overview

          +

          CCE allows you to mount a volume created from an Object Storage Service (OBS) bucket to a container to store data persistently. Object storage is commonly used in cloud workloads, data analysis, content analysis, and hotspot objects.

          +
          Figure 1 Mounting OBS volumes to CCE
          +

          Storage Class

          Object storage offers three storage classes, Standard, Infrequent Access, and Archive, to satisfy different requirements for storage performance and costs.

          +
          • The Standard storage class features low access latency and high throughput. It is therefore applicable to storing a large number of hot files (frequently accessed every month) or small files (less than 1 MB). The application scenarios include big data analytics, mobile apps, hot videos, and picture processing on social media.
          • The Infrequent Access storage class is ideal for storing data that is semi-frequently accessed (less than 12 times a year), with requirements for quick response. The application scenarios include file synchronization or sharing, and enterprise-level backup. It provides the same durability, access latency, and throughput as the Standard storage class but at a lower cost. However, the Infrequent Access storage class has lower availability than the Standard storage class.
          • The Archive storage class is suitable for archiving data that is rarely-accessed (averagely once a year). The application scenarios include data archiving and long-term data backup. The Archive storage class is secure and durable at an affordable low cost, which can be used to replace tape libraries. However, it may take hours to restore data from the Archive storage class.
          +
          +

          Description

          • Standard APIs: With HTTP RESTful APIs, OBS allows you to use client tools or third-party tools to access object storage.
          • Data sharing: Servers, embedded devices, and IoT devices can use the same path to access shared object data in OBS.
          • Public/Private networks: OBS allows data to be accessed from public networks to meet Internet application requirements.
          • Capacity and performance: No capacity limit; high performance (read/write I/O latency within 10 ms).
          • Use cases: Deployments/StatefulSets in the ReadOnlyMany mode and jobs created for big data analysis, static website hosting, online video on demand (VoD), gene sequencing, intelligent video surveillance, backup and archiving, and enterprise cloud boxes (web disks). You can create object storage by using the OBS console, tools, and SDKs.
          +
          +

          Reference

          You can also mount OBS buckets of third-party tenants to CCE clusters, including OBS parallel file systems (preferred) and OBS object buckets.

          +
          +
          + + diff --git a/docs/cce/umn/cce_10_0325.html b/docs/cce/umn/cce_10_0325.html new file mode 100644 index 00000000..c89ffb46 --- /dev/null +++ b/docs/cce/umn/cce_10_0325.html @@ -0,0 +1,64 @@ + + +

          (kubectl) Automatically Creating an OBS Volume

          +

          Scenario

          CCE can automatically create OBS buckets and mount them as volumes. Currently, Standard and Infrequent Access OBS buckets are supported, which correspond to the obs-standard and obs-standard-ia storage classes, respectively.

          +
          +

          Notes and Constraints

          The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

          +
          +

          Procedure

          1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
          2. Run the following commands to configure the pvc-obs-auto-example.yaml file, which is used to create a PVC.

            touch pvc-obs-auto-example.yaml

            +

            vi pvc-obs-auto-example.yaml

            +

            Example YAML:

            +
            apiVersion: v1 
            +kind: PersistentVolumeClaim 
            +metadata: 
            +  annotations: 
            +    volume.beta.kubernetes.io/storage-class: obs-standard  # OBS bucket type. The value can be obs-standard (standard) or obs-standard-ia (infrequent access).
            +  name: pvc-obs-auto-example  # PVC name
            +  namespace: default 
            +spec: 
            +  accessModes: 
            +  - ReadWriteMany 
            +  resources: 
            +    requests: 
            +      storage: 1Gi   # Storage capacity in the unit of Gi. For OBS buckets, this parameter is used only for verification (fixed to 1, cannot be empty or 0). Any value you set does not take effect for OBS buckets.
            + +
            + + + + + + + + + + + + + + + + +
            Table 1 Key parameters

            Parameter

            +

            Description

            +

            volume.beta.kubernetes.io/storage-class

            +

            Bucket type. Currently, obs-standard and obs-standard-ia are supported.

            +

            name

            +

            Name of the PVC to be created.

            +

            accessModes

            +

            Only ReadWriteMany is supported. ReadWriteOnly is not supported.

            +

            storage

            +

            Storage capacity in the unit of Gi. For OBS buckets, this field is used only for verification (cannot be empty or 0). Its value is fixed at 1, and any value you set does not take effect for OBS buckets.

            +
            +
            +

          3. Run the following command to create a PVC:

            kubectl create -f pvc-obs-auto-example.yaml

            +

            After the command is executed, an OBS bucket is created in the VPC to which the cluster belongs. You can click the bucket name in Storage > OBS to view the bucket or view it on the OBS console.

            +
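            You can also verify the result from kubectl. A minimal check, assuming the example PVC above in the default namespace:
            kubectl get pvc pvc-obs-auto-example -n default
            The PVC is displayed with the status Bound once the OBS bucket has been created and bound to it.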

          +
          +
          + + diff --git a/docs/cce/umn/cce_10_0326.html b/docs/cce/umn/cce_10_0326.html new file mode 100644 index 00000000..cd4f804a --- /dev/null +++ b/docs/cce/umn/cce_10_0326.html @@ -0,0 +1,289 @@ + + +

          (kubectl) Creating a PV from an Existing OBS Bucket

          +

          Scenario

          CCE allows you to use an existing OBS bucket to create a PersistentVolume (PV). You can create a PersistentVolumeClaim (PVC) and bind it to the PV.

          +
          +

          Notes and Constraints

          The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

          +
          +

          Procedure

          1. Log in to the OBS console, create an OBS bucket, and record the bucket name and storage class.
          2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
          3. Create two YAML files for creating the PV and PVC. Assume that the file names are pv-obs-example.yaml and pvc-obs-example.yaml.

            touch pv-obs-example.yaml pvc-obs-example.yaml

            + +
            + + + + + + + + + + + + + +

            Kubernetes Cluster Version

            +

            Description

            +

            YAML Example

            +

            1.11 ≤ K8s version ≤ 1.13

            +

            Clusters from v1.11 to v1.13

            +

            Example YAML

            +

            K8s version = 1.9

            +

            Clusters of v1.9

            +

            Example YAML

            +
            +
            +

            Clusters from v1.11 to v1.13

            +
            • Example YAML file for the PV:
              apiVersion: v1 
              +kind: PersistentVolume 
              +metadata: 
              +  name: pv-obs-example 
              +  annotations:
              +    pv.kubernetes.io/provisioned-by: flexvolume-huawei.com/fuxiobs
              +spec: 
              +  accessModes: 
              +  - ReadWriteMany 
              +  capacity: 
              +    storage: 1Gi 
              +  claimRef:
              +    apiVersion: v1
              +    kind: PersistentVolumeClaim
              +    name: pvc-obs-example
              +    namespace: default
              +  flexVolume: 
              +    driver: huawei.com/fuxiobs 
              +    fsType: obs 
              +    options: 
              +      fsType: obs 
              +      region: eu-de
              +      storage_class: STANDARD 
              +      volumeID: test-obs 
              +  persistentVolumeReclaimPolicy: Delete 
              +  storageClassName: obs-standard
              + +
              + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
              Table 1 Key parameters

              Parameter

              +

              Description

              +

              driver

              +

              Storage driver used to mount the volume. Set the driver to huawei.com/fuxiobs for the OBS volume.

              +

              storage_class

              +

              Storage class, including STANDARD (standard bucket) and STANDARD_IA (infrequent access bucket).

              +

              region

              +

              Region where the cluster is located.

              +

              volumeID

              +

              OBS bucket name.

              +

              To obtain the name, log in to the CCE console, choose Resource Management > Storage, click the PVC name in the OBS tab page, and copy the PV name on the PV Details tab page.

              +

              storage

              +

              Storage capacity in the unit of Gi. The value is fixed at 1Gi.

              +

              storageClassName

              +

              Storage class supported by OBS, including obs-standard (standard bucket) and obs-standard-ia (infrequent access bucket).

              +

              spec.claimRef.apiVersion

              +

              The value is fixed at v1.

              +

              spec.claimRef.kind

              +

              The value is fixed at PersistentVolumeClaim.

              +

              spec.claimRef.name

              +

              The value is the same as the name of the PVC created in the next step.

              +

              spec.claimRef.namespace

              +

              The value is the same as the namespace of the PVC created in the next step.

              +
              +
              +
            • Example YAML file for the PVC:
              apiVersion: v1
              +kind: PersistentVolumeClaim
              +metadata:
              +  annotations:
              +    volume.beta.kubernetes.io/storage-class: obs-standard
              +    volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxiobs
              +  name: pvc-obs-example
              +  namespace: default
              +spec:
              +  accessModes:
              +  - ReadWriteMany
              +  resources:
              +    requests:
              +      storage: 1Gi
              +  volumeName: pv-obs-example
              + +
              + + + + + + + + + + + + + + + + +
              Table 2 Key parameters

              Parameter

              +

              Description

              +

              volume.beta.kubernetes.io/storage-class

              +

              Storage class supported by OBS, including obs-standard and obs-standard-ia.

              +

              volume.beta.kubernetes.io/storage-provisioner

              +

              Must be set to flexvolume-huawei.com/fuxiobs.

              +

              volumeName

              +

              Name of the PV.

              +

              storage

              +

              Storage capacity in the unit of Gi. The value is fixed at 1Gi.

              +
              +
              +
            +

            Clusters of v1.9

            +
            • Example YAML file for the PV:
              apiVersion: v1 
              +kind: PersistentVolume 
              +metadata: 
              +  name: pv-obs-example 
              +  namespace: default  
              +spec: 
              +  accessModes: 
              +  - ReadWriteMany 
              +  capacity: 
              +    storage: 1Gi 
              +  flexVolume: 
              +    driver: huawei.com/fuxiobs 
              +    fsType: obs 
              +    options: 
              +      fsType: obs 
              +      kubernetes.io/namespace: default 
              +      region: eu-de
              +      storage_class: STANDARD 
              +      volumeID: test-obs 
              +  persistentVolumeReclaimPolicy: Delete 
              +  storageClassName: obs-standard
              + +
              + + + + + + + + + + + + + + + + + + + + + + +
              Table 3 Key parameters

              Parameter

              +

              Description

              +

              driver

              +

              Storage driver used to mount the volume. Set the driver to huawei.com/fuxiobs for the OBS volume.

              +

              storage_class

              +

              Storage class, including STANDARD (standard bucket) and STANDARD_IA (infrequent access bucket).

              +

              region

              +

              Region where the cluster is located.

              +

              volumeID

              +

              OBS bucket name.

              +

              To obtain the name, log in to the CCE console, choose Resource Management > Storage, click the PVC name in the OBS tab page, and copy the PV name on the PV Details tab page.

              +

              storage

              +

              Storage capacity in the unit of Gi. The value is fixed at 1Gi.

              +

              storageClassName

              +

              Storage class supported by OBS, including obs-standard (standard bucket) and obs-standard-ia (infrequent access bucket).

              +
              +
              +
            • Example YAML file for the PVC:
              apiVersion: v1
              +kind: PersistentVolumeClaim
              +metadata:
              +  annotations:
              +    volume.beta.kubernetes.io/storage-class: obs-standard
              +    volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxiobs
              +  name: pvc-obs-example
              +  namespace: default
              +spec:
              +  accessModes:
              +  - ReadWriteMany
              +  resources:
              +    requests:
              +      storage: 1Gi
              +  volumeName: pv-obs-example
              +  volumeNamespace: default
              + +
              + + + + + + + + + + + + + + + + +
              Table 4 Key parameters

              Parameter

              +

              Description

              +

              volume.beta.kubernetes.io/storage-class

              +

              Storage class supported by OBS, including obs-standard and obs-standard-ia.

              +

              volume.beta.kubernetes.io/storage-provisioner

              +

              Must be set to flexvolume-huawei.com/fuxiobs.

              +

              volumeName

              +

              Name of the PV.

              +

              storage

              +

              Storage capacity in the unit of Gi. The value is fixed at 1Gi.

              +
              +
              +
            +

          4. Create the PV.

            kubectl create -f pv-obs-example.yaml

            +

          5. Create the PVC.

            kubectl create -f pvc-obs-example.yaml

            +
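            Optionally, confirm that the PVC is bound to the PV. A minimal check using the example names above:
            kubectl get pv pv-obs-example
            kubectl get pvc pvc-obs-example -n default
            Both objects should report the status Bound.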

          +
          +
          + + diff --git a/docs/cce/umn/cce_10_0327.html b/docs/cce/umn/cce_10_0327.html new file mode 100644 index 00000000..53ced888 --- /dev/null +++ b/docs/cce/umn/cce_10_0327.html @@ -0,0 +1,173 @@ + + +

          (kubectl) Creating a Deployment Mounted with an OBS Volume

          +

          Scenario

          After an OBS volume is created or imported to CCE, you can mount the volume to a workload.

          +
          +

          Notes and Constraints

          The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

          +
          +

          Procedure

          1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
          2. Run the following commands to configure the obs-deployment-example.yaml file, which is used to create a pod.

            touch obs-deployment-example.yaml

            +

            vi obs-deployment-example.yaml

            +
            Example of mounting an OBS volume to a Deployment (PVC-based, shared volume):
            apiVersion: apps/v1 
            +kind: Deployment 
            +metadata: 
             +  name: obs-deployment-example                       # Workload name
            +  namespace: default 
            +spec: 
            +  replicas: 1 
            +  selector: 
            +    matchLabels: 
            +      app: obs-deployment-example 
            +  template: 
            +    metadata: 
            +      labels: 
            +        app: obs-deployment-example 
            +    spec: 
            +      containers: 
            +      - image: nginx 
            +        name: container-0 
            +        volumeMounts: 
            +        - mountPath: /tmp                       # Mount path
            +          name: pvc-obs-example 
            +      restartPolicy: Always
            +      imagePullSecrets:
            +        - name: default-secret
            +      volumes: 
            +      - name: pvc-obs-example  
            +        persistentVolumeClaim: 
            +          claimName: pvc-obs-auto-example       # PVC name
            +
            + +
            + + + + + + + + + + + + + +
            Table 1 Key parameters

            Parameter

            +

            Description

            +

            name

            +

            Name of the pod to be created.

            +

            app

            +

            Name of the application running in the pod.

            +

            mountPath

            +

            Mount path in the container.

            +
            +
            +

            spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

            +
            +

            Example of mounting an OBS volume to a StatefulSet (PVC template-based, dedicated volume):

            +
            Example YAML:
            apiVersion: apps/v1
            +kind: StatefulSet
            +metadata:
            +  name: deploy-obs-standard-in
            +  namespace: default
            +  generation: 1
            +  labels:
            +    appgroup: ''
            +spec:
            +  replicas: 1
            +  selector:
            +    matchLabels:
            +      app: deploy-obs-standard-in
            +  template:
            +    metadata:
            +      labels:
            +        app: deploy-obs-standard-in
            +      annotations:
            +        metrics.alpha.kubernetes.io/custom-endpoints: '[{"api":"","path":"","port":"","names":""}]'
            +        pod.alpha.kubernetes.io/initialized: 'true'
            +    spec:
            +      containers:
            +        - name: container-0
            +          image: 'nginx:1.12-alpine-perl'
            +          env:
            +            - name: PAAS_APP_NAME
            +              value: deploy-obs-standard-in
            +            - name: PAAS_NAMESPACE
            +              value: default
            +            - name: PAAS_PROJECT_ID
            +              value: a2cd8e998dca42e98a41f596c636dbda
            +          resources: {}
            +          volumeMounts:
            +            - name: obs-bs-standard-mountoptionpvc
            +              mountPath: /tmp
            +          terminationMessagePath: /dev/termination-log
            +          terminationMessagePolicy: File
            +          imagePullPolicy: IfNotPresent
            +      restartPolicy: Always
            +      terminationGracePeriodSeconds: 30
            +      dnsPolicy: ClusterFirst
            +      securityContext: {}
            +      imagePullSecrets:
            +        - name: default-secret
            +      affinity: {}
            +      schedulerName: default-scheduler
            +  volumeClaimTemplates:
            +    - metadata:
            +        name: obs-bs-standard-mountoptionpvc
            +        annotations:
            +          volume.beta.kubernetes.io/storage-class: obs-standard
            +          volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxiobs
            +      spec:
            +        accessModes:
            +          - ReadWriteMany
            +        resources:
            +          requests:
            +            storage: 1Gi
            +  serviceName: wwww
            +  podManagementPolicy: OrderedReady
            +  updateStrategy:
            +    type: RollingUpdate
            +  revisionHistoryLimit: 10
            +
            + +
            + + + + + + + + + + + + + + + + +
            Table 2 Key parameters

            Parameter

            +

            Description

            +

            name

            +

            Name of the created workload.

            +

            image

            +

            Image of the workload.

            +

            mountPath

            +

            Mount path in the container. In this example, the volume is mounted to the /tmp directory.

            +

            serviceName

            +

            Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.

            +
            +
            +

            spec.template.spec.containers.volumeMounts.name and spec.volumeClaimTemplates.metadata.name must be consistent because they have a mapping relationship.

            +
            +

          3. Run the following command to create the pod:

            kubectl create -f obs-deployment-example.yaml

            +

            After the creation is complete, choose Storage > OBS on the CCE console and click the PVC name. On the PVC details page, you can view the binding relationship between the OBS service and the PVC.

            +
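            You can also check the mount from kubectl. A quick sketch, assuming the example Deployment above runs in the default namespace:
            kubectl get pod -l app=obs-deployment-example
            kubectl exec <pod-name> -- df -h /tmp
            Replace <pod-name> with the pod name returned by the first command. The second command should show the OBS volume mounted at /tmp.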

          +
          +
          + + diff --git a/docs/cce/umn/cce_10_0328.html b/docs/cce/umn/cce_10_0328.html new file mode 100644 index 00000000..8dac0ed4 --- /dev/null +++ b/docs/cce/umn/cce_10_0328.html @@ -0,0 +1,94 @@ + + +

          (kubectl) Creating a StatefulSet Mounted with an OBS Volume

          +

          Scenario

          CCE allows you to use an existing OBS volume to create a StatefulSet through a PersistentVolumeClaim (PVC).

          +
          +

          Notes and Constraints

          The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

          +
          +

          Procedure

          1. Create an OBS volume by referring to (kubectl) Automatically Creating an OBS Volume and obtain the PVC name.
          2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
          3. Create a YAML file for creating the workload. Assume that the file name is obs-statefulset-example.yaml.

            touch obs-statefulset-example.yaml

            +

            vi obs-statefulset-example.yaml

            +

            Example YAML:

            +
            apiVersion: apps/v1
            +kind: StatefulSet
            +metadata:
            +  name: obs-statefulset-example
            +  namespace: default
            +spec:
            +  replicas: 1
            +  selector:
            +    matchLabels:
            +      app: obs-statefulset-example
            +  serviceName: qwqq
            +  template:
            +    metadata:
            +      annotations:
            +        metrics.alpha.kubernetes.io/custom-endpoints: '[{"api":"","path":"","port":"","names":""}]'
            +        pod.alpha.kubernetes.io/initialized: "true"
            +      creationTimestamp: null
            +      labels:
            +        app: obs-statefulset-example
            +    spec:
            +      affinity: {}
             +      containers:
             +      - image: nginx:latest
            +        imagePullPolicy: Always
            +        name: container-0
            +        volumeMounts:
            +        - mountPath: /tmp
            +          name: pvc-obs-example
            +      imagePullSecrets:
            +      - name: default-secret
            +      volumes:
            +        - name: pvc-obs-example
            +          persistentVolumeClaim:
            +            claimName: cce-obs-demo
            + +
            + + + + + + + + + + + + + + + + + + + + + + +
            Table 1 Key parameters

            Parameter

            +

            Description

            +

            replicas

            +

            Number of pods.

            +

            name

            +

            Name of the created workload.

            +

            image

            +

            Image used by the workload.

            +

            mountPath

            +

            Mount path in the container.

            +

            serviceName

            +

            Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.

            +

            claimName

            +

            Name of an existing PVC.

            +
            +
            +

          4. Create the StatefulSet.

            kubectl create -f obs-statefulset-example.yaml

            +
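            To confirm that the StatefulSet is running and the OBS volume is mounted, a minimal check based on the example names above:
            kubectl get statefulset obs-statefulset-example -n default
            kubectl exec obs-statefulset-example-0 -- mount | grep /tmp
            The pod name obs-statefulset-example-0 assumes one replica; adjust it if you changed the replicas value.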

          +
          +
          + + diff --git a/docs/cce/umn/cce_01_0329.html b/docs/cce/umn/cce_10_0329.html similarity index 53% rename from docs/cce/umn/cce_01_0329.html rename to docs/cce/umn/cce_10_0329.html index 3e62416d..e5bfba0d 100644 --- a/docs/cce/umn/cce_01_0329.html +++ b/docs/cce/umn/cce_10_0329.html @@ -1,23 +1,21 @@ - +

          Using SFS Turbo File Systems as Storage Volumes

          diff --git a/docs/cce/umn/cce_10_0330.html b/docs/cce/umn/cce_10_0330.html new file mode 100644 index 00000000..1a935be8 --- /dev/null +++ b/docs/cce/umn/cce_10_0330.html @@ -0,0 +1,14 @@ + + +

          Overview

          +

          CCE allows you to mount a volume created from an SFS Turbo file system to a container to store data persistently. Provisioned on demand and fast, SFS Turbo is suitable for DevOps, container microservices, and enterprise OA scenarios.

          +
          Figure 1 Mounting SFS Turbo volumes to CCE
          +

          Description

          • Standard file protocols: You can mount file systems as volumes to servers, the same as using local directories.
          • Data sharing: The same file system can be mounted to multiple servers, so that data can be shared.
          • Private network: Users can access data only over private networks in data centers.
          • Data isolation: The on-cloud storage service provides exclusive cloud file storage, which delivers data isolation and ensures IOPS performance.
          • Use cases: Deployments/StatefulSets in the ReadWriteMany mode, DaemonSets, and jobs created for high-traffic websites, log storage, DevOps, and enterprise OA applications
          +
          +
          + + diff --git a/docs/cce/umn/cce_10_0332.html b/docs/cce/umn/cce_10_0332.html new file mode 100644 index 00000000..4d37c897 --- /dev/null +++ b/docs/cce/umn/cce_10_0332.html @@ -0,0 +1,148 @@ + + +

          (kubectl) Creating a PV from an Existing SFS Turbo File System

          +

          Scenario

          CCE allows you to use an existing SFS Turbo file system to create a PersistentVolume (PV). After the creation is successful, you can create a PersistentVolumeClaim (PVC) and bind it to the PV.

          +
          +

          Notes and Constraints

          The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

          +
          +

          Procedure

          1. Log in to the SFS console, create a file system, and record the file system ID, shared path, and capacity.
          2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
          3. Create two YAML files for creating the PV and PVC. Assume that the file names are pv-efs-example.yaml and pvc-efs-example.yaml.

            touch pv-efs-example.yaml pvc-efs-example.yaml

            +
            • Example YAML file for the PV:
              apiVersion: v1 
              +kind: PersistentVolume 
              +metadata: 
              +  name: pv-efs-example 
              +  annotations:
              +    pv.kubernetes.io/provisioned-by: flexvolume-huawei.com/fuxiefs
              +spec: 
              +  accessModes: 
              +  - ReadWriteMany 
              +  capacity: 
              +    storage: 100Gi 
              +  claimRef:
              +    apiVersion: v1
              +    kind: PersistentVolumeClaim
              +    name: pvc-efs-example
              +    namespace: default
              +  flexVolume: 
              +    driver: huawei.com/fuxiefs 
              +    fsType: efs 
              +    options: 
              +      deviceMountPath: <your_deviceMountPath>  # Shared storage path of your SFS Turbo file.
              +      fsType: efs 
              +      volumeID: 8962a2a2-a583-4b7f-bb74-fe76712d8414 
              +  persistentVolumeReclaimPolicy: Delete 
              +  storageClassName: efs-standard
              + +
              + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
              Table 1 Key parameters

              Parameter

              +

              Description

              +

              driver

              +

              Storage driver used to mount the volume. Set it to huawei.com/fuxiefs.

              +

              deviceMountPath

              +

              Shared path of the SFS Turbo volume.

              +

              volumeID

              +

              SFS Turbo volume ID.

              +

              To obtain the ID, log in to the CCE console, choose Resource Management > Storage, click the PVC name in the SFS Turbo tab page, and copy the PVC ID on the PVC details page.

              +

              storage

              +

              File system size.

              +

              storageClassName

              +

              Volume type supported by SFS Turbo. The value can be efs-standard or efs-performance. SFS Turbo does not currently support dynamic creation, so this parameter is not used for now.

              +

              spec.claimRef.apiVersion

              +

              The value is fixed at v1.

              +

              spec.claimRef.kind

              +

              The value is fixed at PersistentVolumeClaim.

              +

              spec.claimRef.name

              +

              The value is the same as the name of the PVC created in the next step.

              +

              spec.claimRef.namespace

              +

              The value is the same as the namespace of the PVC created in the next step.

              +
              +
              +
            • Example YAML file for the PVC:
              apiVersion: v1 
              +kind: PersistentVolumeClaim 
              +metadata: 
              +  annotations: 
              +    volume.beta.kubernetes.io/storage-class: efs-standard 
              +    volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxiefs 
              +  name: pvc-efs-example 
              +  namespace: default 
              +spec: 
              +  accessModes: 
              +  - ReadWriteMany 
              +  resources: 
              +    requests: 
              +      storage: 100Gi 
              +  volumeName: pv-efs-example
              + +
              + + + + + + + + + + + + + + + + +
              Table 2 Key parameters

              Parameter

              +

              Description

              +

              volume.beta.kubernetes.io/storage-class

              +

              Storage class supported by SFS Turbo. The value can be efs-standard or efs-performance and must be the same as the storageClassName of the existing PV.

              +

              volume.beta.kubernetes.io/storage-provisioner

              +

              The field must be set to flexvolume-huawei.com/fuxiefs.

              +

              storage

              +

              Storage capacity, in the unit of Gi. The value must be the same as the storage size of the existing PV.

              +

              volumeName

              +

              Name of the PV.

              +
              +
              +
            +

            The VPC to which the SFS Turbo file system belongs must be the same as the VPC of the ECS VM planned for the workload. Ports 111, 445, 2049, 2051, and 20048 must be enabled in the security groups.

            +
            +

          4. Create the PV.

            kubectl create -f pv-efs-example.yaml

            +

          5. Create the PVC.

            kubectl create -f pvc-efs-example.yaml

            +
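            Optionally, verify that the PV and PVC are bound, using the example names above:
            kubectl get pv pv-efs-example
            kubectl get pvc pvc-efs-example -n default
            Both should report the status Bound.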

          +
          +
          + + diff --git a/docs/cce/umn/cce_10_0333.html b/docs/cce/umn/cce_10_0333.html new file mode 100644 index 00000000..4fd3b8d4 --- /dev/null +++ b/docs/cce/umn/cce_10_0333.html @@ -0,0 +1,76 @@ + + +

          (kubectl) Creating a Deployment Mounted with an SFS Turbo Volume

          +

          Scenario

          After an SFS Turbo volume is created or imported to CCE, you can mount the volume to a workload.

          +
          +

          Notes and Constraints

          The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

          +
          +

          Procedure

          1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
          2. Run the following commands to configure the efs-deployment-example.yaml file, which is used to create a Deployment:

            touch efs-deployment-example.yaml

            +

            vi efs-deployment-example.yaml

            +

            Example of mounting an SFS Turbo volume to a Deployment (PVC-based, shared volume):

            +
            apiVersion: apps/v1  
            +kind: Deployment  
            +metadata:  
            +  name: efs-deployment-example                                # Workload name
            +  namespace: default  
            +spec:  
            +  replicas: 1  
            +  selector:  
            +    matchLabels:  
            +      app: efs-deployment-example  
            +  template:  
            +    metadata:  
            +      labels:  
            +        app: efs-deployment-example  
            +    spec:  
            +      containers:  
            +      - image: nginx  
            +        name: container-0  
            +        volumeMounts:  
            +        - mountPath: /tmp                                # Mount path
            +          name: pvc-efs-example  
            +      restartPolicy: Always
            +      imagePullSecrets:
            +        - name: default-secret
            +      volumes:  
            +      - name: pvc-efs-example  
            +        persistentVolumeClaim:  
            +          claimName: pvc-sfs-auto-example                # PVC name
            + +
            + + + + + + + + + + + + + +
            Table 1 Key parameters

            Parameter

            +

            Description

            +

            name

            +

            Name of the created Deployment.

            +

            app

            +

            Name of the application running in the Deployment.

            +

            mountPath

            +

            Mount path in the container. In this example, the mount path is /tmp.

            +
            +
            +

            spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

            +
            +

          3. Run the following command to create the pod:

            kubectl create -f efs-deployment-example.yaml

            +

            After the creation is complete, choose Storage > SFS Turbo on the CCE console and click the PVC name. On the PVC details page, you can view the binding relationship between SFS Turbo and PVC.

            +
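            You can also verify the mount from kubectl. A quick sketch, assuming the example Deployment above:
            kubectl get pod -l app=efs-deployment-example
            kubectl exec <pod-name> -- mount | grep /tmp
            Replace <pod-name> with the pod name returned by the first command. An NFS entry mounted on /tmp indicates that the SFS Turbo volume is attached.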

          +
          +
          + + diff --git a/docs/cce/umn/cce_01_0334.html b/docs/cce/umn/cce_10_0334.html similarity index 51% rename from docs/cce/umn/cce_01_0334.html rename to docs/cce/umn/cce_10_0334.html index 8d617954..0e81efd3 100644 --- a/docs/cce/umn/cce_01_0334.html +++ b/docs/cce/umn/cce_10_0334.html @@ -1,16 +1,14 @@ - +

          (kubectl) Creating a StatefulSet Mounted with an SFS Turbo Volume

          -

          Scenario

          CCE allows you to use an existing SFS Turbo volume to create a StatefulSet.

          +

          Scenario

          CCE allows you to use an existing SFS Turbo volume to create a StatefulSet.

          -

          Prerequisites

          You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

          +

          Notes and Constraints

          The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

          -

          Notes and Constraints

          The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

          -
          -

          Procedure

          1. Create an SFS Turbo volume and record the volume name.
          2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
          3. Create a YAML file for creating the workload. Assume that the file name is efs-statefulset-example.yaml.

            touch efs-statefulset-example.yaml

            -

            vi efs-statefulset-example.yaml

            -

            Example YAML:

            -
            apiVersion: apps/v1
            +

            Procedure

            1. Create an SFS Turbo volume and record the volume name.
            2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
            3. Create a YAML file for creating the workload. Assume that the file name is efs-statefulset-example.yaml.

              touch efs-statefulset-example.yaml

              +

              vi efs-statefulset-example.yaml

              +

              Example YAML:

              +
              apiVersion: apps/v1
               kind: StatefulSet
               metadata:
                 name: efs-statefulset-example
              @@ -68,54 +66,54 @@ spec:
                 updateStrategy:
                   type: RollingUpdate
              -
              Table 1 Key parameters

              Parameter

              +
              - - - - - - - - - - - - -
              Table 1 Key parameters

              Parameter

              Description

              +

              Description

              replicas

              +

              replicas

              Number of pods.

              +

              Number of pods.

              name

              +

              name

              Name of the created workload.

              +

              Name of the created workload.

              image

              +

              image

              Image used by the workload.

              +

              Image used by the workload.

              mountPath

              +

              mountPath

              Mount path in the container.

              +

              Mount path in the container.

              serviceName

              +

              serviceName

              Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.

              +

              Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.

              claimName

              +

              claimName

              Name of an existing PVC.

              +

              Name of an existing PVC.

              -

              spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

              +

              spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

              -

            4. Create the StatefulSet.

              kubectl create -f efs-statefulset-example.yaml

              +

            5. Create the StatefulSet.

              kubectl create -f efs-statefulset-example.yaml

            6. diff --git a/docs/cce/umn/cce_10_0336.html b/docs/cce/umn/cce_10_0336.html new file mode 100644 index 00000000..5e271acd --- /dev/null +++ b/docs/cce/umn/cce_10_0336.html @@ -0,0 +1,242 @@ + + +

              Using a Custom AK/SK to Mount an OBS Volume

              +

              Scenario

              By default, all IAM users under an account use the same access key uploaded on the console to mount OBS volumes, so they all have the same permissions on the buckets. With the everest add-on 1.2.8 or later, you can configure custom access keys for different IAM users.

              +
              +

              Prerequisites

              • The everest add-on version must be 1.2.8 or later.
              • The cluster version must be 1.15.11 or later.
              +
              +

              Notes and Constraints

              Custom access keys cannot be configured for secure containers.

              +
              +

              Disabling Auto Key Mounting

              The key you uploaded is used by default when mounting an OBS volume. That is, all IAM users under your account will use the same key to mount OBS buckets, and they have the same permissions on buckets. This setting does not allow you to configure differentiated permissions for different IAM users.

              +

              If you have uploaded an AK/SK, you are advised to disable the automatic mounting of access keys by setting the disable-auto-mount-secret parameter of the everest add-on to true. This prevents IAM users from performing unauthorized operations, because the access keys uploaded on the console will no longer be used when OBS volumes are created.

              +
              • When enabling disable-auto-mount-secret, ensure that no OBS volume exists in the cluster. A workload mounted with an OBS volume, when scaled or restarted, will fail to remount the OBS volume because it needs to specify the access key but is prohibited by disable-auto-mount-secret.
              • If disable-auto-mount-secret is set to true, an access key must be specified when a PV or PVC is created. Otherwise, the OBS volume fails to be mounted.
              +
              +

              kubectl edit ds everest-csi-driver -nkube-system

              +

              Search for disable-auto-mount-secret and set it to true.

              +

              +

              Run :wq to save the settings and exit. Wait until the pod is restarted.

              +
              +

              Creating a Secret Using an Access Key

              1. Obtain an access key.

                For details, see Creating Access Keys (AK and SK).

                +

              2. Encode the keys using Base64. (Assume that the AK is xxx and the SK is yyy.)

                echo -n xxx|base64

                +

                echo -n yyy|base64

                +

                Record the encoded AK and SK.

                +

              3. Create a YAML file for the secret, for example, test-user.yaml.

                apiVersion: v1
                +data:
                +  access.key: WE5WWVhVNU*****
                +  secret.key: Nnk4emJyZ0*****
                +kind: Secret
                +metadata:
                +  name: test-user
                +  namespace: default
                +  labels:
                +    secret.kubernetes.io/used-by: csi
                +type: cfe/secure-opaque
                +

                Specifically:

                + +
                + + + + + + + + + + + + + + + + + + + + + + +

                Parameter

                +

                Description

                +

                access.key

                +

                Base64-encoded AK.

                +

                secret.key

                +

                Base64-encoded SK.

                +

                name

                +

                Secret name.

                +

                namespace

                +

                Namespace of the secret.

                +

                secret.kubernetes.io/used-by: csi

                +

                Add this label in the YAML file if you want the secret to be selectable on the CCE console when you create an OBS PV or PVC.

                +

                type

                +

                Secret type. The value must be cfe/secure-opaque.

                +

                When this type is used, the data entered by users is automatically encrypted.

                +
                +
                +

              4. Create the secret.

                kubectl create -f test-user.yaml

                +
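              To confirm that the secret has been created with the expected type and label, a minimal check:
              kubectl get secret test-user -n default --show-labels
              The output should show the type cfe/secure-opaque and the label secret.kubernetes.io/used-by=csi.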

              +
              +

              Mounting a Secret When Statically Creating an OBS Volume

              After a secret is created using the AK/SK, you can associate the secret with the PV to be created and then use the AK/SK in the secret to mount an OBS volume.

              +
              1. Log in to the OBS console, create an OBS bucket, and record the bucket name and storage class. The parallel file system is used as an example.
              2. Create a YAML file for the PV, for example, pv-example.yaml.

                +
                apiVersion: v1
                +kind: PersistentVolume
                +metadata:
                +  name: pv-obs-example
                +  annotations:
                +    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
                +spec:
                +  accessModes:
                +  - ReadWriteMany
                +  capacity:
                +    storage: 1Gi
                +  csi:
                +    nodePublishSecretRef:
                +      name: test-user
                +      namespace: default
                +    driver: obs.csi.everest.io
                +    fsType: obsfs
                +    volumeAttributes:
                +      everest.io/obs-volume-type: STANDARD
                +      everest.io/region: eu-de
                +      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
                +    volumeHandle: obs-normal-static-pv
                +  persistentVolumeReclaimPolicy: Delete
                +  storageClassName: csi-obs
                + +
                + + + + + + + + + + + + + +

                Parameter

                +

                Description

                +

                nodePublishSecretRef

                +

                Secret specified during the mounting.

                +
                • name: name of the secret
                • namespace: namespace of the secret
                +

                fsType

                +

                File type. The value can be obsfs or s3fs. If the value is s3fs, an OBS bucket is created and mounted using s3fs. If the value is obsfs, an OBS parallel file system is created and mounted using obsfs. You are advised to set this field to obsfs.

                +

                volumeHandle

                +

                OBS bucket name.

                +
                +
                +

              3. Create the PV.

                kubectl create -f pv-example.yaml

                +

                After a PV is created, you can create a PVC and associate it with the PV.

                +

              4. Create a YAML file for the PVC, for example, pvc-example.yaml.

                Example YAML file for the PVC:

                +
                apiVersion: v1
                +kind: PersistentVolumeClaim
                +metadata:
                +  annotations:
                +    csi.storage.k8s.io/node-publish-secret-name: test-user
                +    csi.storage.k8s.io/node-publish-secret-namespace: default
                +    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
                +    everest.io/obs-volume-type: STANDARD
                +    csi.storage.k8s.io/fstype: obsfs
                +  name: obs-secret
                +  namespace: default
                +spec:
                +  accessModes:
                +  - ReadWriteMany
                +  resources:
                +    requests:
                +      storage: 1Gi
                +  storageClassName: csi-obs
                +  volumeName: pv-obs-example
                + +
                + + + + + + + + + + +

                Parameter

                +

                Description

                +

                csi.storage.k8s.io/node-publish-secret-name

                +

                Name of the secret

                +

                csi.storage.k8s.io/node-publish-secret-namespace

                +

                Namespace of the secret

                +
                +
                +

              5. Create the PVC.

                kubectl create -f pvc-example.yaml

                +

                After the PVC is created, you can create a workload and associate it with the PVC to create volumes.

                +

              +
              +

              Mounting a Secret When Dynamically Creating an OBS Volume

              When dynamically creating an OBS volume, you can use the following method to specify a secret:

              +
              1. Create a YAML file for the PVC, for example, pvc-example.yaml.

                apiVersion: v1
                +kind: PersistentVolumeClaim
                +metadata:
                +  annotations:
                +    csi.storage.k8s.io/node-publish-secret-name: test-user
                +    csi.storage.k8s.io/node-publish-secret-namespace: default
                +    everest.io/obs-volume-type: STANDARD
                +    csi.storage.k8s.io/fstype: obsfs
                +  name: obs-secret
                +  namespace: default
                +spec:
                +  accessModes:
                +  - ReadWriteMany
                +  resources:
                +    requests:
                +      storage: 1Gi
                +  storageClassName: csi-obs
                + +
                + + + + + + + + + + +

                Parameter

                +

                Description

                +

                csi.storage.k8s.io/node-publish-secret-name

                +

                Name of the secret

                +

                csi.storage.k8s.io/node-publish-secret-namespace

                +

                Namespace of the secret

                +
                +
                +

              2. Create the PVC.

                kubectl create -f pvc-example.yaml

                +

                After the PVC is created, you can create a workload and associate it with the PVC to create volumes.

                +
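              For reference, the following is a minimal Deployment sketch that mounts the obs-secret PVC created above at /temp, matching the setup assumed in Verification below. The workload name, label, and image are examples only:
              apiVersion: apps/v1
              kind: Deployment
              metadata:
                name: obs-secret                      # Example workload name, matching the Verification section
                namespace: default
              spec:
                replicas: 1
                selector:
                  matchLabels:
                    app: obs-secret
                template:
                  metadata:
                    labels:
                      app: obs-secret
                  spec:
                    containers:
                    - image: nginx                    # Example image
                      name: container-0
                      volumeMounts:
                      - mountPath: /temp              # Mount path used in the Verification section
                        name: obs-secret-volume
                    imagePullSecrets:
                    - name: default-secret
                    volumes:
                    - name: obs-secret-volume
                      persistentVolumeClaim:
                        claimName: obs-secret         # PVC created above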

              +
              +

              Verification

              You can use a secret of an IAM user to mount an OBS volume. Assume that a workload named obs-secret is created, the mount path in the container is /temp, and the IAM user has the CCE ReadOnlyAccess and Tenant Guest permissions.
              1. Query the name of the workload pod.

                kubectl get po | grep obs-secret

                +

                Expected outputs:

                +
                obs-secret-5cd558f76f-vxslv          1/1     Running   0          3m22s
                +
              2. Query the objects in the mount path. In this example, the query is successful.

                kubectl exec obs-secret-5cd558f76f-vxslv -- ls -l /temp/

                +
              3. Write data into the mount path. In this example, the write operation fails.

                kubectl exec obs-secret-5cd558f76f-vxslv -- touch /temp/test

                +

                Expected outputs:

                +
                touch: setting times of '/temp/test': No such file or directory
                +command terminated with exit code 1
                +
              4. Set the read/write permissions for the IAM user who mounted the OBS volume by referring to the bucket policy configuration.

                +

                +
              5. Write data into the mount path again. In this example, the write operation succeeds.

                kubectl exec obs-secret-5cd558f76f-vxslv -- touch /temp/test

                +
              6. Check the mount path in the container to see whether the data is successfully written.

                kubectl exec obs-secret-5cd558f76f-vxslv -- ls -l /temp/

                +

                Expected outputs:

                +
                -rwxrwxrwx 1 root root 0 Jun  7 01:52 test
                +
              +
              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0337.html b/docs/cce/umn/cce_10_0337.html new file mode 100644 index 00000000..dada39f1 --- /dev/null +++ b/docs/cce/umn/cce_10_0337.html @@ -0,0 +1,184 @@ + + +

              Setting Mount Options

              +

              Scenario

              You can mount cloud storage volumes to your containers and use these volumes as local directories.

              +

              This section describes how to set mount options when mounting SFS and OBS volumes. You can set mount options in a PV and bind the PV to a PVC. Alternatively, set mount options in a StorageClass and use the StorageClass to create a PVC. In this way, PVs can be dynamically created and inherit mount options configured in the StorageClass by default.

              +
              +

              SFS Volume Mount Options

              The everest add-on in CCE presets the options described in Table 1 for mounting SFS volumes. You can set other mount options if needed. For details, see Mounting an NFS File System to ECSs (Linux).

              + +
              + + + + + + + + + + + + + + + + +
              Table 1 SFS volume mount options

              Option

              +

              Description

              +

              vers=3

              +

              File system version. Currently, only NFSv3 is supported. Value: 3

              +

              nolock

              +

              Whether to lock files on the server using the NLM protocol. If nolock is selected, the lock is valid for applications on one host. For applications on another host, the lock is invalid.

              +

              timeo=600

              +

              Waiting time before the NFS client retransmits a request. The unit is 0.1 seconds. Recommended value: 600

              +

              hard/soft

              +

              Mounting mode.

              +
              • hard: If the NFS request times out, the client keeps resending the request until the request is successful.
              • soft: If the NFS request times out, the client returns an error to the invoking program.
              +

              The default value is hard.

              +
              +
              +
              +

              OBS Volume Mount Options

              When an OBS volume is mounted, the everest add-on presets the options described in Table 2 and Table 3 by default. The options in Table 2 are mandatory.

              + +
              + + + + + + + + + + + + + + + + + + + + + + + + + +
              Table 2 Mandatory mount options configured by default

              Option

              +

              Description

              +

              use_ino

              +

              If enabled, obsfs allocates the inode number. Enabled by default in read/write mode.

              +

              big_writes

              +

              If configured, the maximum size of the cache can be modified.

              +

              nonempty

              +

              Allows non-empty mount paths.

              +

              allow_other

              +

              Allows other users to access the parallel file system.

              +

              no_check_certificate

              +

              Disables server certificate verification.

              +

              enable_noobj_cache

              +

              Enables cache entries for objects that do not exist, which can improve performance. Enabled by default in object bucket read/write mode.

              +

              This option is no longer set by default since everest 1.2.40.

              +

              sigv2

              +

              Specifies the signature version. Used by default in object buckets.

              +
              +
              + +
              + + + + + + + + + + + + + + + + +
              Table 3 Optional mount options configured by default

              Option

              +

              Description

              +

              max_write=131072

              +

              This parameter is valid only when big_writes is configured. The recommended value is 128 KB.

              +

              ssl_verify_hostname=0

              +

              Disables verifying the SSL certificate based on the host name.

              +

              max_background=100

              +

              Allows setting the maximum number of waiting requests in the background. Used by default in parallel file systems.

              +

              public_bucket=1

              +

              If set to 1, public buckets are mounted anonymously. Enabled by default in object bucket read/write mode.

              +
              +
              +

              You can log in to the node to which the pod is scheduled and view all mount options used for mounting the OBS volume in the process details.

              +
              • Object bucket: ps -ef | grep s3fs
                root     22142     1  0 Jun03 ?        00:00:00 /usr/bin/s3fs pvc-82fe2cbe-3838-43a2-8afb-f994e402fb9d /mnt/paas/kubernetes/kubelet/pods/0b13ff68-4c8e-4a1c-b15c-724fd4d64389/volumes/kubernetes.io~csi/pvc-82fe2cbe-3838-43a2-8afb-f994e402fb9d/mount -o url=https://{{endpoint}}:443 -o endpoint=xxxxxx -o passwd_file=/opt/everest-host-connector/1622707954357702943_obstmpcred/pvc-82fe2cbe-3838-43a2-8afb-f994e402fb9d -o nonempty -o big_writes -o enable_noobj_cache -o sigv2 -o allow_other -o no_check_certificate -o ssl_verify_hostname=0 -o max_write=131072 -o multipart_size=20 -o umask=0
                +
              • Parallel file system: ps -ef | grep obsfs
                root      1355     1  0 Jun03 ?        00:03:16 /usr/bin/obsfs pvc-86720bb9-5aa8-4cde-9231-5253994f8468 /mnt/paas/kubernetes/kubelet/pods/c959a91d-eced-4b41-91c6-96cbd65324f9/volumes/kubernetes.io~csi/pvc-86720bb9-5aa8-4cde-9231-5253994f8468/mount -o url=https://{{endpoint}}:443 -o endpoint=xxxxxx -o passwd_file=/opt/everest-host-connector/1622714415305160399_obstmpcred/pvc-86720bb9-5aa8-4cde-9231-5253994f8468 -o allow_other -o nonempty -o big_writes -o use_ino -o no_check_certificate -o ssl_verify_hostname=0 -o umask=0027 -o max_write=131072 -o max_background=100 -o uid=10000 -o gid=10000
                +
              +
              +

              Prerequisites

              • The everest add-on version must be 1.2.8 or later.
              • The add-on identifies the mount options and transfers them to the underlying storage resources, which determine whether the specified options are valid.
              +
              +

              Notes and Constraints

              Mount options cannot be configured for secure containers.

              +
              +

              Setting Mount Options in a PV

              You can use the mountOptions field to set mount options in a PV. The options you can configure in mountOptions are listed in SFS Volume Mount Options and OBS Volume Mount Options.

              +
              apiVersion: v1
              +kind: PersistentVolume
              +metadata:
              +  name: pv-obs-example
              +  annotations:
              +    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
              +spec:
              +  mountOptions:
              +  - umask=0027
              +  - uid=10000
              +  - gid=10000
              +  accessModes:
              +  - ReadWriteMany
              +  capacity:
              +    storage: 1Gi
              +  claimRef:
              +    apiVersion: v1
              +    kind: PersistentVolumeClaim
              +    name: pvc-obs-example
              +    namespace: default
              +  csi:
              +    driver: obs.csi.everest.io
              +    fsType: obsfs
              +    volumeAttributes:
              +      everest.io/obs-volume-type: STANDARD
              +      everest.io/region: eu-de
              +      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
              +    volumeHandle: obs-normal-static-pv
              +  persistentVolumeReclaimPolicy: Delete
              +  storageClassName: csi-obs
              +

              After a PV is created, you can create a PVC and bind it to the PV, and then mount the PV to the container in the workload.
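For example, a PVC bound to the preceding PV could look as follows. This is a minimal sketch modeled on the static OBS volume examples in this guide; the PVC name and namespace match the claimRef of the PV above.

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-obs-example
  namespace: default
  annotations:
    everest.io/obs-volume-type: STANDARD
    csi.storage.k8s.io/fstype: obsfs        # Matches the fsType of the PV
    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1Gi                          # Same as the capacity of the PV
  storageClassName: csi-obs
  volumeName: pv-obs-example                # Binds the PVC to the static PV above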

              +
              +

              Setting Mount Options in a StorageClass

              You can use the mountOptions field to set mount options in a StorageClass. The options you can configure in mountOptions are listed in SFS Volume Mount Options and OBS Volume Mount Options.

              +
              apiVersion: storage.k8s.io/v1
              +kind: StorageClass
              +metadata:
              +  name: csi-obs-mount-option
              +mountOptions:
              +- umask=0027
              +- uid=10000
              +- gid=10000
              +parameters:
              +  csi.storage.k8s.io/csi-driver-name: obs.csi.everest.io
              +  csi.storage.k8s.io/fstype: s3fs
              +  everest.io/obs-volume-type: STANDARD
              +provisioner: everest-csi-provisioner
              +reclaimPolicy: Delete
              +volumeBindingMode: Immediate
              +

              After the StorageClass is configured, you can use it to create a PVC. By default, the dynamically created PVs inherit the mount options set in the StorageClass.
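For example, a PVC that uses the preceding StorageClass could look as follows (a minimal sketch; the PVC name is illustrative). The PV dynamically created for it inherits the umask, uid, and gid mount options from the StorageClass.

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-obs-mount-option               # Illustrative name
  namespace: default
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-obs-mount-option   # StorageClass defined above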

              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0338.html b/docs/cce/umn/cce_10_0338.html new file mode 100644 index 00000000..df6a512f --- /dev/null +++ b/docs/cce/umn/cce_10_0338.html @@ -0,0 +1,45 @@ + + +

              Removing a Node

              +

              Scenario

              Removing a node from a cluster will re-install the node OS and clear CCE components on the node.

              +

              Removing a node will not delete the server corresponding to the node. You are advised to remove nodes at off-peak hours to avoid impacts on your services.

              +

              After a node is removed from the cluster, the node is still running.

              +
              +

              Notes and Constraints

              • Nodes can be removed only when the cluster is in the Available or Unavailable state.
              • A CCE node can be removed only when it is in the Active, Abnormal, or Error state.
              • A CCE node in the Active state can have its OS re-installed and CCE components cleared after it is removed.
              • If the OS fails to be re-installed after the node is removed, manually re-install the OS. After the re-installation, log in to the node and run the clearance script to clear CCE components. For details, see Handling Failed OS Reinstallation.
              +
              +

              Precautions

              • Removing a node will lead to pod migration, which may affect services. Perform this operation during off-peak hours.
              • Unexpected risks may occur during the operation. Back up data in advance.
              • While the node is being removed, the backend sets the node to the unschedulable state.
              • After you remove the node and re-install the OS, the original LVM partitions and the data they manage will be cleared. Therefore, back up data in advance.
              +
              +

              Procedure

              1. Log in to the CCE console and click the cluster name to access the cluster.
              2. Choose Nodes from the navigation pane and choose More > Remove in the Operation column of the target node.
              3. In the dialog box displayed, configure the login information required for re-installing the OS and click Yes. Wait until the node is removed.

                After the node is removed, workload pods on the node are automatically migrated to other available nodes.

                +

              +
              +

              Handling Failed OS Reinstallation

              You can perform the following steps to re-install the OS and clear the CCE components on the node if previous attempts fail:

              +
              1. Log in to the management console of the server and re-install the OS.
              2. Log in to the server and run the following commands to clear the CCE components and LVM data:

                Write the following scripts to the clean.sh file:

                +
                lsblk
                +vgs --noheadings | awk '{print $1}' | xargs vgremove -f
                +pvs --noheadings | awk '{print $1}' | xargs pvremove -f
                +lvs --noheadings | awk '{print $1}' | xargs -i lvremove -f --select {}
                +function init_data_disk() {
                +    all_devices=$(lsblk -o KNAME,TYPE | grep disk | grep -v nvme | awk '{print $1}' | awk '{ print "/dev/"$1}')
                +    for device in ${all_devices[@]}; do
                +        isRootDisk=$(lsblk -o KNAME,MOUNTPOINT $device 2>/dev/null| grep -E '[[:space:]]/$' | wc -l )
                +        if [[ ${isRootDisk} != 0 ]]; then
                +            continue
                +        fi
                +        dd if=/dev/urandom of=${device} bs=512 count=64
                +        return
                +    done
                +    exit 1
                +}
                +init_data_disk
                +lsblk
                +

                Run the following command:

                +

                bash clean.sh

                +

              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0341.html b/docs/cce/umn/cce_10_0341.html new file mode 100644 index 00000000..078a4ab0 --- /dev/null +++ b/docs/cce/umn/cce_10_0341.html @@ -0,0 +1,50 @@ + + +

              Data Disk Space Allocation

              +

              This section describes how to allocate data disk space.

              +

              When creating a node, you need to configure a data disk whose capacity is greater than or equal to 100 GB for the node. You can click Expand to customize the data disk space allocation.

              +

              +
              • Allocate Disk Space: CCE divides the data disk space for container engines and pods. The container engine space stores the Docker/containerd working directories, container images, and image metadata. The pod space stores kubelet components and emptyDir volumes. The available container engine space affects image download and container startup and running.
                • Container engine and container image space (90% by default): functions as the container runtime working directory and stores container image data and image metadata.
                • kubelet component and emptyDir volume space (10% by default): stores pod configuration files, secrets, and mounted storage such as emptyDir volumes.
                +
              • Allocate Pod Basesize: indicates the base size of a container, that is, the upper limit of the disk space occupied by each workload pod (including the space occupied by container images). This setting prevents the pods from taking all the disk space available, which may cause service exceptions. It is recommended that the value be smaller than or equal to 80% of the container engine space. This parameter is related to the node OS and container storage rootfs and is not supported in some scenarios.
              +

              Setting Container Engine Space

              A data disk, 100 GB for example, is divided as follows (depending on the container storage rootfs):

              +

              You can log in to the node and run the docker info command to view the storage engine type.

              +
              # docker info
              +Containers: 20
              + Running: 17
              + Paused: 0
              + Stopped: 3
              +Images: 16
              +Server Version: 18.09.0
              +Storage Driver: devicemapper
              +
              • Rootfs (Device Mapper)
                By default, 90% of the data disk is the container engine and container image space, which can be divided into the following two parts:
                • The /var/lib/docker directory is the Docker working directory and occupies 20% of the container runtime space by default. (Space size of the /var/lib/docker directory = Data disk space x 90% x 20%)
                • The thin pool stores container image data, image metadata, and container data, and occupies 80% of the container runtime space by default. (Thin pool space = Data disk space x 90% x 80%)

                  The thin pool is dynamically mounted. You can view it by running the lsblk command on a node, but not the df -h command.

                  +
                +
                +

                +
              +
              • Rootfs (OverlayFS)

                No separate thin pool. The entire container engine and container image space (90% of the data disk by default) is in the /var/lib/docker directory.

                +

                +
              +
              Using rootfs for container storage in CCE
              • CCE cluster: EulerOS 2.5 nodes use Device Mapper, and EulerOS 2.9 nodes use OverlayFS. CentOS 7.x nodes use Device Mapper in clusters earlier than v1.19.16 and OverlayFS in clusters of v1.19.16 and later.
              • CCE Turbo cluster: BMSs use Device Mapper. ECSs use OverlayFS.
              +
              +
              +

              Allocating Basesize for Pods

              The capability of customizing pod basesize is related to the node OS and container storage rootfs. You can log in to the node and run the docker info command to view the container storage rootfs.

              +
              • Device Mapper supports custom pod basesize. The default value is 10 GB.
              • When OverlayFS is used, basesize is not limited by default. In clusters of v1.19.16, v1.21.3, v1.23.3, and later, EulerOS 2.9 nodes support basesize if the Docker engine is used. Other OSs do not support basesize.

                When Docker is used on EulerOS 2.9 nodes, basesize does not take effect for a container configured with CAP_SYS_RESOURCE or privileged.

                +
                +
              +

              When configuring basesize, consider the maximum number of pods on a node. The container engine space must be greater than the total disk space used by containers. Formula: Container engine and container image space (90% by default) > Number of containers x basesize. Otherwise, the container engine space allocated to the node may be insufficient and containers cannot be started.
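              For example, a rough calculation with a 100 GB data disk and the default 90% allocation:

                Container engine and container image space = 100 GB x 90% = 90 GB
                With basesize = 10 GB: 90 GB / 10 GB = 9
                Plan for no more than about 9 containers that may each fill their basesize on this node.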

              +

              +

              For nodes that support basesize, when Device Mapper is used, although you can limit the size of the /home directory of a single container (to 10 GB by default), all containers on the node still share the thin pool of the node for storage. They are not completely isolated. When the sum of the thin pool space used by certain containers reaches the upper limit, other containers cannot run properly.

              +

              In addition, after a file is deleted in the /home directory of a container, the thin pool space occupied by the file is not released immediately. Therefore, even if basesize is set to 10 GB, the thin pool space occupied by files keeps increasing until it reaches 10 GB as files are created in the container. The space released after file deletion is reused, but only after a delay. If the number of containers on the node multiplied by basesize is greater than the thin pool space of the node, the thin pool space may be exhausted.

              +
              +

              Garbage Collection Policies for Container Images

              When the container engine space is insufficient, image garbage collection is triggered.

              +

              The policy for garbage collecting images takes two factors into consideration: HighThresholdPercent and LowThresholdPercent. Disk usage above the high threshold (default: 85%) will trigger garbage collection. The garbage collection will delete least recently used images until the low threshold (default: 80%) has been met.
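              These two thresholds correspond to the image garbage collection parameters of kubelet. The following is only an illustrative sketch of how they appear in an upstream KubeletConfiguration; on CCE nodes, kubelet is managed by the cluster and you do not normally edit this configuration yourself.

                apiVersion: kubelet.config.k8s.io/v1beta1
                kind: KubeletConfiguration
                imageGCHighThresholdPercent: 85   # Disk usage above this value triggers image garbage collection
                imageGCLowThresholdPercent: 80    # Least recently used images are deleted until usage drops below this value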

              +
              +

              Recommended Configuration for the Container Engine Space

              • The container engine space should be greater than the total disk space used by containers. Formula: Container engine space > Number of containers x basesize
              • You are advised to create and delete files of containerized services in local storage volumes (such as emptyDir and hostPath volumes) or cloud storage directories mounted to the containers. In this way, the thin pool space is not occupied. emptyDir volumes occupy the kubelet space. Therefore, properly plan the size of the kubelet space.
              • If OverlayFS is used by nodes in CCE clusters, you can deploy services on these nodes so that the disk space occupied by files created or deleted in containers is released immediately.
              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0342.html b/docs/cce/umn/cce_10_0342.html new file mode 100644 index 00000000..6b620951 --- /dev/null +++ b/docs/cce/umn/cce_10_0342.html @@ -0,0 +1,79 @@ + + +

              CCE Turbo Clusters and CCE Clusters

              +

              Comparison Between CCE Turbo Clusters and CCE Clusters

              The following table lists the differences between CCE Turbo clusters and CCE clusters:

              Table 1 Cluster types

              Dimension

              +

              Sub-dimension

              +

              CCE Turbo Cluster

              +

              CCE Cluster

              +

              Cluster

              +

              Positioning

              +

              Next-gen container cluster, with accelerated computing, networking, and scheduling. Designed for Cloud Native 2.0

              +

              Standard cluster for common commercial use

              +

              Node type

              +

              Hybrid deployment of VMs and bare-metal servers

              +

              Hybrid deployment of VMs

              +

              Networking

              +

              Model

              +

              Cloud Native Network 2.0: applies to large-scale and high-performance scenarios.

              +

              Max networking scale: 2,000 nodes

              +

              Cloud-native network 1.0: applies to common, smaller-scale scenarios.

              +
              • Tunnel network model
              • VPC network model
              +

              Performance

              +

              Flattens the VPC network and container network into one. No performance loss.

              +

              Overlays the VPC network with the container network, causing certain performance loss.

              +

              Container network isolation

              +

              Associates pods with security groups. Unifies security isolation in and out of the cluster via the security groups' network policies.

              +
              • Tunnel network model: supports network policies for intra-cluster communications.
              • VPC network model: supports no isolation.
              +

              Security

              +

              Isolation

              +
              • Physical machine: runs Kata containers, allowing VM-level isolation.
              • VM: runs common containers.
              +

              Runs common containers, isolated by cgroups.

              +
              +
              +
              +

              QingTian Architecture

              +

              The QingTian architecture consists of the data plane (software-hardware synergy) and the management plane (Alkaid Smart Cloud Brain). The data plane innovates in five dimensions: simplified data center, diversified computing power, QingTian cards, ultra-fast engines, and simplified virtualization, to fully offload and accelerate compute, storage, networking, and security components. VMs, bare metal servers, and containers can run together. As a distributed operating system, the Alkaid Smart Cloud Brain focuses on the cloud, AI, and 5G, and provides all-domain scheduling to achieve cloud-edge-device collaboration and governance.

              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0343.html b/docs/cce/umn/cce_10_0343.html new file mode 100644 index 00000000..21b20726 --- /dev/null +++ b/docs/cce/umn/cce_10_0343.html @@ -0,0 +1,645 @@ + + +

              How Do I Change the Storage Class Used by a Cluster of v1.15 from FlexVolume to CSI Everest?

              +

              In clusters later than v1.15.11-r1, CSI (the everest add-on) has taken over all functions of fuxi FlexVolume (the storage-driver add-on) for managing container storage. You are advised to use CSI Everest.

              +

              To migrate your storage volumes, create a static PV to associate with the original underlying storage, and then create a PVC to associate with this static PV. When you upgrade your application, mount the new PVC to the original mounting path to migrate the storage volumes.

              +

              Services will be interrupted during the migration. Therefore, properly plan the migration and back up data.

              +
              +

              Procedure

              1. (Optional) Back up data to prevent data loss in case of exceptions.
              2. Configure a YAML file of the PV in the CSI format according to the PV in the FlexVolume format and associate the PV with the existing storage.

                To be specific, run the following commands to configure the pv-example.yaml file, which is used to create a PV.

                +

                touch pv-example.yaml

                +

                vi pv-example.yaml

                +
                Configuration example of a PV for an EVS volume:
                apiVersion: v1
                +kind: PersistentVolume
                +metadata:
                +  labels:
                +    failure-domain.beta.kubernetes.io/region: eu-de
                +    failure-domain.beta.kubernetes.io/zone: <zone name>
                +  annotations:
                +    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
                +  name: pv-evs-example
                +spec:
                +  accessModes:
                +  - ReadWriteOnce
                +  capacity:
                +    storage: 10Gi
                +  csi:
                +    driver: disk.csi.everest.io
                +    fsType: ext4
                +    volumeAttributes:
                +      everest.io/disk-mode: SCSI
                +      everest.io/disk-volume-type: SAS
                +      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
                +    volumeHandle: 0992dbda-6340-470e-a74e-4f0db288ed82
                +  persistentVolumeReclaimPolicy: Delete
                +  storageClassName: csi-disk
                +
                +

                +

                The key parameters are described as follows:

                Table 1 EVS volume configuration parameters

                Parameter

                +

                Description

                +

                failure-domain.beta.kubernetes.io/region

                +

                Region where the EVS disk is located. Use the same value as that of the FlexVolume PV.

                +

                failure-domain.beta.kubernetes.io/zone

                +

                AZ where the EVS disk is located. Use the same value as that of the FlexVolume PV.

                +

                name

                +

                Name of the PV, which must be unique in the cluster.

                +

                storage

                +

                EVS volume capacity in the unit of Gi. Use the value of spec.capacity.storage of the FlexVolume PV.

                +

                driver

                +

                Storage driver used to attach the volume. Set the driver to disk.csi.everest.io for the EVS volume.

                +

                volumeHandle

                +

                Volume ID of the EVS disk. Use the value of spec.flexVolume.options.volumeID of the FlexVolume PV.

                +

                everest.io/disk-mode

                +

                EVS disk mode. Use the value of spec.flexVolume.options.disk-mode of the FlexVolume PV.

                +

                everest.io/disk-volume-type

                +

                EVS disk type. Use the value of kubernetes.io/volumetype in the storage class corresponding to spec.storageClassName of the FlexVolume PV.

                +

                storageClassName

                +

                Name of the Kubernetes storage class associated with the storage volume. Set this field to csi-disk for EVS disks.

                +
                +
                +

                Configuration example of a PV for an SFS volume:

                +
                apiVersion: v1
                +kind: PersistentVolume
                +metadata:
                +  name: pv-sfs-example
                +  annotations:
                +    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
                +spec:
                +  accessModes:
                +  - ReadWriteMany
                +  capacity:
                +    storage: 10Gi
                +  csi:
                +    driver: nas.csi.everest.io
                +    fsType: nfs
                +    volumeAttributes:
                +      everest.io/share-export-location:  # Shared path of the file storage
                +      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
                +    volumeHandle: 682f00bb-ace0-41d8-9b3e-913c9aa6b695
                +  persistentVolumeReclaimPolicy: Delete
                +  storageClassName: csi-nas
                +

                +

                The key parameters are described as follows:

                Table 2 SFS volume configuration parameters

                Parameter

                +

                Description

                +

                name

                +

                Name of the PV, which must be unique in the cluster.

                +

                storage

                +

                File storage size in the unit of Gi. Use the value of spec.capacity.storage of the FlexVolume PV.

                +

                driver

                +

                Storage driver used to attach the volume. Set the driver to nas.csi.everest.io for the file system.

                +

                everest.io/share-export-location

                +

                Shared path of the file system. Use the value of spec.flexVolume.options.deviceMountPath of the FlexVolume PV.

                +

                volumeHandle

                +

                File system ID. Use the value of spec.flexVolume.options.volumeID of the FlexVolume PV.

                +

                storageClassName

                +

                Name of the Kubernetes storage class. Set this field to csi-nas.

                +
                +
                +

                Configuration example of a PV for an OBS volume:

                +
                apiVersion: v1
                +kind: PersistentVolume
                +metadata:
                +  name: pv-obs-example
                +  annotations:
                +    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
                +spec:
                +  accessModes:
                +  - ReadWriteMany
                +  capacity:
                +    storage: 1Gi
                +  csi:
                +    driver: obs.csi.everest.io
                +    fsType: s3fs
                +    volumeAttributes:
                +      everest.io/obs-volume-type: STANDARD
                +      everest.io/region: eu-de
                +      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
                +    volumeHandle: obs-normal-static-pv
                +  persistentVolumeReclaimPolicy: Delete
                +  storageClassName: csi-obs
                +

                +

                The key parameters are described as follows:

                Table 3 OBS volume configuration parameters

                Parameter

                +

                Description

                +

                name

                +

                Name of the PV, which must be unique in the cluster.

                +

                storage

                +

                Storage capacity, in the unit of Gi. Set this parameter to the fixed value 1Gi.

                +

                driver

                +

                Storage driver used to attach the volume. Set the driver to obs.csi.everest.io for the OBS volume.

                +

                fsType

                +

                File type. Value options are obsfs or s3fs. If the value is s3fs, an OBS bucket is created and mounted using s3fs. If the value is obsfs, an OBS parallel file system is created and mounted using obsfs. Set this parameter according to the value of spec.flexVolume.options.posix of the FlexVolume PV. If the value of spec.flexVolume.options.posix is true, set this parameter to obsfs. If the value is false, set this parameter to s3fs.

                +

                everest.io/obs-volume-type

                +

                Storage class, including STANDARD (standard bucket) and WARM (infrequent access bucket). Set this parameter according to the value of spec.flexVolume.options.storage_class of the FlexVolume PV. If the value of spec.flexVolume.options.storage_class is standard, set this parameter to STANDARD. If the value is standard_ia, set this parameter to WARM.

                +

                everest.io/region

                +

                Region where the OBS bucket is located. Use the value of spec.flexVolume.options.region of the FlexVolume PV.

                +

                volumeHandle

                +

                OBS bucket name. Use the value of spec.flexVolume.options.volumeID of the FlexVolume PV.

                +

                storageClassName

                +

                Name of the Kubernetes storage class. Set this field to csi-obs.

                +
                +
                +

                Configuration example of a PV for an SFS Turbo volume:

                +
                apiVersion: v1
                +kind: PersistentVolume
                +metadata:
                +  name: pv-efs-example
                +  annotations:
                +    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
                +spec:
                +  accessModes:
                +  - ReadWriteMany
                +  capacity:
                +    storage: 10Gi
                +  csi:
                +    driver: sfsturbo.csi.everest.io
                +    fsType: nfs
                +    volumeAttributes:
                +      everest.io/share-export-location: 192.168.0.169:/
                +      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
                +    volumeHandle: 8962a2a2-a583-4b7f-bb74-fe76712d8414
                +  persistentVolumeReclaimPolicy: Delete
                +  storageClassName: csi-sfsturbo
                +

                +

                The key parameters are described as follows:

                Table 4 SFS Turbo volume configuration parameters

                Parameter

                +

                Description

                +

                name

                +

                Name of the PV, which must be unique in the cluster.

                +

                storage

                +

                File system size. Use the value of spec.capacity.storage of the FlexVolume PV.

                +

                driver

                +

                Storage driver used to attach the volume. Set it to sfsturbo.csi.everest.io.

                +

                everest.io/share-export-location

                +

                Shared path of the SFS Turbo volume. Use the value of spec.flexVolume.options.deviceMountPath of the FlexVolume PV.

                +

                volumeHandle

                +

                SFS Turbo volume ID. Use the value of spec.flexVolume.options.volumeID of the FlexVolume PV.

                +

                storageClassName

                +

                Name of the Kubernetes storage class. Set this field to csi-sfsturbo for SFS Turbo volumes.

                +
                +
                +

              3. Configure a YAML file of the PVC in the CSI format according to the PVC in the FlexVolume format and associate the PVC with the PV created in 2.

                To be specific, run the following commands to configure the pvc-example.yaml file, which is used to create a PVC.

                +

                touch pvc-example.yaml

                +

                vi pvc-example.yaml

                +

                Configuration example of a PVC for an EVS volume:

                +
                apiVersion: v1  
                +kind: PersistentVolumeClaim
                +metadata:
                +  labels:
                +    failure-domain.beta.kubernetes.io/region: eu-de
                +    failure-domain.beta.kubernetes.io/zone: <zone name>
                +  annotations:
                +    everest.io/disk-volume-type: SAS
                +    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
                +  name: pvc-evs-example
                +  namespace: default
                +spec:
                +  accessModes:
                +  - ReadWriteOnce
                +  resources:
                +    requests:
                +      storage: 10Gi
                +  volumeName:  pv-evs-example
                +  storageClassName: csi-disk
                +

                +

                The key parameters are described as follows:

                Table 5 PVC configuration parameters for an EVS volume

                Parameter

                +

                Description

                +

                failure-domain.beta.kubernetes.io/region

                +

                Region where the cluster is located. Use the same value as that of the FlexVolume PVC.

                +

                failure-domain.beta.kubernetes.io/zone

                +

                AZ where the EVS disk is deployed. Use the same value as that of the FlexVolume PVC.

                +

                everest.io/disk-volume-type

                +

                Storage class of the EVS disk. The value can be SAS or SSD. Set this parameter to the same value as that of the PV created in 2.

                +

                name

                +

                PVC name, which must be unique in the namespace. (If the PVC is dynamically created by a stateful application, the value of this parameter must be the same as the name of the FlexVolume PVC.)

                +

                namespace

                +

                Namespace to which the PVC belongs. Use the same value as that of the FlexVolume PVC.

                +

                storage

                +

                Requested capacity of the PVC, which must be the same as the storage size of the existing PV.

                +

                volumeName

                +

                Name of the PV. Set this parameter to the name of the static PV in 2.

                +

                storageClassName

                +

                Name of the Kubernetes storage class. Set this field to csi-disk for EVS disks.

                +
                +
                +

                Configuration example of a PVC for an SFS volume:

                +
                apiVersion: v1
                +kind: PersistentVolumeClaim
                +metadata:
                +  annotations:
                +    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
                +  name: pvc-sfs-example
                +  namespace: default
                +spec:
                +  accessModes:
                +  - ReadWriteMany
                +  resources:
                +    requests:
                +      storage: 10Gi
                +  storageClassName: csi-nas
                +  volumeName: pv-sfs-example
                +

                +

                The key parameters are described as follows:

                Table 6 PVC configuration parameters for an SFS volume

                Parameter

                +

                Description

                +

                name

                +

                PVC name, which must be unique in the namespace. (If the PVC is dynamically created by a stateful application, the value of this parameter must be the same as the name of the FlexVolume PVC.)

                +

                namespace

                +

                Namespace to which the PVC belongs. Use the same value as that of the FlexVolume PVC.

                +

                storage

                +

                Storage capacity, in the unit of Gi. The value must be the same as the storage size of the existing PV.

                +

                storageClassName

                +

                Set this field to csi-nas.

                +

                volumeName

                +

                Name of the PV. Set this parameter to the name of the static PV in 2.

                +
                +
                +

                Configuration example of a PVC for an OBS volume:

                +
                apiVersion: v1
                +kind: PersistentVolumeClaim
                +metadata:
                +  annotations:
                +    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
                +    everest.io/obs-volume-type: STANDARD
                +    csi.storage.k8s.io/fstype: s3fs
                +  name: pvc-obs-example
                +  namespace: default
                +spec:
                +  accessModes:
                +  - ReadWriteMany
                +  resources:
                +    requests:
                +      storage: 1Gi
                +  storageClassName: csi-obs
                +  volumeName: pv-obs-example
                +

                +

                The key parameters are described as follows:

                Table 7 PVC configuration parameters for an OBS volume

                Parameter

                +

                Description

                +

                everest.io/obs-volume-type

                +

                OBS volume type, which can be STANDARD (standard bucket) and WARM (infrequent access bucket). Set this parameter to the same value as that of the PV created in 2.

                +

                csi.storage.k8s.io/fstype

                +

                File type, which can be obsfs or s3fs. The value must be the same as that of fsType of the static OBS volume PV.

                +

                name

                +

                PVC name, which must be unique in the namespace. (If the PVC is dynamically created by a stateful application, the value of this parameter must be the same as the name of the FlexVolume PVC.)

                +

                namespace

                +

                Namespace to which the PVC belongs. Use the same value as that of the FlexVolume PVC.

                +

                storage

                +

                Storage capacity, in the unit of Gi. Set this parameter to the fixed value 1Gi.

                +

                storageClassName

                +

                Name of the Kubernetes storage class. Set this field to csi-obs.

                +

                volumeName

                +

                Name of the PV. Set this parameter to the name of the static PV created in 2.

                +
                +
                +

                Configuration example of a PVC for an SFS Turbo volume:

                +
                apiVersion: v1
                +kind: PersistentVolumeClaim
                +metadata:
                +  annotations:
                +    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
                +  name: pvc-efs-example
                +  namespace: default
                +spec:
                +  accessModes:
                +  - ReadWriteMany
                +  resources:
                +    requests:
                +      storage: 10Gi
                +  storageClassName: csi-sfsturbo
                +  volumeName: pv-efs-example
                +

                +

                The key parameters are described as follows:

                Table 8 PVC configuration parameters for an SFS Turbo volume

                Parameter

                +

                Description

                +

                name

                +

                PVC name, which must be unique in the namespace. (If the PVC is dynamically created by a stateful application, the value of this parameter must be the same as the name of the FlexVolume PVC.)

                +

                namespace

                +

                Namespace to which the PVC belongs. Use the same value as that of the FlexVolume PVC.

                +

                storageClassName

                +

                Name of the Kubernetes storage class. Set this field to csi-sfsturbo.

                +

                storage

                +

                Storage capacity, in the unit of Gi. The value must be the same as the storage size of the existing PV.

                +

                volumeName

                +

                Name of the PV. Set this parameter to the name of the static PV created in 2.

                +
                +
                +

              4. Upgrade the workload to use a new PVC.

                For Deployments
                1. Run the kubectl create -f commands to create a PV and PVC.

                  kubectl create -f pv-example.yaml

                  +

                  kubectl create -f pvc-example.yaml

                  +

                  Replace the example file name pvc-example.yaml in the preceding commands with the names of the YAML files configured in 2 and 3.

                  +
                  +
                2. Go to the CCE console. On the workload upgrade page, click Upgrade > Advanced Settings > Data Storage > Cloud Storage.

                  +
                3. Uninstall the old storage and add the PVC in the CSI format. Retain the original mounting path in the container.
                4. Click Submit.
                5. Wait until the pods are running.
                +
                +

                For StatefulSets that use existing storage

                +
                1. Run the kubectl create -f commands to create a PV and PVC.

                  kubectl create -f pv-example.yaml

                  +

                  kubectl create -f pvc-example.yaml

                  +

                  Replace the example file name pvc-example.yaml in the preceding commands with the names of the YAML files configured in 2 and 3.

                  +
                  +
                2. Run the kubectl edit command to edit the StatefulSet and use the newly created PVC.

                  kubectl edit sts sts-example -n xxx

                  +

                  +

                  Replace sts-example in the preceding command with the actual name of the StatefulSet to upgrade. xxx indicates the namespace to which the StatefulSet belongs.

                  +
                  +
                3. Wait until the pods are running.
                +

                The current console does not support the operation of adding new cloud storage for StatefulSets. Use the kubectl commands to replace the storage with the newly created PVC.

                +
                +

                For StatefulSets that use dynamically allocated storage

                +
                1. Back up the PV and PVC in the flexVolume format used by the StatefulSet.

                  kubectl get pvc xxx -n {namespaces} -oyaml > pvc-backup.yaml

                  +

                  kubectl get pv xxx -n {namespaces} -oyaml > pv-backup.yaml

                  +
                2. Change the number of pods to 0.
                3. On the storage page, disassociate the flexVolume PVC used by the StatefulSet.
                4. Run the kubectl create -f commands to create a PV and PVC.

                  kubectl create -f pv-example.yaml

                  +

                  kubectl create -f pvc-example.yaml

                  +

                  Replace the example file name pvc-example.yaml in the preceding commands with the names of the YAML files configured in 2 and 3.

                  +
                  +
                5. Change the number of pods back to the original value and wait until the pods are running.
                +

                Dynamic storage allocation for StatefulSets is achieved through volumeClaimTemplates, a field that Kubernetes does not allow to be modified. Therefore, data cannot be migrated by directly switching the StatefulSet to a new PVC.

                +

                The PVC naming rule of the volumeClaimTemplates is fixed. When a PVC that meets the naming rule exists, this PVC is used.

                +

                Therefore, disassociate the original PVC first, and then create a PVC with the same name in the CSI format.

                +
                +

                6. (Optional) Recreate the stateful application to ensure that a CSI PVC is used when the application is scaled out. Otherwise, FlexVolume PVCs are used in scaling out.

                +
                • Run the following command to obtain the YAML file of the StatefulSet:
                +

                kubectl get sts xxx -n {namespaces} -oyaml > sts.yaml

                +
                • Run the following command to back up the YAML file of the StatefulSet:
                +

                cp sts.yaml sts-backup.yaml

                +
                • Modify the definition of volumeClaimTemplates in the YAML file of the StatefulSet.
                +

                vi sts.yaml

                +

                Configuration example of volumeClaimTemplates for an EVS volume:

                +
                  volumeClaimTemplates:
                +    - metadata:
                +        name: pvc-161070049798261342
                +        namespace: default
                +        creationTimestamp: null
                +        annotations:
                +          everest.io/disk-volume-type: SAS
                +      spec:
                +        accessModes:
                +          - ReadWriteOnce
                +        resources:
                +          requests:
                +            storage: 10Gi
                +        storageClassName: csi-disk
                +

                The parameter value must be the same as the PVC of the EVS volume created in 3.

                +

                Configuration example of volumeClaimTemplates for an SFS volume:

                +
                  volumeClaimTemplates:
                +    - metadata:
                +        name: pvc-161063441560279697
                +        namespace: default
                +        creationTimestamp: null
                +      spec:
                +        accessModes:
                +          - ReadWriteMany
                +        resources:
                +          requests:
                +            storage: 10Gi
                +        storageClassName: csi-nas
                +

                The parameter value must be the same as the PVC of the SFS volume created in 3.

                +

                Configuration example of volumeClaimTemplates for an OBS volume:

                +
                  volumeClaimTemplates:
                +    - metadata:
                +        name: pvc-161070100417416148
                +        namespace: default
                +        creationTimestamp: null
                +        annotations:
                +          csi.storage.k8s.io/fstype: s3fs
                +          everest.io/obs-volume-type: STANDARD
                +      spec:
                +        accessModes:
                +          - ReadWriteMany
                +        resources:
                +          requests:
                +            storage: 1Gi
                +        storageClassName: csi-obs
                +

                The parameter value must be the same as the PVC of the OBS volume created in 3.

                +
                • Delete the StatefulSet.
                +

                kubectl delete sts xxx -n {namespaces}

                +
                • Create the StatefulSet.
                +

                kubectl create -f sts.yaml

                +

              5. Check service functions.

                1. Check whether the application is running properly.
                2. Check whether the data storage is normal.
                +

                If a rollback is required, perform 4 again, select the PVC in the FlexVolume format, and upgrade the application.

                +
                +

              6. Uninstall the PVC in the FlexVolume format.

                If the application functions normally, unbind the PVC in the FlexVolume format on the storage management page.

                +

                You can also run the kubectl command to delete the PVC and PV of the FlexVolume format.

                +

                Before deleting a PV, change the persistentVolumeReclaimPolicy of the PV to Retain. Otherwise, the underlying storage will be reclaimed after the PV is deleted.
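                For example, you can change the reclaim policy with a patch similar to the following (replace {pv_name} with the actual PV name):

                kubectl patch pv {pv_name} -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'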

                +

                If the cluster has been upgraded before the storage migration, PVs may fail to be deleted. You can remove the PV protection field finalizers to delete PVs.

                +

                kubectl patch pv {pv_name} -p '{"metadata":{"finalizers":null}}'

                +
                +

              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0345.html b/docs/cce/umn/cce_10_0345.html new file mode 100644 index 00000000..ec10e4c0 --- /dev/null +++ b/docs/cce/umn/cce_10_0345.html @@ -0,0 +1,93 @@ + + +

              GPU Scheduling

              +

              You can use GPUs in CCE containers.

              +

              Prerequisites

              • A GPU node has been created. For details, see Creating a Node.
              • The gpu-beta add-on has been installed. During the installation, select the GPU driver on the node. For details, see gpu-beta.
              • gpu-beta mounts the driver directory to /usr/local/nvidia/lib64. To use GPU resources in a container, you need to add /usr/local/nvidia/lib64 to the LD_LIBRARY_PATH environment variable.

                Generally, you can use any of the following methods to configure the environment variable:

                +
                1. Configure the LD_LIBRARY_PATH environment variable in the Dockerfile used for creating an image. (Recommended)
                  ENV LD_LIBRARY_PATH /usr/local/nvidia/lib64:$LD_LIBRARY_PATH
                  +
                2. Configure the LD_LIBRARY_PATH environment variable in the image startup command.
                  /bin/bash -c "export LD_LIBRARY_PATH=/usr/local/nvidia/lib64:$LD_LIBRARY_PATH && ..."
                  +
                3. Define the LD_LIBRARY_PATH environment variable when creating a workload. (Ensure that this variable is not configured in the container. Otherwise, it will be overwritten.)
                            env:
                  +            - name: LD_LIBRARY_PATH
                  +              value: /usr/local/nvidia/lib64
                  +
                +
              +
              +

              Using GPUs

              Create a workload and request GPUs. You can specify the number of GPUs as follows:

              +
              apiVersion: apps/v1
              +kind: Deployment
              +metadata:
              +  name: gpu-test
              +  namespace: default
              +spec:
              +  replicas: 1
              +  selector:
              +    matchLabels:
              +      app: gpu-test
              +  template:
              +    metadata:
              +      labels:
              +        app: gpu-test
              +    spec:
              +      containers:
              +      - image: nginx:perl
              +        name: container-0
              +        resources:
              +          requests:
              +            cpu: 250m
              +            memory: 512Mi
              +            nvidia.com/gpu: 1   # Number of requested GPUs
              +          limits:
              +            cpu: 250m
              +            memory: 512Mi
              +            nvidia.com/gpu: 1   # Maximum number of GPUs that can be used
              +      imagePullSecrets:
              +      - name: default-secret
              +

              nvidia.com/gpu specifies the number of GPUs to be requested. The value can be smaller than 1. For example, nvidia.com/gpu: 0.5 indicates that multiple pods share a GPU. In this case, all the requested GPU resources come from the same GPU card.
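              For example, the resources section of a container requesting half a GPU could look as follows (a sketch only; the rest of the Deployment is the same as the preceding example, and fractional requests assume the GPU add-on on the node supports GPU sharing as described above):

                        resources:
                          requests:
                            cpu: 250m
                            memory: 512Mi
                            nvidia.com/gpu: 0.5   # Half a GPU; pods with fractional requests share one GPU card
                          limits:
                            cpu: 250m
                            memory: 512Mi
                            nvidia.com/gpu: 0.5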

              +

              After nvidia.com/gpu is specified, workloads will not be scheduled to nodes without GPUs. If the node is GPU-starved, Kubernetes events similar to the following are reported:

              +
              • 0/2 nodes are available: 2 Insufficient nvidia.com/gpu.
              • 0/4 nodes are available: 1 InsufficientResourceOnSingleGPU, 3 Insufficient nvidia.com/gpu.
              +

              To use GPUs on the CCE console, select the GPU quota and specify the percentage of GPUs reserved for the container when creating a workload.

              +
              Figure 1 Using GPUs
              +
              +

              GPU Node Labels

              CCE will label GPU-enabled nodes after they are created. Different types of GPU-enabled nodes have different labels.

              +
              $ kubectl get node -L accelerator
              +NAME           STATUS   ROLES    AGE     VERSION                                    ACCELERATOR
              +10.100.2.179   Ready    <none>   8m43s   v1.19.10-r0-CCE21.11.1.B006-21.11.1.B006   nvidia-t4
              +

              When using GPUs, you can enable the affinity between pods and nodes based on labels so that the pods can be scheduled to the correct nodes.

              +
              apiVersion: apps/v1
              +kind: Deployment
              +metadata:
              +  name: gpu-test
              +  namespace: default
              +spec:
              +  replicas: 1
              +  selector:
              +    matchLabels:
              +      app: gpu-test
              +  template:
              +    metadata:
              +      labels:
              +        app: gpu-test
              +    spec:
              +      nodeSelector:
              +        accelerator: nvidia-t4
              +      containers:
              +      - image: nginx:perl
              +        name: container-0
              +        resources:
              +          requests:
              +            cpu: 250m
              +            memory: 512Mi
              +            nvidia.com/gpu: 1   # Number of requested GPUs
              +          limits:
              +            cpu: 250m
              +            memory: 512Mi
              +            nvidia.com/gpu: 1   # Maximum number of GPUs that can be used
              +      imagePullSecrets:
              +      - name: default-secret
              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0348.html b/docs/cce/umn/cce_10_0348.html new file mode 100644 index 00000000..798d9c12 --- /dev/null +++ b/docs/cce/umn/cce_10_0348.html @@ -0,0 +1,25 @@ + + +

              Maximum Number of Pods That Can Be Created on a Node

              +

              The maximum number of pods that can be created on a node is determined by the following parameters:

              +
              • Number of container IP addresses that can be allocated on a node (alpha.cce/fixPoolMask): Set this parameter when creating a CCE cluster. This parameter is available only when Network Model is VPC network.
              • Maximum number of pods of a node (maxPods): Set this parameter when creating a node. It is a configuration item of kubelet.
              • Number of ENIs of a CCE Turbo cluster node: In a CCE Turbo cluster, ECS nodes use sub-ENIs and BMS nodes use ENIs. The maximum number of pods that can be created on a node depends on the number of ENIs that can be used by the node.
              +

The maximum number of pods that can be created on a node is the smallest of these values. You can check the limit that actually applies to a node as shown below.

              + +
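To check the pod limit that finally applies to a node, you can query its allocatable resources, for example (replace the node name with one of your own nodes):

$ kubectl get node 192.168.10.240 -o jsonpath='{.status.allocatable.pods}'

The same value is listed under Allocatable in the output of kubectl describe node.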

              Container Network vs. Host Network

              When creating a pod, you can select the container network or host network for the pod.

              +
• Container network (default): Each pod is assigned an IP address by the cluster networking add-on, so each pod occupies an IP address of the container network.
• Host network: The pod uses the host network (hostNetwork: true must be configured for the pod) and occupies host ports. The pod IP address is the host IP address, and the pod does not occupy any IP address of the container network. Before using the host network, make sure that container ports do not conflict with host ports. Do not use the host network unless you know exactly which host port is used by which container. A minimal example is shown after this list.
              +
              +
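The following is a minimal sketch of a pod that uses the host network; the pod name and image are illustrative:

apiVersion: v1
kind: Pod
metadata:
  name: host-network-test
spec:
  hostNetwork: true          # The pod shares the node's network namespace and uses the node IP address
  containers:
  - name: container-0
    image: nginx:alpine
    ports:
    - containerPort: 80      # Opened directly on the host, so it must not conflict with any host port
  imagePullSecrets:
  - name: default-secret

Because such a pod listens on the node itself, at most one replica that uses a given port can run on each node.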

              Number of Container IP Addresses That Can Be Allocated on a Node

              If you select VPC network for Network Model when creating a CCE cluster, you also need to set the number of container IP addresses that can be allocated to each node.

              +

              This parameter affects the maximum number of pods that can be created on a node. Each pod occupies an IP address (when the container network is used). If the number of available IP addresses is insufficient, pods cannot be created.

              +

              +

By default, three container IP addresses on each node are reserved (the network address, gateway address, and broadcast address). Therefore, the number of container IP addresses that can be allocated to a node equals the number of selected container IP addresses minus 3. For example, in the preceding figure, the number of container IP addresses that can be allocated to a node is 125 (128 – 3).

              +
              +

              Maximum Number of Pods on a Node

              When creating a node, you can configure the maximum number of pods that can be created on the node. This parameter is a configuration item of kubelet and determines the maximum number of pods that can be created by kubelet.

              +

              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0349.html b/docs/cce/umn/cce_10_0349.html new file mode 100644 index 00000000..52e1c092 --- /dev/null +++ b/docs/cce/umn/cce_10_0349.html @@ -0,0 +1,25 @@ + + +

              Comparing iptables and IPVS

              +

kube-proxy is a key component of a Kubernetes cluster. It is responsible for load balancing and forwarding traffic between a Service and its backend pods.

              +

              CCE supports two forwarding modes: iptables and IPVS.

              +
              • IPVS allows higher throughput and faster forwarding. This mode applies to scenarios where the cluster scale is large or the number of Services is large.
• iptables is the traditional kube-proxy mode. This mode applies to scenarios where there are only a few Services or where a large number of short-lived connections are concurrently initiated on the client.
              +

              Notes and Constraints

              In a cluster using the IPVS proxy mode, if the ingress and Service use the same ELB load balancer, the ingress cannot be accessed from the nodes and containers in the cluster because kube-proxy mounts the LoadBalancer Service address to the ipvs-0 bridge. This bridge intercepts the traffic of the load balancer connected to the ingress. You are advised to use different ELB load balancers for the ingress and Service.

              +
              +

              iptables

iptables is a Linux kernel feature that provides extensive packet processing and filtering capabilities. It allows flexible sequences of rules to be attached to various hooks in the packet processing pipeline. In iptables mode, kube-proxy implements NAT and load balancing in the NAT prerouting hook.

              +

In this mode, kube-proxy uses an O(n) algorithm, where n grows with the cluster scale, that is, the number of Services and backend pods.

              +
              +

              IPVS

              IP Virtual Server (IPVS) is constructed on top of Netfilter and implements transport-layer load balancing as part of the Linux kernel. IPVS can direct requests for TCP- and UDP-based services to the real servers, and make services of the real servers appear as virtual services on a single IP address.

              +

              In the IPVS mode, kube-proxy uses IPVS load balancing instead of iptables. IPVS is designed to balance loads for a large number of Services. It has a set of optimized APIs and uses optimized search algorithms instead of simply searching for rules from a list.

              +
              +

              The complexity of the connection process of IPVS-based kube-proxy is O(1). In other words, in most cases, the connection processing efficiency is irrelevant to the cluster scale.

              +

              IPVS involves multiple load balancing algorithms, such as round-robin, shortest expected delay, least connections, and various hashing methods. However, iptables has only one algorithm for random selection.

              +

              Compared with iptables, IPVS has the following advantages:

              +
              1. Provides better scalability and performance for large clusters.
              2. Supports better load balancing algorithms than iptables.
              3. Supports functions including server health check and connection retries.
              +
              +
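To see which rules kube-proxy has programmed, you can log in to a node and inspect them directly (a quick check; ipvsadm must be installed on the node to list IPVS rules):

# IPVS mode: list virtual servers and their backends
ipvsadm -Ln
# iptables mode: list the Service NAT chain
iptables -t nat -L KUBE-SERVICES -n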
              + +
              + diff --git a/docs/cce/umn/cce_10_0351.html b/docs/cce/umn/cce_10_0351.html new file mode 100644 index 00000000..185ae921 --- /dev/null +++ b/docs/cce/umn/cce_10_0351.html @@ -0,0 +1,51 @@ + + +

              Binding CPU Cores

              +

              By default, kubelet uses CFS quotas to enforce pod CPU limits. When the node runs many CPU-bound pods, the workload can move to different CPU cores depending on whether the pod is throttled and which CPU cores are available at scheduling time. Many workloads are not sensitive to this migration and thus work fine without any intervention. Some applications are CPU-sensitive. They are sensitive to:

              +
              • CPU throttling
              • Context switching
              • Processor cache misses
              • Cross-socket memory access
• Hyperthreads that are expected to run on the same physical CPU core
              +

              If your workloads are sensitive to any of these items and CPU cache affinity and scheduling latency significantly affect workload performance, kubelet allows alternative CPU management policies to determine some placement preferences on the node. The CPU manager preferentially allocates resources on a socket and full physical cores to avoid interference.

              +

              Binding CPU Cores to a Pod

              Prerequisites:

              +
              • The static core binding policy is enabled on the node. For details, see Enabling the CPU Management Policy.
              • Both requests and limits must be set in the pod definition and their values must be the same.
              • The value of requests must be an integer for the container.
• For an init container, it is recommended that you set the same requests as for the service container. Otherwise, the service container does not inherit the CPU allocation result of the init container, and the CPU manager reserves more CPU resources than needed. For more information, see App Containers can't inherit Init Containers CPUs - CPU Manager Static Policy.
              +

              You can use Scheduling Policy (Affinity/Anti-affinity) to schedule the configured pods to the nodes where the static CPU policy is enabled. In this way, cores can be bound.

              +
              +

              Enabling the CPU Management Policy

              A CPU management policy is specified by the kubelet flag --cpu-manager-policy. The following policies are supported:

              +
              • Disabled (none): the default policy. The none policy explicitly enables the existing default CPU affinity scheme, providing no affinity beyond what the OS scheduler does automatically.
              • Enabled (static): The static policy allows containers in Guaranteed pods with integer CPU requests to be granted increased CPU affinity and exclusivity on the node.
              +

              When creating a cluster, you can configure the CPU management policy in Advanced Settings, as shown in the following figure.

              +

              +

              You can also configure the policy in a node pool. The configuration will change the kubelet flag --cpu-manager-policy on the node. Log in to the CCE console, click the cluster name, access the cluster details page, and choose Nodes in the navigation pane. On the page displayed, click the Node Pools tab. Choose More > Manage in the Operation column of the target node pool, and change the value of cpu-manager-policy to static.

              +
              +

              Pod Configuration

For CPU, requests and limits must be set to the same value, and the value must be an integer.

              +
              kind: Deployment
              +apiVersion: apps/v1
              +metadata:
              +  name: test
              +spec:
              +  replicas: 1
              +  selector:
              +    matchLabels:
              +      app: test
              +  template:
              +    metadata:
              +      labels:
              +        app: test
              +    spec:
              +      containers:
              +        - name: container-1
              +          image: nginx:alpine
              +          resources:
              +            requests:
              +              cpu: 2           # The value must be an integer and must be the same as that in limits.
              +              memory: 2048Mi
              +            limits:
              +              cpu: 2           # The value must be an integer and must be the same as that in requests.
              +              memory: 2048Mi
              +      imagePullSecrets:
              +        - name: default-secret
              +
              +
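After the pod is running on a node where the static policy is enabled, you can log in to that node and check the CPU manager state file to confirm that exclusive cores were assigned (a quick check; the exact container IDs and CPU sets depend on your environment):

cat /var/lib/kubelet/cpu_manager_state

The file shows "policyName":"static" and the exclusive CPU set assigned to each container.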
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0352.html b/docs/cce/umn/cce_10_0352.html new file mode 100644 index 00000000..95cdd682 --- /dev/null +++ b/docs/cce/umn/cce_10_0352.html @@ -0,0 +1,86 @@ + + +

              Managing Node Taints

              +

              Taints enable a node to repel specific pods to prevent these pods from being scheduled to the node.

              +

              Taints

              A taint is a key-value pair associated with an effect. The following effects are available:

              +
              • NoSchedule: No pod will be able to schedule onto the node unless it has a matching toleration. Existing pods will not be evicted from the node.
• PreferNoSchedule: A soft version of NoSchedule. Kubernetes tries to avoid scheduling pods that do not tolerate this taint onto the node, but this is not guaranteed.
• NoExecute: Pods that do not tolerate this taint will not be scheduled onto the node and, if they are already running on the node, will be evicted from it.
              +

              To add a taint to a node, run the kubectl taint node nodename command as follows:

              +
              $ kubectl get node
              +NAME             STATUS   ROLES    AGE    VERSION
              +192.168.10.170   Ready    <none>   73d    v1.19.8-r1-CCE21.4.1.B003
              +192.168.10.240   Ready    <none>   4h8m   v1.19.8-r1-CCE21.6.1.2.B001
              +$ kubectl taint node 192.168.10.240 key1=value1:NoSchedule
              +node/192.168.10.240 tainted
              +

              To view the taint configuration, run the describe and get commands as follows:

              +
              $ kubectl describe node 192.168.10.240
              +Name:               192.168.10.240
              +...
              +Taints:             key1=value1:NoSchedule
              +...
              +$ kubectl get node 192.168.10.240 -oyaml
              +apiVersion: v1
              +...
              +spec:
              +  providerID: 06a5ea3a-0482-11ec-8e1a-0255ac101dc2
              +  taints:
              +  - effect: NoSchedule
              +    key: key1
              +    value: value1
              +...
              +

              To remove a taint, run the following command with a hyphen (-) added after NoSchedule:

              +
              $ kubectl taint node 192.168.10.240 key1=value1:NoSchedule-
              +node/192.168.10.240 untainted
              +$ kubectl describe node 192.168.10.240
              +Name:               192.168.10.240
              +...
              +Taints:             <none>
              +...
              +

              On the CCE console, you can also manage taints of a node in batches.

              +
              1. Log in to the CCE console.
              2. Click the cluster name, access the cluster details page, and choose Nodes in the navigation pane. On the page displayed, select a node and click Manage Labels and Taints.
              3. In the displayed dialog box, click Add batch operations under Batch Operation, choose Add/Update, and select Taint.

                Enter the key and value of the taint to be added, select the taint effect, and click OK.

                +

4. After the taint is added, verify that it appears in the node details.
              +
              +

              Node Scheduling Settings

              To configure scheduling settings, log in to the CCE console, click the cluster, choose Nodes in the navigation pane, and click More > Disable Scheduling in the Operation column of a node in the node list.

              +

              +

              In the dialog box that is displayed, click OK to set the node to be unschedulable.

              +

              +

              This operation will add a taint to the node. You can use kubectl to view the content of the taint.

              +
              $ kubectl describe node 192.168.10.240
              +...
              +Taints:             node.kubernetes.io/unschedulable:NoSchedule
              +...
              +

              On the CCE console, perform the same operations again to remove the taint and set the node to be schedulable.

              +

              +
              +

              Tolerations

              Tolerations are applied to pods, and allow (but do not require) the pods to schedule onto nodes with matching taints.

              +

              Taints and tolerations work together to ensure that pods are not scheduled onto inappropriate nodes. One or more taints are applied to a node. This marks that the node should not accept any pods that do not tolerate the taints.

              +

              Here's an example of a pod that uses tolerations:

              +
              apiVersion: v1
              +kind: Pod
              +metadata:
              +  name: nginx
              +  labels:
              +    env: test
              +spec:
              +  containers:
              +  - name: nginx
              +    image: nginx
              +    imagePullPolicy: IfNotPresent
              +  tolerations:
              +  - key: "key1"
              +    operator: "Equal"
              +    value: "value1"
              +    effect: "NoSchedule"  
              +

In the preceding example, the pod tolerates the taint key1=value1 with the NoSchedule effect. Therefore, the pod can be scheduled onto the node that carries this taint.

              +

You can also configure a toleration similar to the following, which indicates that the pod can be scheduled onto a node as long as the node has a taint whose key is key1, regardless of the taint's value:

              +
              tolerations:
              +- key: "key1"
              +  operator: "Exists"
              +  effect: "NoSchedule"
              +
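For taints with the NoExecute effect, a toleration can also specify tolerationSeconds, which defines how long the pod stays bound to the node after the taint is added. The following is a sketch:

tolerations:
- key: "key1"
  operator: "Equal"
  value: "value1"
  effect: "NoExecute"
  tolerationSeconds: 3600   # The pod is evicted 3600 seconds after the taint is added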
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0353.html b/docs/cce/umn/cce_10_0353.html new file mode 100644 index 00000000..5b15aa34 --- /dev/null +++ b/docs/cce/umn/cce_10_0353.html @@ -0,0 +1,35 @@ + + +

              Configuring an Image Pull Policy

              +

              When a workload is created, the container image is pulled from the image repository to the node. The image is also pulled when the workload is restarted or upgraded.

              +

              By default, imagePullPolicy is set to IfNotPresent, indicating that if the image exists on the node, the existing image is used. If the image does not exist on the node, the image is pulled from the image repository.

              +

              The image pull policy can also be set to Always, indicating that the image is pulled from the image repository and overwrites the image on the node regardless of whether the image exists on the node.

              +
              apiVersion: v1
              +kind: Pod 
              +metadata:
              +  name: nginx 
              +spec: 
              +  containers:
              +  - image: nginx:alpine 
              +    name: container-0 
              +    resources:
              +      limits:
              +        cpu: 100m
              +        memory: 200Mi
              +      requests:
              +        cpu: 100m
              +        memory: 200Mi
              +    imagePullPolicy: Always
              +  imagePullSecrets:                 
              +  - name: default-secret
              +

You can also set the image pull policy when creating a workload on the CCE console. As shown in the following figure, if you select Always, the image is always pulled. If you do not select it, the policy is IfNotPresent, meaning that the image is pulled only when it does not exist on the node.

              +

              +

              You are advised to use a new tag each time you create an image. If you do not update the tag but only update the image, when Pull Policy is set to IfNotPresent, CCE considers that an image with the tag already exists on the current node and will not pull the image again.

              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0354.html b/docs/cce/umn/cce_10_0354.html new file mode 100644 index 00000000..7380d8ce --- /dev/null +++ b/docs/cce/umn/cce_10_0354.html @@ -0,0 +1,43 @@ + + +

              Configuring Time Zone Synchronization

              +

              When creating a workload, you can configure containers to use the same time zone as the node. You can enable time zone synchronization when creating a workload.

              +

              +

The time zone synchronization function depends on the local disk (hostPath) mounted to the container. After time zone synchronization is enabled, /etc/localtime of the node is mounted to /etc/localtime of the container in hostPath mode so that the node and the container use the same time zone configuration file.

              +
              kind: Deployment
              +apiVersion: apps/v1
              +metadata:
              +  name: test
              +  namespace: default
              +spec:
              +  replicas: 2
              +  selector:
              +    matchLabels:
              +      app: test
              +  template:
              +    metadata:
              +      labels:
              +        app: test
              +    spec:
              +      volumes:
              +        - name: vol-162979628557461404
              +          hostPath:
              +            path: /etc/localtime
              +            type: ''
              +      containers:
              +        - name: container-0
              +          image: 'nginx:alpine'
              +          volumeMounts:
              +            - name: vol-162979628557461404
              +              readOnly: true
              +              mountPath: /etc/localtime
              +          imagePullPolicy: IfNotPresent
              +      imagePullSecrets:
              +        - name: default-secret
              +
              +
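To confirm that the container uses the node's time zone, you can compare the time reported inside the container with the time on the node (a quick check; replace <pod-name> with an actual pod of the Deployment):

# On the node
date
# Inside the container
kubectl exec <pod-name> -- date

The two outputs should show the same time zone.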
              + +
              + diff --git a/docs/cce/umn/cce_10_0359.html b/docs/cce/umn/cce_10_0359.html new file mode 100644 index 00000000..4ee73a81 --- /dev/null +++ b/docs/cce/umn/cce_10_0359.html @@ -0,0 +1,23 @@ + + + +

              DNS

              + +

              +
              + + + diff --git a/docs/cce/umn/cce_10_0360.html b/docs/cce/umn/cce_10_0360.html new file mode 100644 index 00000000..918f84d1 --- /dev/null +++ b/docs/cce/umn/cce_10_0360.html @@ -0,0 +1,35 @@ + + +

              Overview

              +

              Introduction to CoreDNS

              When you create a cluster, the coredns add-on is installed to resolve domain names in the cluster.

              +

              You can view the pod of the coredns add-on in the kube-system namespace.

              +
              $ kubectl get po --namespace=kube-system
              +NAME                                      READY   STATUS    RESTARTS   AGE
              +coredns-7689f8bdf-295rk                   1/1     Running   0          9m11s
              +coredns-7689f8bdf-h7n68                   1/1     Running   0          11m
              +

After CoreDNS is installed, it serves as the DNS server of the cluster. When a Service is created, CoreDNS records the Service name and IP address so that a pod can obtain the Service IP address by querying the Service name from CoreDNS.

              +

nginx.<namespace>.svc.cluster.local is used to access the Service, where nginx is the Service name, <namespace> is the namespace, and svc.cluster.local is the domain name suffix. Within the same namespace, you can omit <namespace>.svc.cluster.local and use the Service name only.

              +

An advantage of using the Service name is that you can write it into the application during development, so you do not need to know the IP address of a specific Service.

              +

              After the coredns add-on is installed, there is also a Service in the kube-system namespace, as shown below.

              +
              $ kubectl get svc -n kube-system
              +NAME               TYPE           CLUSTER-IP      EXTERNAL-IP                    PORT(S)                      AGE
              +coredns            ClusterIP      10.247.3.10     <none>                         53/UDP,53/TCP,8080/TCP       13d
              +

              By default, after other pods are created, the address of the coredns Service is written as the address of the domain name resolution server in the /etc/resolv.conf file of the pod. Create a pod and view the /etc/resolv.conf file as follows:

              +
              $ kubectl exec test01-6cbbf97b78-krj6h -it -- /bin/sh
              +/ # cat /etc/resolv.conf
              +nameserver 10.247.3.10
              +search default.svc.cluster.local svc.cluster.local cluster.local
              +options ndots:5 timeout single-request-reopen
              +

When a client accesses <Service name>:<port> of the Nginx Service, CoreDNS resolves the Service name to the Service IP address, and the request is then forwarded through the Service to the backend Nginx pod.

              +
              Figure 1 Example of domain name resolution in a cluster
              +
              +
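You can verify this resolution from inside the cluster, for example by running nslookup in a temporary pod (a quick check; nginx is the Service name used in this example):

$ kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup nginx

The output should show the cluster IP address of the nginx Service, returned by the CoreDNS Service at 10.247.3.10.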

              Related Operations

              You can also configure DNS in a workload. For details, see DNS Configuration.

              +

              You can also use coredns to implement user-defined domain name resolution. For details, see Using CoreDNS for Custom Domain Name Resolution.

              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0361.html b/docs/cce/umn/cce_10_0361.html new file mode 100644 index 00000000..de760df9 --- /dev/null +++ b/docs/cce/umn/cce_10_0361.html @@ -0,0 +1,185 @@ + + +

              Using CoreDNS for Custom Domain Name Resolution

              +

              Challenges

              When using CCE, you may need to resolve custom internal domain names in the following scenarios:

              +
• Legacy code calls other internal services through fixed domain names. Switching to Kubernetes Service names would require heavy code refactoring.
• A service is deployed outside the cluster, and applications in the cluster need to send data to it through a fixed domain name.
              +
              +

              Solution

              There are several CoreDNS-based solutions for custom domain name resolution:

              + +
              +

              Precautions

Improper modification of the CoreDNS configuration may cause domain name resolution failures in the cluster. Perform tests before and after each modification.

              +
              +

              Configuring the Stub Domain for CoreDNS

              Cluster administrators can modify the ConfigMap for the CoreDNS Corefile to change how service discovery works.

              +

              Assume that a cluster administrator has a Consul DNS server located at 10.150.0.1 and all Consul domain names have the suffix .consul.local.

              +
              1. Log in to the CCE console and access the cluster console.
              2. In the navigation pane, choose Add-ons. On the displayed page, click Edit under CoreDNS.
              3. Add a stub domain in the Parameters area.

                Modify the stub_domains parameter in the format of a key-value pair. The key is a DNS suffix domain name, and the value is a DNS IP address or a group of DNS IP addresses.
                {
                +	"stub_domains": {
                +                "consul.local": [
                +			"10.150.0.1"
                +		]
                +	},
                +	"upstream_nameservers": []
                +}
                +
                +

              4. Click OK.
              +

              You can also modify the ConfigMap as follows:

              +
              $ kubectl edit configmap coredns -n kube-system
              +apiVersion: v1
              +data:
              +  Corefile: |-
              +    .:5353 {
              +        bind {$POD_IP}
              +        cache 30
              +        errors
              +        health {$POD_IP}:8080
              +        kubernetes cluster.local in-addr.arpa ip6.arpa {
              +            pods insecure
              +            fallthrough in-addr.arpa ip6.arpa
              +        }
              +        loadbalance round_robin
              +        prometheus {$POD_IP}:9153
              +        forward . /etc/resolv.conf {
              +            policy random
              +        }
              +        reload
              +    }
              +
              +    consul.local:5353 {
              +        bind {$POD_IP}
              +        errors
              +        cache 30
              +        forward . 10.150.0.1
              +    }
              +kind: ConfigMap
              +metadata:
              +  creationTimestamp: "2022-05-04T04:42:24Z"
              +  labels:
              +    app: coredns
              +    k8s-app: coredns
              +    kubernetes.io/cluster-service: "true"
              +    kubernetes.io/name: CoreDNS
              +    release: cceaddon-coredns
              +  name: coredns
              +  namespace: kube-system
              +  resourceVersion: "8663493"
              +  uid: bba87142-9f8d-4056-b8a6-94c3887e9e1d
              +
              +

              Modifying the CoreDNS Hosts Configuration File

              1. Use kubectl to connect to the cluster.
              2. Modify the CoreDNS configuration file and add the custom domain name to the hosts file.

                Point www.example.com to 192.168.1.1. When CoreDNS resolves www.example.com, 192.168.1.1 is returned.

                +

The fallthrough field must be configured. fallthrough indicates that when the domain name to be resolved cannot be found in the hosts file, the resolution task is passed to the next CoreDNS plug-in. If fallthrough is not specified, resolution stops at the hosts plug-in, and domain names that are not listed in the hosts file cannot be resolved in the cluster.

                +

                For details about how to configure the hosts file, visit https://coredns.io/plugins/hosts/.

                +
                +
                $ kubectl edit configmap coredns -n kube-system
                +apiVersion: v1
                +data:
                +  Corefile: |-
                +    .:5353 {
                +        bind {$POD_IP}
                +        cache 30
                +        errors
                +        health {$POD_IP}:8080
                +        kubernetes cluster.local in-addr.arpa ip6.arpa {
                +          pods insecure
                +          fallthrough in-addr.arpa ip6.arpa
                +        }
                +        hosts {
                +          192.168.1.1 www.example.com
                +          fallthrough
                +        }
                +        loadbalance round_robin
                +        prometheus {$POD_IP}:9153
                +        forward . /etc/resolv.conf
                +        reload
                +    }
                +kind: ConfigMap
                +metadata:
                +  creationTimestamp: "2021-08-23T13:27:28Z"
                +  labels:
                +    app: coredns
                +    k8s-app: coredns
                +    kubernetes.io/cluster-service: "true"
                +    kubernetes.io/name: CoreDNS
                +    release: cceaddon-coredns
                +  name: coredns
                +  namespace: kube-system
                +  resourceVersion: "460"
                +  selfLink: /api/v1/namespaces/kube-system/configmaps/coredns
                +  uid: be64aaad-1629-441f-8a40-a3efc0db9fa9
                +

                After modifying the hosts file in CoreDNS, you do not need to configure the hosts file in each pod.

                +

              +
              +
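After the ConfigMap is modified and CoreDNS reloads it, you can check the result from a temporary pod in the cluster; per the example above, the answer should be 192.168.1.1:

$ kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup www.example.com

If the change does not take effect immediately, wait for the reload plug-in configured in the Corefile to pick it up.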

              Adding the CoreDNS Rewrite Configuration to Point the Domain Name to Services in the Cluster

              Use the Rewrite plug-in of CoreDNS to resolve a specified domain name to the domain name of a Service.

              +
              1. Use kubectl to connect to the cluster.
              2. Modify the CoreDNS configuration file to point example.com to the example service in the default namespace.

                $ kubectl edit configmap coredns -n kube-system
                +apiVersion: v1
                +data:
                +  Corefile: |-
                +    .:5353 {
                +        bind {$POD_IP}
                +        cache 30
                +        errors
                +        health {$POD_IP}:8080
                +        kubernetes cluster.local in-addr.arpa ip6.arpa {
                +          pods insecure
                +          fallthrough in-addr.arpa ip6.arpa
                +        }
                +        rewrite name example.com example.default.svc.cluster.local
                +        loadbalance round_robin
                +        prometheus {$POD_IP}:9153
                +        forward . /etc/resolv.conf
                +        reload
                +    }
                +kind: ConfigMap
                +metadata:
                +  creationTimestamp: "2021-08-23T13:27:28Z"
                +  labels:
                +    app: coredns
                +    k8s-app: coredns
                +    kubernetes.io/cluster-service: "true"
                +    kubernetes.io/name: CoreDNS
                +    release: cceaddon-coredns
                +  name: coredns
                +  namespace: kube-system
                +  resourceVersion: "460"
                +  selfLink: /api/v1/namespaces/kube-system/configmaps/coredns
                +  uid: be64aaad-1629-441f-8a40-a3efc0db9fa9
                +

              +
              +

              Using CoreDNS to Cascade Self-Built DNS

              1. Use kubectl to connect to the cluster.
2. Modify the CoreDNS configuration file and replace /etc/resolv.conf after the forward directive with the IP address of the external DNS server.

                $ kubectl edit configmap coredns -n kube-system
                +apiVersion: v1
                +data:
                +  Corefile: |-
                +    .:5353 {
                +        bind {$POD_IP}
                +        cache 30
                +        errors
                +        health {$POD_IP}:8080
                +        kubernetes cluster.local in-addr.arpa ip6.arpa {
                +          pods insecure
                +          fallthrough in-addr.arpa ip6.arpa
                +        }
                +        loadbalance round_robin
                +        prometheus {$POD_IP}:9153
                +        forward . 192.168.1.1
                +        reload
                +    }
                +kind: ConfigMap
                +metadata:
                +  creationTimestamp: "2021-08-23T13:27:28Z"
                +  labels:
                +    app: coredns
                +    k8s-app: coredns
                +    kubernetes.io/cluster-service: "true"
                +    kubernetes.io/name: CoreDNS
                +    release: cceaddon-coredns
                +  name: coredns
                +  namespace: kube-system
                +  resourceVersion: "460"
                +  selfLink: /api/v1/namespaces/kube-system/configmaps/coredns
                +  uid: be64aaad-1629-441f-8a40-a3efc0db9fa9
                +

              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0363.html b/docs/cce/umn/cce_10_0363.html new file mode 100644 index 00000000..44f8d147 --- /dev/null +++ b/docs/cce/umn/cce_10_0363.html @@ -0,0 +1,197 @@ + + +

              Creating a Node

              +

              Prerequisites

              • At least one cluster has been created.
              • A key pair has been created for identity authentication upon remote node login.
              +
              +

              Notes and Constraints

• The node must have at least 2 vCPUs and 4 GB of memory.
              • To ensure node stability, a certain amount of CCE node resources will be reserved for Kubernetes components (such as kubelet, kube-proxy, and docker) based on the node specifications. Therefore, the total number of node resources and assignable node resources in Kubernetes are different. The larger the node specifications, the more the containers deployed on the node. Therefore, more node resources need to be reserved to run Kubernetes components. For details, see Formula for Calculating the Reserved Resources of a Node.
• The node networking (such as the VM networking and container networking) is taken over by CCE. You are not allowed to add or delete NICs or change routes. If you modify the networking configuration, the availability of CCE may be affected. For example, the NIC named gw_11cbf51a@eth0 on the node is the container network gateway and cannot be modified.
              • During the node creation, software packages are downloaded from OBS using the domain name. You need to use a private DNS server to resolve the OBS domain name, and configure the subnet where the node resides with a private DNS server address. When you create a subnet, the private DNS server is used by default. If you change the subnet DNS, ensure that the DNS server in use can resolve the OBS domain name.
              • Once a node is created, its AZ cannot be changed.
              +
              +

              Procedure

              After a cluster is created, you can create nodes for the cluster.

              +
              1. Log in to the CCE console. In the navigation pane, choose Clusters. Click the target cluster name to access its details page.
              2. In the navigation pane on the left, choose Nodes. On the page displayed, click Create Node. In the Node Settings step, set node parameters by referring to the following table.

                Compute Settings

                +
                You can configure the specifications and OS of a cloud server, on which your containerized applications run. +
                + + + + + + + + + + + + + + + + + + + + + + + + + +
                Table 1 Configuration parameters

                Parameter

                +

                Description

                +

                AZ

                +

                AZ where the node is located. Nodes in a cluster can be created in different AZs for higher reliability. The value cannot be changed after the node is created.

                +

                You are advised to select Random to deploy your node in a random AZ based on the selected node flavor.

                +

                An AZ is a physical region where resources use independent power supply and networks. AZs are physically isolated but interconnected through an internal network. To enhance workload availability, create nodes in different AZs.

                +

                Node Type

                +

                CCE clusters support Elastic Cloud Servers (ECSs).

                +

                CCE Turbo clusters support Elastic Cloud Servers (ECSs) and bare metal servers (BMSs).

                +

                Container Engine

                +

                CCE clusters support Docker and containerd in some scenarios.

                +
                • VPC network clusters of v1.23 and later versions support containerd. Container tunnel network clusters of v1.23.2-r0 and later versions support containerd.
                • For a CCE Turbo cluster, both Docker and containerd are supported. For details, see Mapping between Node OSs and Container Engines.
                +

                Specifications

                +

                Select the node specifications based on service requirements. The available node specifications vary depending on AZs.

                +

                OS

                +

                Select an OS type. Different types of nodes support different OSs.

                +

                Public image: Select an OS for the node.

                +

                Private image: You can use private images.

                +

                Node Name

                +

                Name of the node. When nodes (ECSs) are created in batches, the value of this parameter is used as the name prefix for each ECS.

                +

                The system generates a default name for you, which can be modified.

                +

                A node name must start with a lowercase letter and cannot end with a hyphen (-). Only digits, lowercase letters, and hyphens (-) are allowed.

                +

                Login Mode

                +
                • Key Pair

                  Select the key pair used to log in to the node. You can select a shared key.

                  +

                  A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create Key Pair.

                  +
                +
                +
                +
                +

                Storage Settings

                +
                Configure storage resources on a node for the containers running on it. Set the disk size according to site requirements. +
                + + + + + + + + + + +
                Table 2 Configuration parameters

                Parameter

                +

                Description

                +

                System Disk

                +

                System disk used by the node OS. The value ranges from 40 GB to 1,024 GB. The default value is 50 GB.

                +
Encryption: Disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption function. This function is available only in certain regions.
                • Encryption is not selected by default.
                • After you select Encryption, you can select an existing key in the displayed dialog box. If no key is available, click View Key List to create a key. After the key is created, click the refresh icon.
                +
                +

                Data Disk

                +

                Data disk used by the container runtime and kubelet on the node.

                +

At least one data disk is required for the container runtime and kubelet. This data disk cannot be deleted or detached. Otherwise, the node will be unavailable.

                +

                Click Expand to set the following parameters:

                +
                • Allocate Disk Space: Select this option to define the disk space occupied by the container runtime to store the working directories, container image data, and image metadata. For details about how to allocate data disk space, see Data Disk Space Allocation.
                • Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption function. This function is available only in certain regions.
                  • Encryption is not selected by default.
                  • After you select Encryption, you can select an existing key in the displayed dialog box. If no key is available, click View Key List to create a key. After the key is created, click the refresh icon.
                  +
                +

                Adding Multiple Data Disks

                +

                A maximum of four data disks can be added. By default, raw disks are created without any processing. You can also click Expand and select any of the following options:

                +
                • Default: By default, a raw disk is created without any processing.
                • Mount Disk: The data disk is attached to a specified directory.
                +

                Local Disk Description

                +

                If the node flavor is disk-intensive or ultra-high I/O, one data disk can be a local disk.

                +

                Local disks may break down and do not ensure data reliability. It is recommended that you store service data in EVS disks, which are more reliable than local disks.

                +
                +
                +
                +

                Network Settings

                +
                Configure networking resources to allow node and containerized application access. +
                + + + + + + + + + + +
                Table 3 Configuration parameters

                Parameter

                +

                Description

                +

                Node Subnet

                +

                The node subnet selected during cluster creation is used by default. You can choose another subnet instead.

                +

                Node IP Address

                +

                IP address of the specified node. By default, the value is randomly allocated.

                +
                +
                +
                +

                Advanced Settings

                +
                Configure advanced node capabilities such as labels, taints, and startup command. +
                + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
                Table 4 Advanced configuration parameters

                Parameter

                +

                Description

                +

                Kubernetes Label

                +

                Click Add Label to set the key-value pair attached to the Kubernetes objects (such as pods). A maximum of 20 labels can be added.

                +

                Labels can be used to distinguish nodes. With workload affinity settings, container pods can be scheduled to a specified node. For more information, see Labels and Selectors.

                +

                Resource Tag

                +

                You can add resource tags to classify resources.

                +

                You can create predefined tags in Tag Management Service (TMS). Predefined tags are visible to all service resources that support the tagging function. You can use these tags to improve tagging and resource migration efficiency.

                +

                CCE will automatically create the "CCE-Dynamic-Provisioning-Node=node id" tag.

                +

                Taint

                +
                This parameter is left blank by default. You can add taints to set anti-affinity for the node. A maximum of 10 taints are allowed for each node. Each taint contains the following parameters:
                • Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
                • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
                • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
                +
                +

                For details, see Managing Node Taints.

                +
                NOTE:

                For a cluster of v1.19 or earlier, the workload may have been scheduled to a node before the taint is added. To avoid such a situation, select a cluster of v1.19 or later.

                +
                +

                Max. Pods

                +

                Maximum number of pods that can run on the node, including the default system pods.

                +

                This limit prevents the node from being overloaded with pods.

                +

                This number is also decided by other factors. For details, see Maximum Number of Pods That Can Be Created on a Node.

                +

                ECS Group

                +

                An ECS group logically groups ECSs. The ECSs in the same ECS group comply with the same policy associated with the ECS group.

                +

                Anti-affinity: ECSs in an ECS group are deployed on different physical hosts to improve service reliability.

                +

                Select an existing ECS group, or click Add ECS Group to create one. After the ECS group is created, click the refresh button.

                +

                Pre-installation Command

                +

                Enter commands. A maximum of 1,000 characters are allowed.

                +

                The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed.

                +

                Post-installation Command

                +

                Enter commands. A maximum of 1,000 characters are allowed.

                +

                The script will be executed after Kubernetes software is installed and will not affect the installation.

                +

                Agency

                +

                An agency is created by the account administrator on the IAM console. By creating an agency, you can share your cloud server resources with another account, or entrust a more professional person or team to manage your resources.

                +

                If no agency is available, click Create Agency on the right to create one.

                +
                +
                +
                +

3. Click Next: Confirm. Confirm the configured parameters and specifications.
              4. Click Submit.

                The node list page is displayed. If the node status is Running, the node is created successfully. It takes about 6 to 10 minutes to create a node.

                +

              5. Click Back to Node List. The node is created successfully if it changes to the Running state.
              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0365.html b/docs/cce/umn/cce_10_0365.html new file mode 100644 index 00000000..70278765 --- /dev/null +++ b/docs/cce/umn/cce_10_0365.html @@ -0,0 +1,237 @@ + + +

              DNS Configuration

              +

Every Kubernetes cluster has a built-in DNS add-on (Kube-DNS or CoreDNS) to provide domain name resolution for workloads in the cluster. Under a high concurrency of DNS queries, Kube-DNS/CoreDNS may become a performance bottleneck and occasionally fail to fulfill queries. In addition, Kubernetes workloads sometimes initiate unnecessary DNS queries, which further overloads DNS when many queries run concurrently. Tuning the DNS configuration of workloads reduces the risk of DNS query failures to some extent.

              +

              For more information about DNS, see coredns (System Resource Add-On, Mandatory).

              +

              DNS Configuration Items

              Run the cat /etc/resolv.conf command on a Linux node or container to view the DNS resolver configuration file. The following is an example DNS resolver configuration of a container in a Kubernetes cluster:
              nameserver 10.247.x.x
              +search default.svc.cluster.local svc.cluster.local cluster.local
              +options ndots:5
              +
              +
              Configuration Options
              • nameserver: an IP address list of a name server that the resolver will query. If this parameter is set to 10.247.x.x, the resolver will query the kube-dns/CoreDNS. If this parameter is set to another IP address, the resolver will query a cloud or on-premises DNS server.
              • search: a search list for host-name lookup. When a domain name cannot be resolved, DNS queries will be attempted combining the domain name with each domain in the search list in turn until a match is found or all domains in the search list are tried. For CCE clusters, the search list is currently limited to three domains per container. When a nonexistent domain name is being resolved, eight DNS queries will be initiated because each domain name (including those in the search list) will be queried twice, one for IPv4 and the other for IPv6.
              • options: options that allow certain internal resolver variables to be modified. Common options include timeout and ndots.

                The value ndots:5 means that if a domain name has fewer than 5 dots (.), DNS queries will be attempted by combining the domain name with each domain in the search list in turn. If no match is found after all the domains in the search list are tried, the domain name is then used for DNS query. If the domain name has 5 or more than 5 dots, it will be tried first for DNS query. In case that the domain name cannot be resolved, DNS queries will be attempted by combining the domain name with each domain in the search list in turn.

                +

For example, the domain name www.***.com has only two dots (fewer than the value of ndots), so the sequence of DNS queries is as follows: www.***.com.default.svc.cluster.local, www.***.com.svc.cluster.local, www.***.com.cluster.local, and www.***.com. This means that at least seven DNS queries will be initiated before the domain name is resolved into an IP address. Clearly, many unnecessary DNS queries are initiated when an external domain name is accessed, so there is room for improvement in the workload's DNS configuration.

                +
              +
              +

              For more information about configuration options in the resolver configuration file used by Linux operating systems, visit http://man7.org/linux/man-pages/man5/resolv.conf.5.html.

              +
              +
              +

              Configuring DNS Using the Workload YAML

              When creating a workload using a YAML file, you can configure the DNS settings in the YAML. The following is an example for an Nginx application:
              apiVersion: apps/v1
              +kind: Deployment
              +metadata:
              +  name: nginx
              +  namespace: default
              +spec:
              +  replicas: 1
              +  selector:
              +    matchLabels:
              +      app: nginx
              +  template:
              +    metadata:
              +      labels:
              +        app: nginx
              +    spec:
              +      containers:
              +        - name: container-1
              +          image: nginx:latest
              +          imagePullPolicy: IfNotPresent
              +      imagePullSecrets:
              +        - name: default-secret
              +      dnsPolicy: None
              +      dnsConfig:
              +        options:
              +          - name: ndots
              +            value: '5'
              +          - name: timeout
              +            value: '3'
              +        nameservers:
              +          - 10.2.3.4
              +        searches:
              +          - my.dns.search.suffix
              +
              +

              dnsPolicy

              +
              The dnsPolicy field is used to configure a DNS policy for an application. The default value is ClusterFirst. The DNS parameters in dnsConfig will be merged to the DNS file generated according to dnsPolicy. The merge rules are later explained in Table 2. Currently, dnsPolicy supports the following four values: +
              + + + + + + + + + + + + + + + + +
              Table 1 dnsPolicy

              Parameter

              +

              Description

              +

              ClusterFirst (default value)

              +

              CCE cluster's CoreDNS, which is cascaded with the cloud DNS by default, is used for workloads. Containers can resolve both the cluster-internal domain names registered by a Service and the external domain names exposed to public networks. The search list (search option) and ndots: 5 are present in the DNS configuration file. Therefore, when accessing an external domain name and a long cluster-internal domain name (for example, kubernetes.default.svc.cluster.local), the search list will usually be traversed first, resulting in at least six invalid DNS queries. The issue of invalid DNS queries disappears only when a short cluster-internal domain name (for example, kubernetes) is being accessed.

              +

              ClusterFirstWithHostNet

              +
By default, workloads running with hostNetwork=true use the DNS configuration file that the kubelet --resolv-conf flag points to, that is, a cloud DNS is used for CCE clusters. If such workloads need to use the cluster's Kube-DNS/CoreDNS, set dnsPolicy to ClusterFirstWithHostNet. The container's DNS configuration file is then the same as with ClusterFirst, so invalid DNS queries still exist.
              ...
              +spec:
              +  containers:
              +  - image: nginx:latest
              +    imagePullPolicy: IfNotPresent
              +    name: container-1
              +  restartPolicy: Always
              +  hostNetwork: true
              +  dnsPolicy: ClusterFirstWithHostNet
              +
              +

              Default

              +

              Container's DNS configuration file is the DNS configuration file that the kubelet's --resolv-conf flag points to. In this case, a cloud DNS is used for CCE clusters. Both search and options fields are left unspecified. This configuration can only resolve the external domain names registered with the Internet, and not cluster-internal domain names. This configuration is free from the issue of invalid DNS queries.

              +

              None

              +

              If dnsPolicy is set to None, the dnsConfig field must be specified because all DNS settings are supposed to be provided using the dnsConfig field.

              +
              +
              +

              If the dnsPolicy field is not specified, the default value is ClusterFirst instead of Default.

              +
              +
              +

              dnsConfig

              +
              The dnsConfig field is used to configure DNS parameters for workloads. The configured parameters are merged to the DNS configuration file generated according to dnsPolicy. If dnsPolicy is set to None, the workload's DNS configuration file is specified by the dnsConfig field. If dnsPolicy is not set to None, the DNS parameters configured in dnsConfig are added to the DNS configuration file generated according to dnsPolicy. +
              + + + + + + + + + + + + + +
              Table 2 dnsConfig

              Parameter

              +

              Description

              +

              options

              +

              An optional list of objects where each object may have a name property (required) and a value property (optional). The contents in this property will be merged to the options generated from the specified DNS policy in dnsPolicy. Duplicate entries are removed.

              +

              nameservers

              +

A list of IP addresses that will be used as DNS servers. If the workload's dnsPolicy is set to None, the list must contain at least one IP address; otherwise, this property is optional. The listed servers will be combined with the nameservers generated from the DNS policy specified in dnsPolicy, with duplicate addresses removed.

              +

              searches

              +

              A list of DNS search domains for hostname lookup in the Pod. This property is optional. When specified, the provided list will be merged into the search domain names generated from the chosen DNS policy in dnsPolicy. Duplicate domain names are removed. Kubernetes allows for at most 6 search domains.

              +
              +
              +
              +
              +

              Configuring DNS for a Workload Using the Console

Kubernetes provides DNS-related configuration options for applications. Configuring DNS at the application level can effectively reduce unnecessary DNS queries in certain scenarios and improve service concurrency. The following procedure uses an Nginx application as an example to describe how to add DNS configurations for a workload on the console.

              +
              1. Log in to the CCE console, access the cluster console, select Workloads in the navigation pane, and click Create Workload in the upper right corner.
              2. Configure basic information about the workload. For details, see Creating a Deployment.
              3. In the Advanced Settings area, click the DNS tab and set the following parameters as required:

                • DNS Policy: The DNS policies provided on the console correspond to the dnsPolicy field in the YAML file. For details, see Table 1.
                  • Supplement defaults: corresponds to dnsPolicy=ClusterFirst. Containers can resolve both the cluster-internal domain names registered by a Service and the external domain names exposed to public networks.
                  • Replace defaults: corresponds to dnsPolicy=None. You must configure IP Address and Search Domain. Containers only use the user-defined IP address and search domain configurations for domain name resolution.
                  • Inherit defaults: corresponds to dnsPolicy=Default. Containers use the domain name resolution configuration from the node that pods run on and cannot resolve the cluster-internal domain names.
                  +
• Optional Objects: The options parameters in the dnsConfig field. Each object may have a name property (required) and a value property (optional). After setting the properties, click Confirm to add the option.
  • timeout: Timeout interval, in seconds.
  • ndots: Number of dots (.) that must be present in a domain name. If a domain name contains fewer dots than this value, the operating system looks up the name in the search domains. Otherwise, the name is treated as a fully qualified domain name (FQDN) and is tried first as an absolute name.
                  +
                • IP Address: nameservers in the dnsConfig field. You can configure the domain name server for the custom domain name. The value is one or a group of DNS IP addresses.
                • Search Domain: searches in the dnsConfig field. A list of DNS search domains for hostname lookup in the pod. This property is optional. When specified, the provided list will be merged into the search domain names generated from the chosen DNS policy in dnsPolicy. Duplicate domain names are removed.
                +

                +

              4. Click Create Workload.
              +
              +

              Configuration Examples

              The following example describes how to configure DNS for workloads.

              +
              • Use Case 1: Using Kube-DNS/CoreDNS Built in Kubernetes Clusters

                Scenario

                +

The Kube-DNS/CoreDNS built into Kubernetes clusters applies to scenarios where workloads need to resolve cluster-internal domain names only, or both cluster-internal and external domain names. This is the default DNS for workloads.

                +

                Example:

                +
                apiVersion: v1
                +kind: Pod
                +metadata:
                +  namespace: default
                +  name: dns-example
                +spec:
                +  containers:
                +  - name: test
                +    image: nginx:alpine
                +  dnsPolicy: ClusterFirst
                +

                Container's DNS configuration file:

                +
                nameserver 10.247.3.10
                +search default.svc.cluster.local svc.cluster.local cluster.local
                +options ndots:5
                +
              • Use Case 2: Using a Cloud DNS

                Scenario

                +

A cloud DNS cannot resolve cluster-internal domain names. It therefore applies to scenarios where workloads access only external domain names registered with the Internet.

                +

                Example:

                +
                apiVersion: v1
                +kind: Pod
                +metadata:
                +  namespace: default
                +  name: dns-example
                +spec:
                +  containers:
                +  - name: test
                +    image: nginx:alpine
+  dnsPolicy: Default    # The DNS configuration file that the kubelet's --resolv-conf flag points to is used. In this case, a cloud DNS is used for CCE clusters.
                +

                Container's DNS configuration file:

                +
                nameserver 100.125.x.x
                +
              • Use Case 3: Using Kube-DNS/CoreDNS for Workloads Running with hostNetwork

                Scenario

                +

By default, a cloud DNS is used for workloads running with hostNetwork. If such workloads need to use the cluster's Kube-DNS/CoreDNS, set dnsPolicy to ClusterFirstWithHostNet.

                +

                Example:

                +
                apiVersion: v1
                +kind: Pod
                +metadata:
                +  name: nginx
                +spec:
                +  hostNetwork: true
                +  dnsPolicy: ClusterFirstWithHostNet
                +  containers:
                +  - name: nginx
                +    image: nginx:alpine
                +    ports:
                +    - containerPort: 80
                +

                Container's DNS configuration file:

                +
                nameserver 10.247.3.10
                +search default.svc.cluster.local svc.cluster.local cluster.local
                +options ndots:5
                +
              • Use Case 4: Customizing Application's DNS Configuration

                Scenario

                +

                You can flexibly customize the DNS configuration file for applications. Using dnsPolicy and dnsConfig together can address almost all scenarios, including the scenarios in which an on-premises DNS will be used, multiple DNSs will be cascaded, and DNS configuration options will be modified.

                +

                Example 1: Using Your On-Premises DNS

                +

                Set dnsPolicy to None so application's DNS configuration file is generated based on dnsConfig.

                +
                apiVersion: v1
                +kind: Pod
                +metadata:
                +  namespace: default
                +  name: dns-example
                +spec:
                +  containers:
                +  - name: test
                +    image: nginx:alpine
                +  dnsPolicy: "None"
                +  dnsConfig:
                +    nameservers:
+    - 10.2.3.4    # IP address of your on-premises DNS
                +    searches:
                +    - ns1.svc.cluster.local
                +    - my.dns.search.suffix
                +    options:
                +    - name: ndots
                +      value: "2"
                +    - name: timeout
                +      value: "3"
                +

                Container's DNS configuration file:

                +
                nameserver 10.2.3.4
                +search ns1.svc.cluster.local my.dns.search.suffix
                +options timeout:3 ndots:2
                +

                Example 2: Modifying the ndots Option in the DNS Configuration File to Reduce Invalid DNS Queries

                +

                Set dnsPolicy to a value other than None so the DNS parameters configured in dnsConfig are added to the DNS configuration file generated based on dnsPolicy.

                +
                apiVersion: v1
                +kind: Pod
                +metadata:
                +  namespace: default
                +  name: dns-example
                +spec:
                +  containers:
                +  - name: test
                +    image: nginx:alpine
                +  dnsPolicy: "ClusterFirst"
                +  dnsConfig:
                +    options:
                +    - name: ndots
+      value: "2"    # Changes the ndots:5 option in the DNS configuration file generated based on the ClusterFirst policy to ndots:2.
                +

                Container's DNS configuration file:

                +
                nameserver 10.247.3.10
                +search default.svc.cluster.local svc.cluster.local cluster.local
                +options ndots:2
                +
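In all of the preceding use cases, you can check the DNS configuration that a pod finally receives by viewing /etc/resolv.conf inside the container. A minimal check, reusing the dns-example pod from the examples above (replace the pod name and namespace with your own):

kubectl exec -it dns-example -n default -- cat /etc/resolv.conf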
              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0367.html b/docs/cce/umn/cce_10_0367.html new file mode 100644 index 00000000..843f653e --- /dev/null +++ b/docs/cce/umn/cce_10_0367.html @@ -0,0 +1,24 @@ + + +

              Customizing a Cluster Certificate SAN

              +

              Scenario

A Subject Alternative Name (SAN) can be signed into a cluster server certificate. A SAN is usually used by the client to verify server validity during a TLS handshake. Specifically, the validity check includes whether the server certificate is issued by a CA trusted by the client and whether the SAN in the certificate matches the IP address or DNS domain name that the client actually accesses.

              +

              If the client cannot directly access the private IP or EIP of the cluster, you can sign the IP address or DNS domain name that can be directly accessed by the client into the cluster server certificate to enable two-way authentication on the client, which improves security. Typical use cases include DNAT access and domain name access.

              +
              +

              Notes and Constraints

              This feature is available only to clusters of v1.19 and later.

              +
              +

              Customizing a SAN

              1. Log in to the CCE console.
              2. Click the target cluster in the cluster list to go to the cluster details page.
3. In the Connection Information area, click the edit icon next to Custom SAN. In the dialog box displayed, add the IP address or domain name and click Save.

                +

1. This operation restarts kube-apiserver and updates the kubeconfig.json file, which takes a short period of time. Do not perform operations on the cluster during this period.

                +

                2. A maximum of 128 domain names or IP addresses, separated by commas (,), are allowed.

                +

                3. If a custom domain name needs to be bound to an EIP, ensure that an EIP has been configured.

                +
                +

              +
              +

              Typical Domain Name Access Scenarios

• Add the corresponding domain name mapping on the client, either by specifying the DNS server address in the client's domain name configuration or by configuring /etc/hosts on the client host.
              • Use domain name access in the intranet. DNS allows you to configure mappings between cluster EIPs and custom domain names. After an EIP is updated, you can continue to use two-way authentication and the domain name to access the cluster without downloading the kubeconfig.json file again.
              • Add A records on a self-built DNS server.
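After the SAN is updated, you can check whether the certificate returned by kube-apiserver contains the new IP address or domain name. The following is a minimal sketch; cluster.example.com:5443 is only a placeholder for the address and port that your client actually uses to reach the cluster.

# Print the Subject Alternative Name extension of the server certificate.
openssl s_client -connect cluster.example.com:5443 -servername cluster.example.com </dev/null 2>/dev/null | openssl x509 -noout -text | grep -A1 "Subject Alternative Name"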
              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0374.html b/docs/cce/umn/cce_10_0374.html new file mode 100644 index 00000000..80ad67df --- /dev/null +++ b/docs/cce/umn/cce_10_0374.html @@ -0,0 +1,28 @@ + + +

              Storage

              +

              +
              + + diff --git a/docs/cce/umn/cce_10_0377.html b/docs/cce/umn/cce_10_0377.html new file mode 100644 index 00000000..6c3b3b9b --- /dev/null +++ b/docs/cce/umn/cce_10_0377.html @@ -0,0 +1,231 @@ + + +

              Using Local Disks as Storage Volumes

              +

              You can mount a file directory of the host where a container is located to a specified container path (the hostPath mode in Kubernetes) for persistent data storage. Alternatively, you can leave the source path empty (the emptyDir mode in Kubernetes), and a temporary directory of the host will be mounted to the mount point of the container for temporary storage.

              +

              Using Local Volumes

              CCE supports four types of local volumes.

              +
              +
              • hostPath: mounts a file directory of the host where the container is located to the specified mount point of the container. For example, if the container needs to access /etc/hosts, you can use a hostPath volume to map /etc/hosts.
• emptyDir: stores data temporarily. An emptyDir volume is first created when a pod is assigned to a node and exists as long as that pod is running on that node. When the pod is terminated, the emptyDir volume is deleted and its data is permanently lost.
• ConfigMap: A ConfigMap can be mounted as a volume, and all contents stored in its keys are mounted onto the specified container directory. A ConfigMap is a type of resource that stores configuration information required by a workload. Its content is user-defined. For details about how to create a ConfigMap, see Creating a ConfigMap. For details about how to use a ConfigMap, see Using a ConfigMap.
              • Secret mounting: Data in the secret is mounted to a path of the container. A secret is a type of resource that holds sensitive data, such as authentication and key information. All content is user-defined. For details about how to create a secret, see Creating a Secret. For details on how to use a secret, see Using a Secret.
              +

              The following describes how to mount these four types of volumes.

              +

              hostPath

You can mount a path on the host to a specified container path. A hostPath volume is usually used to store workload logs persistently or by workloads that need to access the internal data structures of the Docker engine on the host.

              +
              +
              1. Log in to the CCE console.
              2. When creating a workload, click Data Storage in the Container Settings. Click Add Volume and choose hostPath from the drop-down list.
              3. Set parameters for adding a local volume, as listed in Table 1.

                +

                + + + + + + + + + + + + + +
                Table 1 Setting parameters for mounting a hostPath volume

                Parameter

                +

                Description

                +

                Storage Type

                +

                Select hostPath.

                +

                Host Path

                +

                Path of the host to which the local volume is to be mounted, for example, /etc/hosts.

                +
                NOTE:

                Host Path cannot be set to the root directory /. Otherwise, the mounting fails. Mount paths can be as follows:

                +
                • /opt/xxxx (excluding /opt/cloud)
                • /mnt/xxxx (excluding /mnt/paas)
                • /tmp/xxx
                • /var/xxx (excluding key directories such as /var/lib, /var/script, and /var/paas)
                • /xxxx (It cannot conflict with the system directory, such as bin, lib, home, root, boot, dev, etc, lost+found, mnt, proc, sbin, srv, tmp, var, media, opt, selinux, sys, and usr.)
                +

                Do not set this parameter to /home/paas, /var/paas, /var/lib, /var/script, /mnt/paas, or /opt/cloud. Otherwise, the system or node installation will fail.

                +
                +

                Add Container Path

                +

                Configure the following parameters:

                +
                1. subPath: Enter a subpath, for example, tmp.

A subpath is used to mount only part of the data volume so that the same data volume can be reused in a single pod. If this parameter is left blank, the root path is used by default.

                  +
                2. Container Path: Enter the path of the container, for example, /tmp.
                  This parameter indicates the container path to which a data volume will be mounted. Do not mount the volume to a system directory such as / or /var/run; this action may cause container errors. You are advised to mount the container to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
                  NOTICE:

                  When the container is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.

                  +
                  +
                  +
                3. Permission
                  • Read-only: You can only read the data volumes mounted to the path.
• Read/Write: You can modify the data volumes mounted to the path. Newly written data is not migrated if the container is migrated, which may cause data loss.
                  +
                +

You can click the add button to add multiple paths and subpaths.

                +
                +
                +

              +

              emptyDir

              emptyDir applies to temporary data storage, disaster recovery, and runtime data sharing. It will be deleted upon deletion or transfer of workload pods.

              +
              +
              1. Log in to the CCE console.
              2. When creating a workload, click Data Storage in the Container Settings. Click Add Volume and choose emptyDir from the drop-down list.
              3. Set the local volume type to emptyDir and set parameters for adding a local volume, as described in Table 2.

                +

                + + + + + + + + + + + + + +
                Table 2 Setting parameters for mounting an emptyDir volume

                Parameter

                +

                Description

                +

                Storage Type

                +

                Select emptyDir.

                +

                Storage Medium

                +
• Default: Data is stored in hard disks. This option is applicable to large amounts of data with low requirements on read/write efficiency.
• Memory: Selecting this option can improve the running speed, but the storage capacity is subject to the memory size. This mode applies to scenarios where the data volume is small and high read/write efficiency is required.
                +
                NOTE:
                • If you select Memory, any files you write will count against your container's memory limit. Pay attention to the memory quota. If the memory usage exceeds the threshold, OOM may occur.
                • If Memory is selected, the size of an emptyDir volume is 50% of the pod specifications and cannot be changed.
                • If Memory is not selected, emptyDir volumes will not occupy the system memory.
                +
                +

                Add Container Path

                +

                Configure the following parameters:

                +
                1. subPath: Enter a subpath, for example, tmp.

A subpath is used to mount only part of the data volume so that the same data volume can be reused in a single pod. If this parameter is left blank, the root path is used by default.

                  +
                2. Container Path: Enter the path of the container, for example, /tmp.
                  This parameter indicates the container path to which a data volume will be mounted. Do not mount the volume to a system directory such as / or /var/run; this action may cause container errors. You are advised to mount the container to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
                  NOTICE:

                  When the container is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.

                  +
                  +
                  +
                3. Permission
                  • Read-only: You can only read the data volumes mounted to the path.
• Read/Write: You can modify the data volumes mounted to the path. Newly written data is not migrated if the container is migrated, which may cause data loss.
                  +
                +

You can click the add button to add multiple paths and subpaths.

                +
                +
                +

              +
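If you prefer YAML to the console, the following is a minimal sketch of a pod that mounts a memory-backed emptyDir volume; the pod name, mount path, and 1Gi size limit are illustrative only.

apiVersion: v1
kind: Pod
metadata:
  name: emptydir-example
  namespace: default
spec:
  containers:
  - name: container-1
    image: nginx:alpine
    volumeMounts:
    - mountPath: /tmp            # Container path to which the volume is mounted.
      name: cache-volume
  volumes:
  - name: cache-volume
    emptyDir:
      medium: Memory             # Omit this field to store data on the node's disk instead.
      sizeLimit: 1Gi             # Optional size limit for the volume.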

              ConfigMap

The data stored in a ConfigMap can be referenced in a volume of type ConfigMap. You can mount such a volume to a specified container path. The platform supports the separation of workload code and configuration files. ConfigMap volumes are used to store workload configuration parameters. You need to create ConfigMaps in advance. For details, see Creating a ConfigMap.

              +
              +
              1. Log in to the CCE console.
              2. When creating a workload, click Data Storage in the Container Settings. Click Add Volume and choose ConfigMap from the drop-down list.
              3. Set the local volume type to ConfigMap and set parameters for adding a local volume, as shown in Table 3.

                +

                + + + + + + + + + + + + + +
                Table 3 Setting parameters for mounting a ConfigMap volume

                Parameter

                +

                Description

                +

                Storage Type

                +

                Select ConfigMap.

                +

                Option

                +

                Select the desired ConfigMap name.

                +

                A ConfigMap must be created in advance. For details, see Creating a ConfigMap.

                +

                Add Container Path

                +

                Configure the following parameters:

                +
                1. subPath: Enter a subpath, for example, tmp.
• A subpath is used to mount only part of the data volume so that the same data volume can be reused in a single pod.
• The subpath can be a key-value pair of the ConfigMap or secret. If the subpath references a key-value pair that does not exist, the data import does not take effect.
• The data imported by specifying a subpath is not updated when the ConfigMap or secret is updated.
                  +
                2. Container Path: Enter the path of the container, for example, /tmp.
                  This parameter indicates the container path to which a data volume will be mounted. Do not mount the volume to a system directory such as / or /var/run; this action may cause container errors. You are advised to mount the container to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
                  NOTICE:

                  When the container is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.

                  +
                  +
                  +
                3. Set the permission to Read-only. Data volumes in the path are read-only.
                +

You can click the add button to add multiple paths and subpaths.

                +
                +
                +

              +
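The console settings above correspond to a volume of type configMap in YAML. The following is a minimal sketch, assuming a ConfigMap named cce-configmap already exists in the same namespace; every key in the ConfigMap becomes a file under the mount path.

apiVersion: v1
kind: Pod
metadata:
  name: configmap-example
  namespace: default
spec:
  containers:
  - name: container-1
    image: nginx:alpine
    volumeMounts:
    - mountPath: /etc/config      # Container path; each ConfigMap key becomes a file in this directory.
      name: config-volume
      readOnly: true              # ConfigMap volumes are mounted read-only.
  volumes:
  - name: config-volume
    configMap:
      name: cce-configmap         # Name of the existing ConfigMap.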

              Secret

              You can mount a secret as a volume to the specified container path. Contents in a secret are user-defined. Before that, you need to create a secret. For details, see Creating a Secret.

              +
              +
              1. Log in to the CCE console.
              2. When creating a workload, click Data Storage in the Container Settings. Click Add Volume and choose Secret from the drop-down list.
              3. Set the local volume type to Secret and set parameters for adding a local volume, as shown in Table 4.

                +

                + + + + + + + + + + + + + +
                Table 4 Setting parameters for mounting a secret volume

                Parameter

                +

                Description

                +

                Storage Type

                +

                Select Secret.

                +

                Secret

                +

                Select the desired secret name.

                +

                A secret must be created in advance. For details, see Creating a Secret.

                +

                Add Container Path

                +

                Configure the following parameters:

                +
                1. subPath: Enter a subpath, for example, tmp.
• A subpath is used to mount only part of the data volume so that the same data volume can be reused in a single pod.
• The subpath can be a key-value pair of the ConfigMap or secret. If the subpath references a key-value pair that does not exist, the data import does not take effect.
• The data imported by specifying a subpath is not updated when the ConfigMap or secret is updated.
                  +
                2. Container Path: Enter the path of the container, for example, /tmp.
                  This parameter indicates the container path to which a data volume will be mounted. Do not mount the volume to a system directory such as / or /var/run; this action may cause container errors. You are advised to mount the container to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
                  NOTICE:

                  When the container is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.

                  +
                  +
                  +
                3. Set the permission to Read-only. Data volumes in the path are read-only.
                +

You can click the add button to add multiple paths and subpaths.

                +
                +
                +

              +
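Similarly, the secret mounting described above corresponds to a volume of type secret in YAML. The following is a minimal sketch, assuming a secret named cce-secret already exists in the same namespace.

apiVersion: v1
kind: Pod
metadata:
  name: secret-example
  namespace: default
spec:
  containers:
  - name: container-1
    image: nginx:alpine
    volumeMounts:
    - mountPath: /etc/secret-volume   # Container path; each secret key becomes a file in this directory.
      name: secret-volume
      readOnly: true                  # Secret volumes are mounted read-only.
  volumes:
  - name: secret-volume
    secret:
      secretName: cce-secret          # Name of the existing secret.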

              Mounting a hostPath Volume Using kubectl

              You can use kubectl to mount a file directory of the host where the container is located to a specified mount path of the container.

              +
              1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
              2. Run the following commands to configure the hostPath-pod-example.yaml file, which is used to create a pod.

                touch hostPath-pod-example.yaml

                +

                vi hostPath-pod-example.yaml

                +

                Mount the hostPath volume for the Deployment. The following is an example:

                +
                apiVersion: apps/v1 
                +kind: Deployment 
                +metadata: 
                +  name: hostpath-pod-example 
                +  namespace: default 
                +spec: 
                +  replicas: 1 
                +  selector: 
                +    matchLabels: 
                +      app: hostpath-pod-example 
                +  template: 
                +    metadata: 
                +      labels: 
                +        app: hostpath-pod-example 
                +    spec: 
                +      containers: 
                +      - image: nginx
                +        name: container-0 
                +        volumeMounts: 
                +        - mountPath: /tmp 
                +          name: hostpath-example 
                +      imagePullSecrets:
                +        - name: default-secret
                +      restartPolicy: Always 
                +      volumes: 
                +      - name: hostpath-example 
                +        hostPath: 
                +          path: /tmp/test
                + +
                + + + + + + + + + + +
                Table 5 Local disk storage dependency parameters

                Parameter

                +

                Description

                +

                mountPath

                +

                Mount path of the container. In this example, the volume is mounted to the /tmp directory.

                +

                hostPath

                +

                Host path. In this example, the host path is /tmp/test.

                +
                +
                +

                spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

                +
                +

              3. Run the following command to create the pod:

                kubectl create -f hostPath-pod-example.yaml

                +

              4. Verify the mounting.

                1. Query the pod name of the workload (hostpath-pod-example is used as an example).
                  kubectl get po|grep hostpath-pod-example
                  +

                  Expected outputs:

                  +
                  hostpath-pod-example-55c8d4dc59-md5d9   1/1     Running   0          35s
                  +
                2. Create the test1 file in the container mount path /tmp.
                  kubectl exec hostpath-pod-example-55c8d4dc59-md5d9 -- touch /tmp/test1
                  +
                3. Verify that the file is created in the host path /tmp/test/.
                  ll /tmp/test/
                  +

                  Expected outputs:

                  +
                  -rw-r--r--  1 root root    0 Jun  1 16:12 test1
                  +
                4. Create the test2 file in the host path /tmp/test/.
                  touch /tmp/test/test2
                  +
                5. Verify that the file is created in the container mount path.
                  kubectl exec hostpath-pod-example-55c8d4dc59-md5d9 -- ls -l /tmp
                  +

                  Expected outputs:

                  +
                  -rw-r--r-- 1 root root 0 Jun  1 08:12 test1
                  +-rw-r--r-- 1 root root 0 Jun  1 08:14 test2
                  +
                +

              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0378.html b/docs/cce/umn/cce_10_0378.html new file mode 100644 index 00000000..b15c9794 --- /dev/null +++ b/docs/cce/umn/cce_10_0378.html @@ -0,0 +1,299 @@ + + +

              PersistentVolumeClaims (PVCs)

              +

              A PVC describes a workload's request for storage resources. This request consumes existing PVs in the cluster. If there is no PV available, underlying storage and PVs are dynamically created. When creating a PVC, you need to describe the attributes of the requested persistent storage, such as the size of the volume and the read/write permissions.

              +

              Notes and Constraints

When a PVC is created, the system checks whether the cluster has an available PV with a matching configuration. If there is one, the PVC is bound to that PV. If no PV meets the matching conditions, the system dynamically creates a storage volume.

              + +
              + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

              Description

              +

              PVC Field

              +

              PV Field

              +

              Matching Logic

              +

              region

              +

              pvc.metadata.labels (failure-domain.beta.kubernetes.io/region or topology.kubernetes.io/region)

              +

              pv.metadata.labels (failure-domain.beta.kubernetes.io/region or topology.kubernetes.io/region)

              +

              Defined or not defined at the same time. If defined, the settings must be consistent.

              +

              zone

              +

              pvc.metadata.labels (failure-domain.beta.kubernetes.io/zone or topology.kubernetes.io/zone)

              +

              pv.metadata.labels (failure-domain.beta.kubernetes.io/zone or topology.kubernetes.io/zone)

              +

              Defined or not defined at the same time. If defined, the settings must be consistent.

              +

              EVS disk type

              +

              pvc.metadata.annotations (everest.io/disk-volume-type)

              +

              pv.spec.csi.volumeAttributes (everest.io/disk-volume-type)

              +

              Defined or not defined at the same time. If defined, the settings must be consistent.

              +

              Key ID

              +

              pvc.metadata.annotations (everest.io/crypt-key-id)

              +

              pv.spec.csi.volumeAttributes (everest.io/crypt-key-id)

              +

              Defined or not defined at the same time. If defined, the settings must be consistent.

              +

              accessMode

              +

              accessMode

              +

              accessMode

              +

              The settings must be consistent.

              +

              Storage class

              +

              storageclass

              +

              storageclass

              +

              The settings must be consistent.

              +
              +
              +
              +
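After a PVC is created, you can check whether it has been bound to a matching or dynamically created PV. A minimal check (the default namespace is only an example); a STATUS of Bound indicates that the PVC has been bound to a PV:

kubectl get pvc -n default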

              Volume Access Modes

              PVs can be mounted to the host system only in the mode supported by underlying storage resources. For example, a file storage system can be read and written by multiple nodes, but an EVS disk can be read and written by only one node.

              +
              • ReadWriteOnce: A volume can be mounted as read-write by a single node. This access mode is supported by EVS.
              • ReadWriteMany: A volume can be mounted as read-write by multiple nodes. This access mode is supported by SFS, SFS Turbo, and OBS.
              + +
              + + + + + + + + + + + + + + + + + + + + + +
              Table 1 Supported access modes

              Storage Type

              +

              ReadWriteOnce

              +

              ReadWriteMany

              +

              EVS

              +

              √

              +

              ×

              +

              SFS

              +

              ×

              +

              √

              +

              OBS

              +

              ×

              +

              √

              +

              SFS Turbo

              +

              ×

              +

              √

              +
              +
              +
              +

              Using a Storage Class to Create a PVC

A StorageClass describes the storage classes available in the cluster. When creating a PVC, specify a StorageClass so that the PV and the underlying storage resources are dynamically created.

              +
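Before creating the PVC, you can list the storage classes available in the cluster. A minimal check; csi-disk, csi-nas, and csi-obs listed below are the classes used in this section:

kubectl get storageclass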

              Using the CCE Console

              +
              1. Log in to the CCE console.
              2. Click the cluster name and go to the cluster console. Choose Storage from the navigation pane, and click the PersistentVolumeClaims (PVCs) tab.
              3. Click Create PVC in the upper right corner. In the dialog box displayed, set the PVC parameters.

                • Storage Volume Claim Type: Select a storage type as required.
                • PVC Name: Enter a PVC name.
                • Creation Method: Select Dynamic creation.
                • Storage Classes: Select the required storage class. The following storage resources can be dynamically provisioned:
                  • csi-disk: EVS disk.
                  • csi-nas: SFS Capacity-Oriented file storage.
                  • csi-obs: OBS bucket.
                  +
                • AZ (supported only by EVS): Select the AZ where the EVS disk is located.
                • Disk Type (supported only by EVS disks): Select an EVS disk type as required. EVS disk types vary in different regions.
                  • Common I/O
                  • High I/O
                  • Ultra-high I/O
                  +
                • Access Mode: ReadWriteOnce and ReadWriteMany are supported. For details, see Volume Access Modes.
                • Capacity (GiB) (only EVS and SFS are supported): storage capacity. This parameter is not available for OBS.
                • Encryption (supported only for EVS and SFS): Select Encryption. After selecting this option, you need to select a key.
                • Secret (supported only for OBS): Select an access key for OBS. For details, see Using a Custom AK/SK to Mount an OBS Volume.
                +

              4. Click Create.
              +

              Using YAML

              +

              Example YAML for EVS

              +
              • failure-domain.beta.kubernetes.io/region: region where the cluster is located.

                For details about the value of region, see Regions and Endpoints.

                +
              • failure-domain.beta.kubernetes.io/zone: AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

                For details about the value of zone, see Regions and Endpoints.

                +
              +
              apiVersion: v1
              +kind: PersistentVolumeClaim
              +metadata:
              +  name: pvc-evs-auto-example
              +  namespace: default
              +  annotations:
              +    everest.io/disk-volume-type: SSD    # EVS disk type.
              +    everest.io/crypt-key-id: 0992dbda-6340-470e-a74e-4f0db288ed82  # (Optional) Key ID. The key is used to encrypt EVS disks.
              +    
              +  labels:
              +    failure-domain.beta.kubernetes.io/region: eu-de
              +    failure-domain.beta.kubernetes.io/zone: 
              +spec:
              +  accessModes:
              +  - ReadWriteOnce               # The value must be ReadWriteOnce for EVS.
              +  resources:
              +    requests:
              +      storage: 10Gi             # EVS disk capacity, ranging from 1 to 32768.
              +  storageClassName: csi-disk    # The storage class type is EVS.
              +
              Example YAML for file storage:
              apiVersion: v1
              +kind: PersistentVolumeClaim
              +metadata:
              +  name:  pvc-sfs-auto-example
              +  namespace: default
              +  annotations: 
              +    everest.io/crypt-key-id: 0992dbda-6340-470e-a74e-4f0db288ed82  # (Optional) Key ID. The key is used to encrypt file systems.
              +    everest.io/crypt-alias: sfs/default                            # (Optional) Key name. Mandatory for encrypted volumes.
              +    everest.io/crypt-domain-id: 2cd7ebd02e4743eba4e6342c09e49344   # (Optional) ID of the tenant to which the encrypted volume belongs. Mandatory for encrypted volumes.
              +spec:
              +  accessModes:
              +  - ReadWriteMany               # The value must be ReadWriteMany for SFS.
              +  resources:
              +    requests:
              +      storage: 10Gi                # SFS file system size.
              +  storageClassName: csi-nas        # The storage class type is SFS.
              +
              +

              Example YAML for OBS:

              +
              apiVersion: v1
              +kind: PersistentVolumeClaim
              +metadata:
              +  name: obs-warm-provision-pvc
              +  namespace: default
              +  annotations:
              +    everest.io/obs-volume-type: STANDARD      # OBS bucket type. Currently, standard (STANDARD) and infrequent access (WARM) are supported.
              +    csi.storage.k8s.io/fstype: obsfs          # File type. obsfs indicates to create a parallel file system (recommended), and s3fs indicates to create an OBS bucket.
              +    
              +spec:
              +  accessModes:
              +  - ReadWriteMany             # The value must be ReadWriteMany for OBS.
              +  resources:
              +    requests:
              +      storage: 1Gi                 # This field is valid only for verification (fixed to 1, cannot be empty or 0). The value setting does not take effect for OBS buckets.
              +  storageClassName: csi-obs        # The storage class type is OBS.
              +
              +

              Using a PV to Create a PVC

              If a PV has been created, you can create a PVC to apply for PV resources.

              +

              Using the CCE Console

              +
              1. Log in to the CCE console.
              2. Click the cluster name and go to the cluster console. Choose Storage from the navigation pane, and click the PersistentVolumeClaims (PVCs) tab.
              3. Click Create PVC in the upper right corner. In the dialog box displayed, set the PVC parameters.

                • Storage Volume Claim Type: Select a storage type as required.
                • PVC Name: name of a PVC.
                • Creation Method: Select Existing storage volume.
                • PV: Select the volume to be associated, that is, the PV.
                +

              4. Click Create.
              +

              Using YAML

              +

              Example YAML for EVS

              +
              • failure-domain.beta.kubernetes.io/region: region where the cluster is located.

                For details about the value of region, see Regions and Endpoints.

                +
              • failure-domain.beta.kubernetes.io/zone: AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

                For details about the value of zone, see Regions and Endpoints.

                +
              +
              apiVersion: v1
              +kind: PersistentVolumeClaim
              +metadata:
              +  name: pvc-test
              +  namespace: default
              +  annotations:
              +    everest.io/disk-volume-type: SAS                                # EVS disk type.
              +    everest.io/crypt-key-id: fe0757de-104c-4b32-99c5-ee832b3bcaa3   # (Optional) Key ID. The key is used to encrypt EVS disks.
              +    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
              +    
              +  labels:
              +    failure-domain.beta.kubernetes.io/region: eu-de
              +    failure-domain.beta.kubernetes.io/zone: 
              +spec:
              +  accessModes:
              +  - ReadWriteOnce               # The value must be ReadWriteOnce for EVS.
              +  resources:
              +    requests:
              +      storage: 10Gi              
              +  storageClassName: csi-disk     # Storage class name. The value is csi-disk for EVS.
              +  volumeName: cce-evs-test       # PV name.
              +
              Example YAML for SFS:
              apiVersion: v1
              +kind: PersistentVolumeClaim
              +metadata:
              +  name: pvc-sfs-test
              +  namespace: default
              +  annotations:
              +    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
              +spec:
              +  accessModes:
              +  - ReadWriteMany              # The value must be ReadWriteMany for SFS.
              +  resources:
              +    requests:
              +      storage: 100Gi           # Requested PVC capacity
              +  storageClassName: csi-nas    # Storage class name
              +  volumeName: cce-sfs-test     # PV name
              +
              +

              Example YAML for OBS:

              +
              apiVersion: v1
              +kind: PersistentVolumeClaim
              +metadata:
              +  name: pvc-obs-test
              +  namespace: default
              +  annotations:
              +    everest.io/obs-volume-type: STANDARD                         # OBS bucket type. Currently, standard (STANDARD) and infrequent access (WARM) are supported.
              +    csi.storage.k8s.io/fstype: s3fs                              # File type. obsfs indicates to create a parallel file system (recommended), and s3fs indicates to create an OBS bucket.
              +    csi.storage.k8s.io/node-publish-secret-name: test-user
              +    csi.storage.k8s.io/node-publish-secret-namespace: default
              +    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
              +    
              +spec:
              +  accessModes:
              +  - ReadWriteMany             # The value must be ReadWriteMany for OBS.
              +  resources:
              +    requests:
              +      storage: 1Gi            # Requested PVC capacity. This field is valid only for verification (fixed to 1, cannot be empty or 0). The value setting does not take effect for OBS buckets.
              +  storageClassName: csi-obs   # Storage class name. The value is csi-obs for OBS.
              +  volumeName: cce-obs-test    # PV name.
              +
              Example YAML for SFS Turbo:
              apiVersion: v1
              +kind: PersistentVolumeClaim
              +metadata:
              +  name: pvc-test
              +  namespace: default
              +  annotations:
              +    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
              +spec:
              +  accessModes:
              +    - ReadWriteMany               # The value must be ReadWriteMany for SFS Turbo.
              +  resources:
              +    requests:
              +      storage: 100Gi              # Requested PVC capacity.
              +  storageClassName: csi-sfsturbo  # Storage class name. The value is csi-sfsturbo for SFS Turbo.
              +  volumeName: pv-sfsturbo-test         # PV name.
              +
              +
              +

Using a Snapshot to Create a PVC

              The disk type, encryption setting, and disk mode of the created EVS PVC are consistent with those of the snapshot's source EVS disk.

              +
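Before creating the PVC, you can confirm that the snapshot exists and is ready to use. A minimal check, assuming the snapshot was created in the default namespace and the volume snapshot resources are available in the cluster:

kubectl get volumesnapshot -n default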

              Using the CCE Console

              +
              1. Log in to the CCE console.
              2. Click the cluster name and go to the cluster console. Choose Storage from the navigation pane, and click the Snapshots and Backups tab.
              3. Locate the snapshot for which you want to create a PVC, click Create PVC, and specify the PVC name in the displayed dialog box.
              4. Click Create.
              +

              Creating from YAML

              +
              apiVersion: v1
              +kind: PersistentVolumeClaim
              +metadata:
              +  name: pvc-test
              +  namespace: default
              +  annotations:
              +    everest.io/disk-volume-type: SSD     # EVS disk type, which must be the same as that of the source EVS disk of the snapshot.
              +  labels:
              +    failure-domain.beta.kubernetes.io/region: eu-de
              +    failure-domain.beta.kubernetes.io/zone: 
              +spec:
              +  accessModes:
              +  - ReadWriteOnce
              +  resources:
              +    requests:
              +      storage: '10'
              +  storageClassName: csi-disk
              +  dataSource:
              +    name: cce-disksnap-test             # Snapshot name
              +    kind: VolumeSnapshot
              +    apiGroup: snapshot.storage.k8s.io
              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0379.html b/docs/cce/umn/cce_10_0379.html new file mode 100644 index 00000000..6da88d0d --- /dev/null +++ b/docs/cce/umn/cce_10_0379.html @@ -0,0 +1,398 @@ + + +

              PersistentVolumes (PVs)

              +

              A PV is a persistent storage volume in a cluster. Same as a node, a PV is a cluster-level resource.

              +

              Notes and Constraints

• On the new CCE console (the cluster must be upgraded to v1.19.10 or later and the everest add-on to v1.2.10 or later), you can manage PVs directly. On the old CCE console, PVs can only be imported or dynamically created, and you cannot manage the PV lifecycle on the console.
              • Multiple PVs can use the same SFS or SFS Turbo file system with the following restrictions:
                • An error may occur if multiple PVCs/PVs that use the same underlying SFS or SFS Turbo file system are mounted to the same pod.
                • The persistentVolumeReclaimPolicy parameter in the PVs must be set to Retain. Otherwise, when a PV is deleted, the associated underlying volume may be deleted. In this case, other PVs associated with the underlying volume may be abnormal.
                • When the underlying volume is repeatedly used, it is recommended that ReadWriteMany be implemented at the application layer to prevent data overwriting and loss.
                +
              +
              +

              Volume Access Modes

              PVs can be mounted to the host system only in the mode supported by underlying storage resources. For example, a file storage system can be read and written by multiple nodes, but an EVS disk can be read and written by only one node.

              +
              • ReadWriteOnce: A volume can be mounted as read-write by a single node. This access mode is supported by EVS.
              • ReadWriteMany: A volume can be mounted as read-write by multiple nodes. This access mode is supported by SFS, OBS, and SFS Turbo.
              + +
              + + + + + + + + + + + + + + + + + + + + + +
              Table 1 Access modes supported by cloud storage

              Storage Type

              +

              ReadWriteOnce

              +

              ReadWriteMany

              +

              EVS

              +

              √

              +

              ×

              +

              SFS

              +

              ×

              +

              √

              +

              OBS

              +

              ×

              +

              √

              +

              SFS Turbo

              +

              ×

              +

              √

              +
              +
              +
              +

              PV Reclaim Policy

              A PV reclaim policy is used to delete or reclaim underlying volumes when a PVC is deleted. The value can be Delete or Retain.

              +
              • Delete: When a PVC is deleted, the PV and underlying storage resources are deleted.
              • Retain: When a PVC is deleted, the PV and underlying storage resources are not deleted. Instead, you must manually delete these resources. After a PVC is deleted, the PV resource is in the Released state and cannot be bound to the PVC again.
              +

Everest also allows you to delete a PVC without deleting the underlying storage resources. This function can be achieved only by using a YAML file: set the PV reclaim policy to Delete and add the annotation "everest.io/reclaim-policy: retain-volume-only". In this way, when the PVC is deleted, the PV resource is deleted, but the underlying storage resources are retained.

              +
              +
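For an existing PV, the annotation can also be added with kubectl patch instead of editing the YAML file. A minimal sketch; pv-example is a placeholder PV name, and the behavior is subject to the everest version requirements described in this section:

kubectl patch pv pv-example -p '{"metadata":{"annotations":{"everest.io/reclaim-policy":"retain-volume-only"}}}'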

              Creating an EVS Volume

              The requirements for creating an EVS volume are as follows:

              +
              • System disks, DSS disks, and shared disks cannot be used.
              • The EVS disk is one of the supported types (common I/O, high I/O, and ultra-high I/O), and the EVS disk device type is SCSI.
              • The EVS disk is not frozen or used, and the status is available.
              • If the EVS disk is encrypted, the key must be available.
              +
              +

              Using the CCE Console

              +
              1. Log in to the CCE console.
              2. Click the cluster name and access the cluster console. Choose Storage from the navigation pane, and click the PersistentVolumes (PVs) tab.
              3. Click Create Volume in the upper right corner. In the dialog box displayed, set the volume parameters.

                • Volume Type: Select EVS.
                • EVS:
                • PV Name: Enter a PV name.
                • Access Mode: ReadWriteOnce
                • Reclaim Policy: Select Delete or Retain as required. For details, see PV Reclaim Policy.
                +

              4. Click Create.
              +

              Using YAML

              +
              apiVersion: v1
              +kind: PersistentVolume
              +metadata:
              +  annotations:
              +    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
              +    everest.io/reclaim-policy: retain-volume-only         # (Optional) The PV is deleted while the underlying volume is retained.
              +  name: cce-evs-test
              +  labels:
              +    failure-domain.beta.kubernetes.io/region: eu-de
              +    failure-domain.beta.kubernetes.io/zone: 
              +spec:
              +  accessModes:
              +    - ReadWriteOnce     # Access mode. The value is fixed to ReadWriteOnce for EVS.
              +  capacity:
              +    storage: 10Gi       #  EVS disk capacity, in the unit of Gi. The value ranges from 1 to 32768.
              +  csi:
              +    driver: disk.csi.everest.io     # Dependent storage driver for the mounting.
              +    fsType: ext4
              +    volumeHandle: 459581af-e78c-4356-9e78-eaf9cd8525eb   # Volume ID of the EVS disk.
              +    volumeAttributes:
              +      everest.io/disk-mode: SCSI           # Device type of the EVS disk. Only SCSI is supported.
              +      everest.io/disk-volume-type: SAS     # EVS disk type.
              +      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
              +      everest.io/crypt-key-id: 0992dbda-6340-470e-a74e-4f0db288ed82    # (Optional) Encryption key ID. Mandatory for an encrypted disk.
+  persistentVolumeReclaimPolicy: Delete    # Reclaim policy.
              +  storageClassName: csi-disk               # Storage class name. The value must be csi-disk.
              Table 2 Key parameters

              Parameter

              +

              Description

              +

              everest.io/reclaim-policy: retain-volume-only

              +

              This field is optional.

              +

              Currently, only retain-volume-only is supported.

              +

              This field is valid only when the everest version is 1.2.9 or later and the reclaim policy is Delete. If the reclaim policy is Delete and the current value is retain-volume-only, the associated PV is deleted while the underlying storage volume is retained, when a PVC is deleted.

              +

              failure-domain.beta.kubernetes.io/region

              +

              Region where the cluster is located.

              +

              For details about the value of region, see Regions and Endpoints.

              +

              failure-domain.beta.kubernetes.io/zone

              +

              AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

              +

              For details about the value of zone, see Regions and Endpoints.

              +

              volumeHandle

              +

              Volume ID of the EVS disk.

              +

              To obtain the volume ID, log in to the Cloud Server Console. In the navigation pane, choose Elastic Volume Service > Disks. Click the name of the target EVS disk to go to its details page. On the Summary tab page, click the copy button after ID.

              +

              everest.io/disk-volume-type

              +

              EVS disk type. All letters are in uppercase.

              +
              • SATA: common I/O
              • SAS: high I/O
              • SSD: ultra-high I/O
              +

              everest.io/crypt-key-id

              +

              Encryption key ID. This field is mandatory when the volume is an encrypted volume.

              +

              persistentVolumeReclaimPolicy

              +

              A reclaim policy is supported when the cluster version is equal to or later than 1.19.10 and the everest version is equal to or later than 1.2.9.

              +

              The Delete and Retain policies are supported.

              +

              Delete:

              +
              • If everest.io/reclaim-policy is not specified, both the PV and EVS disk are deleted when a PVC is deleted.
• If everest.io/reclaim-policy is set to retain-volume-only, when a PVC is deleted, the PV is deleted but the EVS resources are retained.
              +

              Retain: When a PVC is deleted, the PV and underlying storage resources are not deleted. Instead, you must manually delete these resources. After that, the PV resource is in the Released state and cannot be bound to the PVC again.

              +

              If high data security is required, you are advised to select Retain to prevent data from being deleted by mistake.

              +
              +
              +
              +
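A PV created in this way is used by binding it to a PVC. The following is a minimal sketch of such a PVC, assuming the statically created PV cce-evs-test from the example above; the PVC name and namespace are illustrative, and the access mode and capacity must match the PV:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-evs-test           # Example PVC name
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce            # Must match the access mode of the PV
  resources:
    requests:
      storage: 10Gi            # Must match the capacity of the PV
  storageClassName: csi-disk   # Same storage class as the PV
  volumeName: cce-evs-test     # Name of the statically created PV to bind

After the PVC is created, kubectl get pvc shows it in the Bound state, and it can be mounted to a workload like any other PVC.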

              Creating an SFS Volume

              • The SFS file system and the cluster must be in the same VPC.
              +
              +

              Using the CCE Console

              +
              1. Log in to the CCE console.
              2. Click the cluster name and access the cluster console. Choose Storage from the navigation pane, and click the PersistentVolumes (PVs) tab.
              3. Click Create Volume in the upper right corner. In the dialog box displayed, set the volume parameters.

                • Volume Type: Select SFS.
                • Select SFS resources.
                • PV Name: Enter a PV name.
                • Access Mode: ReadWriteMany
                • Reclaim Policy: Select Delete or Retain as required. For details, see PV Reclaim Policy.
                • Mount Options: mount options. For details about the options, see Setting Mount Options.
                +

              4. Click Create.
              +

              Using YAML

              +
              apiVersion: v1
              +kind: PersistentVolume
              +metadata:
              +  annotations:
              +    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
              +    everest.io/reclaim-policy: retain-volume-only      # (Optional) The PV is deleted while the underlying volume is retained.
              +  name: cce-sfs-test
              +spec:
              +  accessModes:
              +  - ReadWriteMany      # Access mode. The value must be ReadWriteMany for SFS.
              +  capacity:
              +    storage: 1Gi       # File storage capacity.
              +  csi:
              +    driver: disk.csi.everest.io   # Mount the dependent storage driver.
              +    fsType: nfs
              +    volumeHandle: 30b3d92a-0bc7-4610-b484-534660db81be   # SFS file system ID.
              +    volumeAttributes:
              +      everest.io/share-export-location:   # Shared path of the file storage
              +      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
              +  persistentVolumeReclaimPolicy: Retain    # Reclaim policy.
              +  storageClassName: csi-nas                # Storage class name
              +  mountOptions: []                         # Mount options
              Table 3 Key parameters

              Parameter

              +

              Description

              +

              everest.io/reclaim-policy: retain-volume-only

              +

              This field is optional.

              +

              Currently, only retain-volume-only is supported.

              +

              This field is valid only when the everest version is 1.2.9 or later and the reclaim policy is Delete. If the reclaim policy is Delete and the current value is retain-volume-only, the associated PV is deleted while the underlying storage volume is retained, when a PVC is deleted.

              +

              volumeHandle

              +
              • If SFS Capacity-Oriented file storage is used, enter the file storage ID.

                On the management console, choose Service List > Storage > Scalable File Service. In the SFS file system list, click the name of the target file system and copy the content following ID on the page displayed.

                +
              +

              everest.io/share-export-location

              +

              Shared path of the file system.

              +

              On the management console, choose Service List > Storage > Scalable File Service. You can obtain the shared path of the file system from the Mount Address column.

              +

              mountOptions

              +

              Mount options.

              +

              If not specified, the following configurations are used by default. For details, see SFS Volume Mount Options.

              +
              mountOptions:
              +- vers=3
              +- timeo=600
              +- nolock
              +- hard
              +

              everest.io/crypt-key-id

              +

              Encryption key ID. This field is mandatory when the volume is an encrypted volume.

              +

              persistentVolumeReclaimPolicy

              +

              A reclaim policy is supported when the cluster version is equal to or later than 1.19.10 and the everest version is equal to or later than 1.2.9.

              +

              The options are as follows:

              +

              Delete:

              +
              • If everest.io/reclaim-policy is not specified, both the PV and SFS volume are deleted when a PVC is deleted.
• If everest.io/reclaim-policy is set to retain-volume-only, when a PVC is deleted, the PV is deleted but the file storage resources are retained.
              +

              Retain: When a PVC is deleted, the PV and underlying storage resources are not deleted. Instead, you must manually delete these resources. After that, the PV resource is in the Released state and cannot be bound to the PVC again.

              +

              If high data security is required, you are advised to select Retain to prevent data from being deleted by mistake.

              +
              +
              +
              +

              Creating an OBS Volume

              Secure containers do not support OBS volumes.

              +

A single user can create a maximum of 100 OBS buckets on the console. If you have a large number of CCE workloads and want to mount an OBS bucket to every workload, you may easily run out of buckets. In this scenario, you are advised to access OBS through the OBS API or SDK instead of mounting OBS buckets to workloads on the console.

              +
              +

              Using the CCE Console

              +
              1. Log in to the CCE console.
              2. Click the cluster name and access the cluster console. Choose Storage from the navigation pane, and click the PersistentVolumes (PVs) tab.
              3. Click Create Volume in the upper right corner. In the dialog box displayed, set the volume parameters.

                • Volume Type: Select OBS.
                • Select OBS resources.
                • PV Name: Enter a PV name.
                • Access Mode: ReadWriteMany
                • Reclaim Policy: Select Delete or Retain as required. For details, see PV Reclaim Policy.
                • Secret: You can customize the access key (AK/SK) for mounting an OBS volume. You can use the AK/SK to create a secret and mount the secret to the PV. For details, see Using a Custom AK/SK to Mount an OBS Volume.
                • Mount Options: mount options. For details about the options, see Setting Mount Options.
                +

              4. Click Create.
              +

              Using YAML

              +
              apiVersion: v1
              +kind: PersistentVolume
              +metadata:
              +  annotations:
              +    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
              +    everest.io/reclaim-policy: retain-volume-only         # (Optional) The PV is deleted while the underlying volume is retained.
              +  name: cce-obs-test
              +spec:
              +  accessModes:
              +  - ReadWriteMany                      # Access mode. The value must be ReadWriteMany for OBS.
              +  capacity:
              +    storage: 1Gi      # Storage capacity. This parameter is set only to meet the PV format requirements. It can be set to any value. The actual OBS space size is not limited by this value.
              +  csi:
              +    driver: obs.csi.everest.io        # Dependent storage driver for the mounting.
              +    fsType: obsfs                      # OBS file type.
              +    volumeHandle: cce-obs-bucket       # OBS bucket name.
              +    volumeAttributes:
              +      everest.io/obs-volume-type: STANDARD
              +      everest.io/region: eu-de
              +      
              +      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
              +    nodePublishSecretRef:
              +      name: test-user
              +      namespace: default
              +  persistentVolumeReclaimPolicy: Retain       # Reclaim policy.
              +  storageClassName: csi-obs                   # Storage class name. The value must be csi-obs for OBS.
              +  mountOptions: []                            # Mount options.
              Table 4 Key parameters

              Parameter

              +

              Description

              +

              everest.io/reclaim-policy: retain-volume-only

              +

              This field is optional.

              +

              Currently, only retain-volume-only is supported.

              +

              This field is valid only when the everest version is 1.2.9 or later and the reclaim policy is Delete. If the reclaim policy is Delete and the current value is retain-volume-only, the associated PV is deleted while the underlying storage volume is retained, when a PVC is deleted.

              +

              fsType

              +

              File type. The value can be obsfs or s3fs. If the value is s3fs, an OBS bucket is created and mounted using s3fs. If the value is obsfs, an OBS parallel file system is created and mounted using obsfs. You are advised to set this field to obsfs.

              +

              volumeHandle

              +

              OBS bucket name.

              +

              everest.io/obs-volume-type

              +

              Storage class, including STANDARD (standard bucket) and WARM (infrequent access bucket).

              +

              everest.io/region

              +

              Region where the OBS bucket is deployed.

              +

              For details about the value of region, see Regions and Endpoints.

              +

              nodePublishSecretRef

              +

              Access key (AK/SK) used for mounting the object storage volume. You can use the AK/SK to create a secret and mount it to the PV. For details, see Using a Custom AK/SK to Mount an OBS Volume.

              +

              mountOptions

              +

              Mount options. For details, see OBS Volume Mount Options.

              +

              persistentVolumeReclaimPolicy

              +

              A reclaim policy is supported when the cluster version is equal to or later than 1.19.10 and the everest version is equal to or later than 1.2.9.

              +

              The Delete and Retain policies are supported.

              +

              Delete:

              +
              • If everest.io/reclaim-policy is not specified, both the PV and OBS volume are deleted when a PVC is deleted.
• If everest.io/reclaim-policy is set to retain-volume-only, when a PVC is deleted, the PV is deleted but the object storage resources are retained.
              +

              Retain: When a PVC is deleted, the PV and underlying storage resources are not deleted. Instead, you must manually delete these resources. After that, the PV resource is in the Released state and cannot be bound to the PVC again.

              +

              If high data security is required, you are advised to select Retain to prevent data from being deleted by mistake.

              +
              +
              +
              +

              Creating an SFS Turbo Volume

              SFS Turbo and the cluster must be in the same VPC.

              +
              +

              Using the CCE Console

              +
              1. Log in to the CCE console.
              2. Click the cluster name and access the cluster console. Choose Storage from the navigation pane, and click the PersistentVolumes (PVs) tab.
              3. Click Create Volume in the upper right corner. In the dialog box displayed, set the volume parameters.

                • Volume Type: Select SFS Turbo.
                • SFS Turbo: Select SFS Turbo resources.
                • PV Name: Enter a PV name.
                • Access Mode: ReadWriteMany
                • Reclaim Policy: Select Retain. For details, see PV Reclaim Policy.
                • Mount Options: mount options. For details about the options, see Setting Mount Options.
                +

              4. Click Create.
              +

              Using YAML

              +
              apiVersion: v1
              +kind: PersistentVolume
              +metadata:
              +  annotations:
              +    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
              +  name: cce-sfsturbo-test
              +spec:
              +  accessModes:
              +    - ReadWriteMany       # Access mode. The value must be ReadWriteMany for SFS Turbo.
              +  capacity:
              +    storage: 100.00Gi     # SFS Turbo volume capacity.
              +  csi:
              +    driver: sfsturbo.csi.everest.io    # Dependent storage driver for the mounting.
              +    fsType: nfs
              +    volumeHandle: 6674bd0a-d760-49de-bb9e-805c7883f047      # SFS Turbo volume ID.
              +    volumeAttributes:
              +      everest.io/share-export-location: 192.168.0.85:/      # Shared path of the SFS Turbo volume.
              +      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
              +  persistentVolumeReclaimPolicy: Retain     # Reclaim policy.
              +  storageClassName: csi-sfsturbo            # Storage class name. The value must be csi-sfsturbo for SFS Turbo.
              +  mountOptions: []                          # Mount options.
              Table 5 Key parameters

              Parameter

              +

              Description

              +

              volumeHandle

              +

              SFS Turbo volume ID.

              +

              You can obtain the ID on the SFS Turbo storage instance details page on the SFS console.

              +

              everest.io/share-export-location

              +

              Shared path of the SFS Turbo volume.

              +

              mountOptions

              +

              Mount options.

              +

              If not specified, the following configurations are used by default. For details, see SFS Volume Mount Options.

              +
              mountOptions:
              +- vers=3
              +- timeo=600
              +- nolock
              +- hard
              +

              persistentVolumeReclaimPolicy

              +

              A reclaim policy is supported when the cluster version is equal to or later than 1.19.10 and the everest version is equal to or later than 1.2.9.

              +

              The Delete and Retain policies are supported.

              +

              Delete:

              +
              • If everest.io/reclaim-policy is not specified, both the PV and SFS Turbo volume are deleted when a PVC is deleted.
• If everest.io/reclaim-policy is set to retain-volume-only, when a PVC is deleted, the PV is deleted but the SFS Turbo resources are retained.
              +

              Retain: When a PVC is deleted, the PV and underlying storage resources are not deleted. Instead, you must manually delete these resources. After that, the PV resource is in the Released state and cannot be bound to the PVC again.

              +

              If high data security is required, you are advised to select Retain to prevent data from being deleted by mistake.

              +
              +
              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0380.html b/docs/cce/umn/cce_10_0380.html new file mode 100644 index 00000000..857d6b61 --- /dev/null +++ b/docs/cce/umn/cce_10_0380.html @@ -0,0 +1,211 @@ + + +

              StorageClass

              +

StorageClass describes the storage classes used in the cluster. You need to specify a StorageClass when creating a PVC or PV. Currently, CCE provides storage classes such as csi-disk, csi-nas, and csi-obs by default. When defining a PVC, you can use the storageClassName field to automatically create a PV of the corresponding type and the underlying storage resources.

              +

You can run the following command to query the storage classes that CCE supports. You can use the CSI plug-in provided by CCE to customize a storage class, which functions similarly to the default storage classes in CCE.

              +
              # kubectl get sc
              +NAME                PROVISIONER                     AGE
              +csi-disk            everest-csi-provisioner         17d          # Storage class for EVS disks
              +csi-nas             everest-csi-provisioner         17d          # Storage class for SFS 1.0 file systems
              +csi-obs             everest-csi-provisioner         17d          # Storage class for OBS buckets
              +

              After a StorageClass is set, PVs can be automatically created and maintained. You only need to specify the StorageClass when creating a PVC, which greatly reduces the workload.

              +

In addition to the predefined storage classes provided by CCE, you can also customize storage classes. The following sections describe the challenges, the solution, and the method of customizing storage classes.

              +

              Challenges

              When using storage resources in CCE, the most common method is to specify storageClassName to define the type of storage resources to be created when creating a PVC. The following configuration shows how to use a PVC to apply for an SAS (high I/O) EVS disk (block storage).

              +
              apiVersion: v1
              +kind: PersistentVolumeClaim
              +metadata:
              +  name: pvc-evs-example
              +  namespace: default
              +  annotations:
              +    everest.io/disk-volume-type: SAS
              +spec:
              +  accessModes:
              +  - ReadWriteOnce
              +  resources:
              +    requests:
              +      storage: 10Gi
              +  storageClassName: csi-disk
              +

If you need to specify the EVS disk type, set the everest.io/disk-volume-type field. The value SAS used here indicates the high I/O EVS disk type. You can also choose SATA (common I/O) or SSD (ultra-high I/O).

              +

              This configuration method may not work if you want to:

              +
              • Set storageClassName only, which is simpler than specifying the EVS disk type by using everest.io/disk-volume-type.
              • Avoid modifying YAML files or Helm charts. Some users switch from self-built or other Kubernetes services to CCE and have written YAML files of many applications. In these YAML files, different types of storage resources are specified by different StorageClassNames. When using CCE, they need to modify a large number of YAML files or Helm charts to use storage resources, which is labor-consuming and error-prone.
              • Set the default storageClassName for all applications to use the default storage class. In this way, you can create storage resources of the default type without needing to specify storageClassName in the YAML file.
              +
              +

              Solution

              This section describes how to set a custom storage class in CCE and how to set the default storage class. You can specify different types of storage resources by setting storageClassName.

              +
              • For the first scenario, you can define custom storageClassNames for SAS and SSD EVS disks. For example, define a storage class named csi-disk-sas for creating SAS disks. The following figure shows the differences before and after you use a custom storage class.

                +
              • For the second scenario, you can define a storage class with the same name as that in the existing YAML file without needing to modify storageClassName in the YAML file.
              • For the third scenario, you can set the default storage class as described below to create storage resources without specifying storageClassName in YAML files.
                apiVersion: v1
                +kind: PersistentVolumeClaim
                +metadata:
                +  name: pvc-evs-example
                +  namespace: default
                +spec:
                +  accessModes:
                +  - ReadWriteOnce
                +  resources:
                +    requests:
                +      storage: 10Gi
                +
              +
              +

              Custom Storage Classes

              You can customize a high I/O storage class in a YAML file. For example, the name csi-disk-sas indicates that the disk type is SAS (high I/O).

              +
              apiVersion: storage.k8s.io/v1
              +kind: StorageClass
              +metadata:
              +  name: csi-disk-sas                          # Name of the high I/O storage class, which can be customized.
              +parameters:
              +  csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io
              +  csi.storage.k8s.io/fstype: ext4
              +  everest.io/disk-volume-type: SAS            # High I/O EVS disk type, which cannot be customized.
              +  everest.io/passthrough: "true"
              +provisioner: everest-csi-provisioner
              +reclaimPolicy: Delete
              +volumeBindingMode: Immediate
              +allowVolumeExpansion: true                    # true indicates that capacity expansion is allowed.
              +

For an ultra-high I/O storage class, you can set the class name to csi-disk-ssd to create SSD EVS disks (ultra-high I/O).

              +
              apiVersion: storage.k8s.io/v1
              +kind: StorageClass
              +metadata:
              +  name: csi-disk-ssd                       # Name of the ultra-high I/O storage class, which can be customized.
              +parameters:
              +  csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io
              +  csi.storage.k8s.io/fstype: ext4
              +  everest.io/disk-volume-type: SSD         # Ultra-high I/O EVS disk type, which cannot be customized.
              +  everest.io/passthrough: "true"
              +provisioner: everest-csi-provisioner
              +reclaimPolicy: Delete
              +volumeBindingMode: Immediate
              +allowVolumeExpansion: true
              +

reclaimPolicy: indicates the reclaim policy of the underlying cloud storage. The value can be Delete or Retain.

              +
              • Delete: When a PVC is deleted, both the PV and the EVS disk are deleted.
              • Retain: When a PVC is deleted, the PV and underlying storage resources are not deleted. Instead, you must manually delete these resources. After that, the PV resource is in the Released state and cannot be bound to the PVC again.
              +

              The reclamation policy set here has no impact on the SFS Turbo storage. Therefore, the yearly/monthly SFS Turbo resources will not be reclaimed when the cluster or PVC is deleted.

              +
              +

              If high data security is required, you are advised to select Retain to prevent data from being deleted by mistake.

              +

              After the definition is complete, run the kubectl create commands to create storage resources.

              +
              # kubectl create -f sas.yaml
              +storageclass.storage.k8s.io/csi-disk-sas created
              +# kubectl create -f ssd.yaml
              +storageclass.storage.k8s.io/csi-disk-ssd created
              +

Query the storage classes again. Two more storage classes are displayed in the command output, as shown below.

              +
              # kubectl get sc
              +NAME                PROVISIONER                     AGE
              +csi-disk            everest-csi-provisioner         17d
              +csi-disk-sas        everest-csi-provisioner         2m28s
              +csi-disk-ssd        everest-csi-provisioner         16s
              +csi-disk-topology   everest-csi-provisioner         17d
              +csi-nas             everest-csi-provisioner         17d
              +csi-obs             everest-csi-provisioner         17d
              +csi-sfsturbo        everest-csi-provisioner         17d
              +

Other types of storage resources can be defined in a similar way. You can use kubectl to obtain the YAML file and modify it as required.

              +
              • File storage
                # kubectl get sc csi-nas -oyaml
                +kind: StorageClass
                +apiVersion: storage.k8s.io/v1
                +metadata:
                +  name: csi-nas
                +provisioner: everest-csi-provisioner
                +parameters:
                +  csi.storage.k8s.io/csi-driver-name: nas.csi.everest.io
                +  csi.storage.k8s.io/fstype: nfs
                +  everest.io/share-access-level: rw
                +  everest.io/share-access-to: 5e3864c6-e78d-4d00-b6fd-de09d432c632   # ID of the VPC to which the cluster belongs
                +  everest.io/share-is-public: 'false'
                +  everest.io/zone: xxxxx          # AZ
                +reclaimPolicy: Delete
                +allowVolumeExpansion: true
                +volumeBindingMode: Immediate
                +
              • Object storage
                # kubectl get sc csi-obs -oyaml
                +kind: StorageClass
                +apiVersion: storage.k8s.io/v1
                +metadata:
                +  name: csi-obs
                +provisioner: everest-csi-provisioner
                +parameters:
                +  csi.storage.k8s.io/csi-driver-name: obs.csi.everest.io
                +  csi.storage.k8s.io/fstype: s3fs           # Object storage type. s3fs indicates an object bucket, and obsfs indicates a parallel file system.
                +  everest.io/obs-volume-type: STANDARD      # Storage class of the OBS bucket
                +reclaimPolicy: Delete
                +volumeBindingMode: Immediate
                +
              +
              +

              Setting a Default Storage Class

              You can specify a storage class as the default class. In this way, if you do not specify storageClassName when creating a PVC, the PVC is created using the default storage class.

              +

              For example, to specify csi-disk-ssd as the default storage class, edit your YAML file as follows:

              +
              apiVersion: storage.k8s.io/v1
              +kind: StorageClass
              +metadata:
              +  name: csi-disk-ssd
              +  annotations:
              +    storageclass.kubernetes.io/is-default-class: "true"   # Specifies the default storage class in a cluster. A cluster can have only one default storage class.
              +parameters:
              +  csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io
              +  csi.storage.k8s.io/fstype: ext4
              +  everest.io/disk-volume-type: SSD
              +  everest.io/passthrough: "true"
              +provisioner: everest-csi-provisioner
              +reclaimPolicy: Delete
              +volumeBindingMode: Immediate
              +allowVolumeExpansion: true
              +

Delete the existing csi-disk-ssd storage class, run the kubectl create command to create it again, and then query the storage classes. The following information is displayed.

              +
              # kubectl delete sc csi-disk-ssd
              +storageclass.storage.k8s.io "csi-disk-ssd" deleted
              +# kubectl create -f ssd.yaml
              +storageclass.storage.k8s.io/csi-disk-ssd created
              +# kubectl get sc
              +NAME                     PROVISIONER                     AGE
              +csi-disk                 everest-csi-provisioner         17d
              +csi-disk-sas             everest-csi-provisioner         114m
              +csi-disk-ssd (default)   everest-csi-provisioner         9s
              +csi-disk-topology        everest-csi-provisioner         17d
              +csi-nas                  everest-csi-provisioner         17d
              +csi-obs                  everest-csi-provisioner         17d
              +csi-sfsturbo             everest-csi-provisioner         17d
              +
              +
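If you prefer not to delete and re-create the storage class, the same result can be achieved by patching the existing object. This is standard Kubernetes usage rather than a CCE-specific operation; csi-disk-ssd is the storage class from the example above:

# kubectl patch storageclass csi-disk-ssd -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}'

To cancel the default setting later, patch the annotation back to "false". Keep in mind that a cluster should have only one default storage class at a time.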

              Verification

              • Use csi-disk-sas to create a PVC.
                apiVersion: v1
                +kind: PersistentVolumeClaim
                +metadata:
                +  name:  sas-disk
                +spec:
                +  accessModes:
                +  - ReadWriteOnce
                +  resources:
                +    requests:
                +      storage: 10Gi
                +  storageClassName: csi-disk-sas
                +

Create the PVC and view its details. As shown below, the PVC is created and bound, and the value of STORAGECLASS is csi-disk-sas.

                +
                # kubectl create -f sas-disk.yaml 
                +persistentvolumeclaim/sas-disk created
                +# kubectl get pvc
                +NAME       STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
                +sas-disk   Bound    pvc-6e2f37f9-7346-4419-82f7-b42e79f7964c   10Gi       RWO            csi-disk-sas   24s
                +# kubectl get pv
                +NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                     STORAGECLASS   REASON   AGE
                +pvc-6e2f37f9-7346-4419-82f7-b42e79f7964c   10Gi       RWO            Delete           Bound       default/sas-disk          csi-disk-sas            30s
                +

                View the PVC details on the CCE console. On the PV details page, you can see that the disk type is high I/O.

                +

                +
              • If storageClassName is not specified, the default configuration is used, as shown below.
                apiVersion: v1
                +kind: PersistentVolumeClaim
                +metadata:
                +  name:  ssd-disk
                +spec:
                +  accessModes:
                +  - ReadWriteOnce
                +  resources:
                +    requests:
                +      storage: 10Gi
                +

                Create and view the storage resource. You can see that the storage class of PVC ssd-disk is csi-disk-ssd, indicating that csi-disk-ssd is used by default.

                +
                # kubectl create -f ssd-disk.yaml 
                +persistentvolumeclaim/ssd-disk created
                +# kubectl get pvc
                +NAME       STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
                +sas-disk   Bound    pvc-6e2f37f9-7346-4419-82f7-b42e79f7964c   10Gi       RWO            csi-disk-sas   16m
                +ssd-disk   Bound    pvc-4d2b059c-0d6c-44af-9994-f74d01c78731   10Gi       RWO            csi-disk-ssd   10s
                +# kubectl get pv
                +NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                     STORAGECLASS   REASON   AGE
                +pvc-4d2b059c-0d6c-44af-9994-f74d01c78731   10Gi       RWO            Delete           Bound       default/ssd-disk          csi-disk-ssd            15s
                +pvc-6e2f37f9-7346-4419-82f7-b42e79f7964c   10Gi       RWO            Delete           Bound       default/sas-disk          csi-disk-sas            17m
                +

                View the PVC details on the CCE console. On the PV details page, you can see that the disk type is ultra-high I/O.

                +

                +
              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0381.html b/docs/cce/umn/cce_10_0381.html new file mode 100644 index 00000000..fda481b9 --- /dev/null +++ b/docs/cce/umn/cce_10_0381.html @@ -0,0 +1,66 @@ + + +

              Snapshots and Backups

              +

              CCE works with EVS to support snapshots. A snapshot is a complete copy or image of EVS disk data at a certain point of time, which can be used for data DR.

              +

You can create snapshots to rapidly save the disk data at specified time points. In addition, you can use snapshots to create new disks so that the created disks contain the snapshot data from the beginning.

              +

              Precautions

              • The snapshot function is available only for clusters of v1.15 or later and requires the CSI-based everest add-on.
              • The subtype (common I/O, high I/O, or ultra-high I/O), disk mode (SCSI or VBD), data encryption, sharing status, and capacity of an EVS disk created from a snapshot must be the same as those of the disk associated with the snapshot. These attributes cannot be modified after being queried or set.
              • Snapshots can be created only for available or in-use CSI disks. During the free trial, you can create up to 7 snapshots per disk.
              • Snapshot data of encrypted disks is stored encrypted, and that of non-encrypted disks is stored non-encrypted.
              +
              +

              Application Scenario

              The snapshot feature helps address your following needs:

              +
              • Routine data backup

You can create snapshots for EVS disks regularly and use them to recover your data in case data loss or data inconsistency occurs due to misoperations, viruses, or attacks.

                +
              • Rapid data restoration

                You can create a snapshot or multiple snapshots before an OS change, application software upgrade, or a service data migration. If an exception occurs during the upgrade or migration, service data can be rapidly restored to the time point when the snapshot was created.

                +
                For example, a fault occurred on system disk A of ECS A, and therefore ECS A cannot be started. Because system disk A is already faulty, the data on system disk A cannot be restored by rolling back snapshots. In this case, you can use an existing snapshot of system disk A to create EVS disk B and attach it to ECS B that is running properly. Then, ECS B can read data from system disk A using EVS disk B.

                The snapshot capability provided by CCE is the same as the CSI snapshot function provided by the Kubernetes community. EVS disks can be created only based on snapshots, and snapshots cannot be rolled back to source EVS disks.

                +
                +
                +
              • Rapid deployment of multiple services

                You can use a snapshot to create multiple EVS disks containing the same initial data, and these disks can be used as data resources for various services, for example, data mining, report query, and development and testing. This method protects the initial data and creates disks rapidly, meeting the diversified service data requirements.

                +
              +
              +

              Creating a Snapshot

              Using the CCE Console

              +
              1. Log in to the CCE console.
              2. Click the cluster name and go to the cluster console. Choose Storage from the navigation pane, and click the Snapshots and Backups tab.
              3. Click Create Snapshot in the upper right corner. In the dialog box displayed, set related parameters.

                • Snapshot Name: Enter a snapshot name.
• Storage: Select a PVC. Only an EVS PVC can be used to create a snapshot.
                +

              4. Click Create.
              +

              Creating from YAML

              +
              kind: VolumeSnapshot
              +apiVersion: snapshot.storage.k8s.io/v1beta1
              +metadata:
              +  finalizers:
              +    - snapshot.storage.kubernetes.io/volumesnapshot-as-source-protection
              +    - snapshot.storage.kubernetes.io/volumesnapshot-bound-protection
              +  name: cce-disksnap-test
              +  namespace: default
              +spec:
              +  source:
+    persistentVolumeClaimName: pvc-evs-test     # PVC name. Only an EVS PVC can be used to create a snapshot.
              +  volumeSnapshotClassName: csi-disk-snapclass
              +
              +
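After the VolumeSnapshot is created, you can check whether it is ready with standard kubectl commands. The snapshot name cce-disksnap-test is taken from the example above; the READYTOUSE column changes to true once the underlying EVS snapshot is available:

# kubectl get volumesnapshot cce-disksnap-test -n default
# kubectl describe volumesnapshot cce-disksnap-test -n default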

Using a Snapshot to Create a PVC

              The disk type, encryption setting, and disk mode of the created EVS PVC are consistent with those of the snapshot's source EVS disk.

              +

              Using the CCE Console

              +
              1. Log in to the CCE console.
              2. Click the cluster name and go to the cluster console. Choose Storage from the navigation pane, and click the Snapshots and Backups tab.
              3. Locate the snapshot for which you want to create a PVC, click Create PVC, and specify the PVC name in the displayed dialog box.
              4. Click Create.
              +

              Creating from YAML

              +
              apiVersion: v1
              +kind: PersistentVolumeClaim
              +metadata:
              +  name: pvc-test
              +  namespace: default
              +  annotations:
              +    everest.io/disk-volume-type: SSD     # EVS disk type, which must be the same as that of the source EVS disk of the snapshot.
              +  labels:
              +    failure-domain.beta.kubernetes.io/region: eu-de
              +    failure-domain.beta.kubernetes.io/zone: 
              +spec:
              +  accessModes:
              +  - ReadWriteOnce
              +  resources:
              +    requests:
              +      storage: '10'
              +  storageClassName: csi-disk
              +  dataSource:
              +    name: cce-disksnap-test             # Snapshot name
              +    kind: VolumeSnapshot
              +    apiGroup: snapshot.storage.k8s.io
              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0384.html b/docs/cce/umn/cce_10_0384.html new file mode 100644 index 00000000..cacb3a4c --- /dev/null +++ b/docs/cce/umn/cce_10_0384.html @@ -0,0 +1,760 @@ + + +

              Hybrid Deployment of Online and Offline Jobs

              +
              +
              +

              Online and Offline Jobs

              +

Jobs can be classified into online jobs and offline jobs based on whether services are always online.

• Online jobs: Such jobs run for a long time, with regular traffic surges, tidal resource requests, and high requirements on SLA, such as advertising and e-commerce services.
• Offline jobs: Such jobs run for a short time, have high computing requirements, and can tolerate high latency, such as AI and big data services.
              +
              +
              +

              Resource Oversubscription and Hybrid Deployment

              +

Many services see surges in traffic. To ensure performance and stability, resources are often requested at the maximum needed. However, the surges may ebb very shortly and the requested resources, if not released, are wasted in non-peak hours. Especially for online jobs that request a large quantity of resources to ensure SLA, resource utilization can be very low.

              +

Resource oversubscription is the process of making use of idle requested resources. Oversubscribed resources are suitable for deploying offline jobs, which focus on throughput but have low SLA requirements and can tolerate certain failures.

              +

Hybrid deployment of online and offline jobs in a cluster can better utilize cluster resources.

              +
Figure 1 Resource oversubscription
              +
              +
              +

              Oversubscription for Hybrid Deployment

              +

Hybrid deployment is supported, and CPU and memory resources can be oversubscribed. The key features are as follows:

              +
                +
• Offline jobs preferentially run on oversubscribed nodes.

  If both oversubscribed and non-oversubscribed nodes exist, the former score higher than the latter, and offline jobs are preferentially scheduled to oversubscribed nodes.

• Online jobs can use only non-oversubscribed resources if scheduled to an oversubscribed node.

  Offline jobs can use both oversubscribed and non-oversubscribed resources of an oversubscribed node.

• In the same scheduling period, online jobs take precedence over offline jobs.

  If both online and offline jobs exist, online jobs are scheduled first. When the node resource usage exceeds the upper limit and the node requests exceed 100%, offline jobs are evicted.

• CPU/memory isolation is provided by kernels.

  CPU isolation: Online jobs can quickly preempt the CPU resources of offline jobs and suppress the CPU usage of the offline jobs.

  Memory isolation: When system memory resources are used up and OOM Kill is triggered, the kernel evicts offline jobs first.

• kubelet offline job admission rules:

  After a pod is scheduled to a node, kubelet starts the pod only when the node resources can meet the pod request (predicateAdmitHandler.Admit). kubelet starts the pod when both of the following conditions are met:

  • The total request of the pods to be started and the running online jobs < the allocatable node resources
  • The total request of the pods to be started and the running online/offline jobs < the allocatable node resources plus the oversubscribed resources
              • Resource oversubscription and hybrid deployment:

  If only hybrid deployment is used, you need to configure the label volcano.sh/colocation=true for the node and delete the node label volcano.sh/oversubscription or set its value to false.

  If the label volcano.sh/colocation=true is configured for a node, hybrid deployment is enabled. If the label volcano.sh/oversubscription=true is configured, resource oversubscription is enabled. The following table lists the available feature combinations after hybrid deployment or resource oversubscription is enabled.

  Hybrid Deployment Enabled (volcano.sh/colocation=true) | Resource Oversubscription Enabled (volcano.sh/oversubscription=true) | Use Oversubscribed Resources? | Conditions for Evicting Offline Pods
  No  | No  | No  | None
  Yes | No  | No  | The node resource usage exceeds the high threshold.
  No  | Yes | Yes | The node resource usage exceeds the high threshold, and the node request exceeds 100%.
  Yes | Yes | Yes | The node resource usage exceeds the high threshold.
              +
              +
              +

              Notes and Constraints

              +
Specifications

• Kubernetes version:
  • 1.19: 1.19.16-r4 or later
  • 1.21: 1.21.7-r0 or later
  • 1.23: 1.23.5-r0 or later
• Cluster Type: CCE or CCE Turbo
• Node OS: EulerOS 2.9 (kernel-4.18.0-147.5.1.6.h729.6.eulerosv2r9.x86_64)
• Node Type: ECS
• The volcano add-on version: 1.7.0 or later

Constraints

• Before enabling the volcano oversubscription plug-in, ensure that the overcommit plug-in is not enabled.
• Modifying the label of an oversubscribed node does not affect the running pods.
• Running pods cannot be converted between online and offline services. To convert services, you need to rebuild the pods.
• If the label volcano.sh/oversubscription=true is configured for a node in the cluster, the oversubscription configuration must be added to the volcano add-on. Otherwise, the scheduling of oversubscribed nodes will be abnormal. Ensure that you have correctly configured the labels because the scheduler does not check the add-on and node configurations. For details about the labels, see Configuring Oversubscription Labels for Scheduling.
• To disable oversubscription, perform the following operations:
  • Remove the volcano.sh/oversubscription label from the oversubscribed node.
  • Set over-subscription-resource to false.
  • Modify the configmap of the volcano scheduler named volcano-scheduler-configmap and remove the oversubscription add-on.
• If cpu-manager-policy is set to static core binding on a node, do not assign the Guaranteed QoS class to offline pods. If core binding is required, change the pods to online pods. Otherwise, offline pods may occupy the CPUs of online pods, causing online pod startup failures, and offline pods will fail to start even though they are successfully scheduled.
• If cpu-manager-policy is set to static core binding on a node, do not bind cores to all online pods. Otherwise, online pods will occupy all the CPU or memory resources, leaving only a small amount of oversubscribed resources.
              +
              +
              +
              +

              Configuring Oversubscription Labels for Scheduling

              +

If the label volcano.sh/oversubscription=true is configured for a node in the cluster, the oversubscription configuration must be added to the volcano add-on. Otherwise, the scheduling of oversubscribed nodes will be abnormal. For details about the related configuration, see Table 1.

Ensure that you have correctly configured the labels because the scheduler does not check the add-on and node configurations.

Table 1 Configuring oversubscription labels for scheduling

Oversubscription in Add-on | Oversubscription Label on Node | Scheduling
Yes | Yes | Triggered by oversubscription
Yes | No  | Triggered
No  | No  | Triggered
No  | Yes | Not triggered or failed. Avoid this configuration.
              +
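Because the scheduler does not validate the combination of add-on and node settings for you, it helps to list the nodes together with their oversubscription label before changing the volcano configuration. A quick check with standard kubectl (the label key is the one described above):

# kubectl get nodes -L volcano.sh/oversubscription

Nodes that show true in the additional column are treated as oversubscribed nodes.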

              Using Hybrid Deployment

              +
                +
1. Configure the volcano add-on.

                +

                  +
1. Use kubectl to connect to the cluster.
2. Install the volcano add-on and add the oversubscription plug-in to volcano-scheduler-configmap. Ensure that the plug-in configuration does not contain the overcommit plug-in. If - name: overcommit exists, delete this configuration.
                  # kubectl edit cm volcano-scheduler-configmap -n kube-system
                  +apiVersion: v1
                  +data:
                  +  volcano-scheduler.conf: |
                  +    actions: "enqueue, allocate, backfill"
                  +    tiers:
                  +    - plugins:
                  +      - name: gang
                  +      - name: priority
                  +      - name: conformance
                  +      - name: oversubscription
                  +    - plugins:
                  +      - name: drf
                  +      - name: predicates
                  +      - name: nodeorder
                  +      - name: binpack
                  +    - plugins:
                  +      - name: cce-gpu-topology-predicate
                  +      - name: cce-gpu-topology-priority
                  +      - name: cce-gpu
                  +
2. Enable the node oversubscription feature.

                +

A label can be configured to use oversubscribed resources only after the oversubscription feature is enabled for a node. Related nodes can be created only in a node pool. To enable the oversubscription feature, perform the following steps:

                +
                  +
1. Create a node pool.
2. Choose More > Manage in the Operation column of the created node pool.
3. In the Manage Component window that is displayed, set over-subscription-resource under kubelet to true and click OK.
                +

                +

                +
              4. +
              5. Set the node oversubscription label. +

                +

                The volcano.sh/oversubscription label needs to be configured for an + oversubscribed node. If this label is set for a node and the value is true, the node is an oversubscribed node. Otherwise, the node is not + an oversubscribed node.

                +
                kubectl label node 192.168.0.0 volcano.sh/oversubscription=true
                +

An oversubscribed node also supports the oversubscription thresholds listed in Table 2. For example:

                +
                kubectl annotate node 192.168.0.0 volcano.sh/evicting-cpu-high-watermark=70
                +

                Querying the node information

                +
                # kubectl describe node 192.168.0.0
                +Name:             192.168.0.0
                +Roles:              <none>
                +Labels:           ...
                +                  volcano.sh/oversubscription=true
                +Annotations:      ...
                +                  volcano.sh/evicting-cpu-high-watermark: 70
                + +
Table 2 Node oversubscription annotations

volcano.sh/evicting-cpu-high-watermark
  When the CPU usage of a node exceeds the specified value, offline job eviction is triggered and the node becomes unschedulable.
  The default value is 80, indicating that offline job eviction is triggered when the CPU usage of a node exceeds 80%.

volcano.sh/evicting-cpu-low-watermark
  After eviction is triggered, scheduling starts again when the CPU usage of a node falls below the specified value.
  The default value is 30, indicating that scheduling starts again when the CPU usage of a node falls below 30%.

volcano.sh/evicting-memory-high-watermark
  When the memory usage of a node exceeds the specified value, offline job eviction is triggered and the node becomes unschedulable.
  The default value is 60, indicating that offline job eviction is triggered when the memory usage of a node exceeds 60%.

volcano.sh/evicting-memory-low-watermark
  After eviction is triggered, scheduling starts again when the memory usage of a node falls below the specified value.
  The default value is 30, indicating that scheduling starts again when the memory usage of a node falls below 30%.

volcano.sh/oversubscription-types
  Oversubscribed resource type. The options are as follows:
  • cpu (oversubscribed CPU)
  • memory (oversubscribed memory)
  • cpu,memory (oversubscribed CPU and memory)
  The default value is cpu,memory.

                +
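As a quick sketch (reusing the node IP from the examples above; the threshold values are purely illustrative), the oversubscribed resource types and eviction thresholds can be adjusted with additional annotations. Add --overwrite when changing a value that is already set.

# Oversubscribe only CPU on this node
kubectl annotate node 192.168.0.0 volcano.sh/oversubscription-types=cpu --overwrite
# Trigger offline job eviction when memory usage exceeds 70%, and resume scheduling below 40%
kubectl annotate node 192.168.0.0 volcano.sh/evicting-memory-high-watermark=70 --overwrite
kubectl annotate node 192.168.0.0 volcano.sh/evicting-memory-low-watermark=40 --overwrite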
4. Deploy online and offline jobs.

  The volcano.sh/qos-level annotation needs to be added to distinguish offline jobs. The value is an integer ranging from -7 to 7. If the value is less than 0, the job is an offline job. If the value is greater than or equal to 0, the job is a high-priority job, that is, an online job. You do not need to set this annotation for online jobs. For both online and offline jobs, set schedulerName to volcano to enable the Volcano scheduler.

                +
                +
                +

The priorities of online/online and offline/offline jobs are not differentiated, and the value validity is not verified. If the value of volcano.sh/qos-level of an offline job is not a negative integer ranging from -7 to -1, the job is processed as an online job.

                +
                +
                +

                For an offline job:

                +
                kind: Deployment
                +apiVersion: apps/v1
                +spec:
                +  replicas: 4
                +  template:
                +    metadata:
                +      annotations:
                +        metrics.alpha.kubernetes.io/custom-endpoints: '[{"api":"","path":"","port":"","names":""}]'
                +        volcano.sh/qos-level: "-1" # Offline job label
                +    spec:
                +      schedulerName: volcano             # The Volcano scheduler is used.
                +      ...
                +

                For an online job:

                +
                kind: Deployment
                +apiVersion: apps/v1
                +spec:
                +  replicas: 4
                +  template:
                +    metadata:
                +      annotations:
                +        metrics.alpha.kubernetes.io/custom-endpoints: '[{"api":"","path":"","port":"","names":""}]'
                +    spec:
                +      schedulerName: volcano          # The Volcano scheduler is used.
                +      ...
                +

                +
5. Run the following command to check the number of oversubscribed resources and the resource usage:

                +

                kubectl describe node <nodeIP> +

                +
                # kubectl describe node 192.168.0.0
                +Name:             192.168.0.0
                +Roles:              <none>
                +Labels:           ...
                +                  volcano.sh/oversubscription=true
                +Annotations:      ...
                +                  volcano.sh/oversubscription-cpu: 2335
                +                  volcano.sh/oversubscription-memory: 341753856
                +Allocatable:
                +  cpu:               3920m
                +  memory:            6263988Ki
                +Allocated resources:
                +  (Total limits may be over 100 percent, i.e., overcommitted.)
                +  Resource           Requests      Limits
                +  --------           --------      ------
                +  cpu                 4950m (126%)  4950m (126%)
                +  memory             1712Mi (27%)  1712Mi (27%)
                +

                +
              +
              +
              +

              Hybrid Deployment Example

              +

The following example describes how to deploy online and offline jobs in hybrid mode.

              +
                +
1. Assume that a cluster has two nodes: one oversubscribed node and one non-oversubscribed node.

                +

                # kubectl get node
                +NAME           STATUS   ROLES    AGE    VERSION
                +192.168.0.173   Ready    <none>   4h58m   v1.19.16-r2-CCE22.5.1
                +192.168.0.3     Ready    <none>   148m    v1.19.16-r2-CCE22.5.1
                +
                  +
• 192.168.0.173 is an oversubscribed node (with the volcano.sh/oversubscription=true label).
• 192.168.0.3 is a non-oversubscribed node (without the volcano.sh/oversubscription=true label).
                +
                # kubectl describe node 192.168.0.173
                +Name:               192.168.0.173
                +Roles:              <none>
                +Labels:             beta.kubernetes.io/arch=amd64
                +                    ...
                +                    volcano.sh/oversubscription=true
                +

                +
2. Submit offline job creation requests. If resources are sufficient, all offline jobs will be scheduled to the oversubscribed node.

                +

                The offline job template is as follows: +
                apiVersion: apps/v1
                +kind: Deployment
                +metadata:
                +  name: offline
                +  namespace: default
                +spec:
                +  replicas: 2
                +  selector:
                +    matchLabels:
                +      app: offline
                +  template:
                +    metadata:
                +      labels:
                +        app: offline
                +      annotations:
                +        volcano.sh/qos-level: "-1"       #Offline job label
                +    spec:
                +      schedulerName: volcano             # The Volcano scheduler is used.
                +      containers:
                +        - name: container-1
                +          image: nginx:latest
                +          imagePullPolicy: IfNotPresent
                +          resources:
                +            requests:
                +              cpu: 500m
                +              memory: 512Mi
                +            limits:
                +              cpu: "1"
                +              memory: 512Mi
                +      imagePullSecrets:
                +        - name: default-secret
                +
                +
                Offline jobs are scheduled to the oversubscribed node. +
                # kubectl get pod -o wide
                +NAME                      READY   STATUS   RESTARTS  AGE     IP             NODE 
                +offline-69cdd49bf4-pmjp8   1/1    Running   0         5s    192.168.10.178   192.168.0.173
                +offline-69cdd49bf4-z8kxh   1/1    Running   0         5s    192.168.10.131   192.168.0.173
                +
                +

                +
3. Submit online job creation requests. If resources are sufficient, the online jobs will be scheduled to the non-oversubscribed node.

                +

                The online job template is as follows: +
                apiVersion: apps/v1
                +kind: Deployment
                +metadata:
                +  name: online
                +  namespace: default
                +spec:
                +  replicas: 2
                +  selector:
                +    matchLabels:
                +      app: online
                +  template:
                +    metadata:
                +      labels:
                +        app: online
                +    spec:
                +      schedulerName: volcano                 # The Volcano scheduler is used.
                +      containers:
                +        - name: container-1
                +          image: resource_consumer:latest
                +          imagePullPolicy: IfNotPresent
                +          resources:
                +            requests:
                +              cpu: 1400m
                +              memory: 512Mi
                +            limits:
                +              cpu: "2"
                +              memory: 512Mi
                +      imagePullSecrets:
                +        - name: default-secret
                +
                +
                Online jobs are scheduled to the non-oversubscribed node. +
                # kubectl get pod -o wide
                +NAME                   READY   STATUS   RESTARTS  AGE     IP             NODE 
                +online-ffb46f656-4mwr6  1/1    Running   0         5s    192.168.10.146   192.168.0.3
                +online-ffb46f656-dqdv2   1/1    Running   0         5s    192.168.10.67   192.168.0.3
                +
                +

                +
4. Increase the resource usage of the oversubscribed node and check whether offline job eviction is triggered.

                +

                Deploy online jobs to the oversubscribed node (192.168.0.173). +
                apiVersion: apps/v1
                +kind: Deployment
                +metadata:
                +  name: online
                +  namespace: default
                +spec:
                +  replicas: 2
                +  selector:
                +    matchLabels:
                +      app: online
                +  template:
                +    metadata:
                +      labels:
                +        app: online
                +    spec:
                +       affinity:                              # Submit an online job to an oversubscribed node.
                +        nodeAffinity:
                +          requiredDuringSchedulingIgnoredDuringExecution:
                +            nodeSelectorTerms:
                +            - matchExpressions:
                +              - key: kubernetes.io/hostname
                +                operator: In
                +                values:
                +                - 192.168.0.173
                +      schedulerName: volcano                 # The Volcano scheduler is used.
                +      containers:
                +        - name: container-1
                +          image: resource_consumer:latest
                +          imagePullPolicy: IfNotPresent
                +          resources:
                +            requests:
                +              cpu: 700m
                +              memory: 512Mi
                +            limits:
                +              cpu: 700m
                +              memory: 512Mi
                +      imagePullSecrets:
                +        - name: default-secret
                +
                +
Submit the online and offline jobs to the oversubscribed node (192.168.0.173) at the same time.
                # kubectl get pod -o wide
                +NAME                     READY   STATUS   RESTARTS  AGE     IP             NODE 
                +offline-69cdd49bf4-pmjp8  1/1     Running    0      13m   192.168.10.178   192.168.0.173 
                +offline-69cdd49bf4-z8kxh  1/1     Running     0      13m   192.168.10.131   192.168.0.173 
                +online-6f44bb68bd-b8z9p  1/1     Running     0     3m4s   192.168.10.18   192.168.0.173 
                +online-6f44bb68bd-g6xk8  1/1     Running     0     3m12s   192.168.10.69   192.168.0.173
                +
                +
                Observe the oversubscribed node (192.168.0.173). You can find + that oversubscribed resources exist and the CPU allocation rate exceeds 100%. +
                # kubectl describe node 192.168.0.173
                +Name:              192.168.0.173
                +Roles:              <none>
                +Labels:              …
                +                    volcano.sh/oversubscription=true
                +Annotations:         …                  
                +                    volcano.sh/oversubscription-cpu: 2343
                +                    volcano.sh/oversubscription-memory: 3073653200
                +                    …
                +Allocated resources:
                +  (Total limits may be over 100 percent, i.e., overcommitted.)
                +  Resource               Requests      Limits
                +  --------               --------        ------
                +  cpu                    4750m (121%)  7350m (187%)
                +  memory                 3760Mi (61%)  4660Mi (76%)
                +                         …
                +
                +
                Increase the CPU usage of online jobs on the node. Offline job + eviction is triggered. +
                # kubectl get pod -o wide
                +NAME                     READY   STATUS   RESTARTS  AGE     IP             NODE 
                +offline-69cdd49bf4-bwdm7  1/1    Running   0       11m   192.168.10.208  192.168.0.3 
                +offline-69cdd49bf4-pmjp8   0/1    Evicted    0       26m   <none>         192.168.0.173
                +offline-69cdd49bf4-qpdss   1/1     Running   0       11m   192.168.10.174  192.168.0.3  
                +offline-69cdd49bf4-z8kxh   0/1     Evicted    0       26m   <none>        192.168.0.173
                +online-6f44bb68bd-b8z9p   1/1     Running   0       24m   192.168.10.18  192.168.0.173
                +online-6f44bb68bd-g6xk8   1/1     Running   0       24m   192.168.10.69  192.168.0.173
                +
                +

                +
              +
              +
              +

              Handling Suggestions

              +
                +
• After the kubelet of an oversubscribed node is restarted, the resource view of the Volcano scheduler is not synchronized with that of kubelet. As a result, OutOfCPU occurs in some newly scheduled jobs, which is normal. After a period of time, the Volcano scheduler can properly schedule online and offline jobs again.
• After online and offline jobs are submitted, you are not advised to dynamically change the job type (by adding or deleting the annotation volcano.sh/qos-level: "-1") because the current kernel does not support changing an offline job into an online job.
• CCE collects the resource usage (CPU/memory) of all pods running on a node based on the status information in the cgroups system. The resource usage may differ from the monitored resource usage, for example, the resource statistics displayed by running the top command.
• You can add oversubscribed resources (such as CPU and memory) at any time.

  You can reduce the oversubscribed resource types only when the resource allocation rate does not exceed 100%.
              +
              +
              +
              + +
              \ No newline at end of file diff --git a/docs/cce/umn/cce_10_0385.html b/docs/cce/umn/cce_10_0385.html new file mode 100644 index 00000000..5754de66 --- /dev/null +++ b/docs/cce/umn/cce_10_0385.html @@ -0,0 +1,376 @@ + + +

              Service Annotations

              +

              CCE allows you to add annotations to a YAML file to realize some advanced Service functions. The following table describes the annotations you can add.

              +

              The annotations of a Service are the parameters that need to be specified for connecting to a load balancer. For details about how to use the annotations, see Using kubectl to Create a Service (Automatically Creating a Load Balancer).
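As a minimal sketch of how several of these annotations fit together in a Service manifest (the load balancer ID, Service name, and ports below are placeholders, not values from this guide):

apiVersion: v1
kind: Service
metadata:
  name: nginx-elb                                   # Placeholder Service name
  annotations:
    kubernetes.io/elb.class: union                  # Shared load balancer
    kubernetes.io/elb.id: <your-load-balancer-id>   # ID of an existing load balancer (placeholder)
    kubernetes.io/elb.lb-algorithm: ROUND_ROBIN     # Weighted round robin
spec:
  type: LoadBalancer
  selector:
    app: nginx
  ports:
    - name: cce-service-0
      port: 80
      targetPort: 80
      protocol: TCP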

              Table 1 Service annotations

              Parameter

              +

              Type

              +

              Description

              +

              Default Value on the Console

              +

              Supported Cluster Version

              +

              kubernetes.io/elb.class

              +

              String

              +

              Select a proper load balancer type.

              +

              The value can be:

              +
              • union: shared load balancer
              • performance: dedicated load balancer, which can be used only in clusters of v1.17 and later.
              +

              performance

              +

              v1.9 or later

              +

              kubernetes.io/elb.id

              +

              String

              +

              ID of a load balancer. The value can contain 1 to 100 characters.

              +

              Mandatory when an existing load balancer is to be associated.

              +

              How to obtain:

              +

              On the management console, click Service List, and choose Networking > Elastic Load Balance. Click the name of the target load balancer. On the Summary tab page, find and copy the ID.

              +

              None

              +

              v1.9 or later

              +

              kubernetes.io/elb.subnet-id

              +

              String

              +

              ID of the subnet where the cluster is located. The value can contain 1 to 100 characters.

              +
              • Mandatory when a cluster of v1.11.7-r0 or earlier is to be automatically created.
              • Optional for clusters later than v1.11.7-r0.
              +

              None

              +

              Mandatory for versions earlier than v1.11.7-r0

              +

              Discarded in versions later than v1.11.7-r0

              +

              kubernetes.io/elb.autocreate

              +

              Table 2

              +

              Whether to automatically create a load balancer associated with the Service.

              +

              Example:

              +
              • If a public network load balancer will be automatically created, set this parameter to the following value:

                {"type":"public","bandwidth_name":"cce-bandwidth-1551163379627","bandwidth_chargemode":"bandwidth","bandwidth_size":5,"bandwidth_sharetype":"PER","eip_type":"5_bgp","name":"james"}

                +
              • If a private network load balancer will be automatically created, set this parameter to the following value:

                {"type":"inner","name":"A-location-d-test"}

                +
              +

              None

              +

              v1.9 or later

              +

              kubernetes.io/elb.lb-algorithm

              +

              String

              +

              Specifies the load balancing algorithm of the backend server group.

              +

              Value:

              +
              • ROUND_ROBIN: weighted round robin algorithm
              • LEAST_CONNECTIONS: weighted least connections algorithm
              • SOURCE_IP: source IP hash algorithm
              +

              When the value is SOURCE_IP, the weights of backend servers in the server group are invalid.

              +

              ROUND_ROBIN

              +

              v1.9 or later

              +

              kubernetes.io/elb.health-check-flag

              +

              String

              +

              Whether to enable the ELB health check.

              +
• Enabling health check: Leave this parameter blank or set it to on.
• Disabling health check: Set this parameter to off.
              +

              If this parameter is enabled, the kubernetes.io/elb.health-check-option field must also be specified at the same time.

              +

              off

              +

              v1.9 or later

              +

              kubernetes.io/elb.health-check-option

              +

              Table 3

              +

              ELB health check configuration items.

              +

              None

              +

              v1.9 or later

              +

              kubernetes.io/elb.session-affinity-mode

              +

              String

              +

              Listeners ensure session stickiness based on IP addresses. Requests from the same IP address will be forwarded to the same backend server.

              +
              • Disabling sticky session: Do not set this parameter.
              • Enabling sticky session: Set this parameter to SOURCE_IP, indicating that the sticky session is based on the source IP address.
              +

              None

              +

              v1.9 or later

              +

              kubernetes.io/elb.session-affinity-option

              +

              Table 4

              +

              Sticky session timeout.

              +

              None

              +

              v1.9 or later

              +

              kubernetes.io/hws-hostNetwork

              +

              Boolean

              +

              Whether the workload Services use the host network. Setting this parameter to true will enable the load balancer to forward requests to the host network.

              +

              The value is true or false.

              +

              The default value is false, indicating that the host network is not used.

              +

              None

              +

              v1.9 or later

              +
              +
              Table 2 Data structure of the elb.autocreate field

              Parameter

              +

              Mandatory

              +

              Type

              +

              Description

              +

              name

              +

              No

              +

              String

              +

              Name of the load balancer that is automatically created.

              +

              Value range: 1 to 64 characters, including lowercase letters, digits, and underscores (_). The value must start with a lowercase letter and end with a lowercase letter or digit.

              +

              Default: cce-lb+service.UID

              +

              type

              +

              No

              +

              String

              +

              Network type of the load balancer.

              +
              • public: public network load balancer
              • inner: private network load balancer
              +

              Default: inner

              +

              bandwidth_name

              +

              Yes for public network load balancers

              +

              String

              +

              Bandwidth name. The default value is cce-bandwidth-******.

              +

              Value range: 1 to 64 characters, including lowercase letters, digits, and underscores (_). The value must start with a lowercase letter and end with a lowercase letter or digit.

              +

              bandwidth_chargemode

              +

              No

              +

              String

              +

              Bandwidth mode.

              + +

              bandwidth_size

              +

              Yes for public network load balancers

              +

              Integer

              +

Bandwidth size. The value ranges from 1 Mbit/s to 2000 Mbit/s. Set this parameter based on the bandwidth range allowed in your region.

              +

              bandwidth_sharetype

              +

              Yes for public network load balancers

              +

              String

              +

              Bandwidth sharing mode.

              +
              • PER: dedicated bandwidth
              +

              eip_type

              +

              Yes for public network load balancers

              +

              String

              +

              EIP type.

              +
              • 5_bgp: dynamic BGP
              • 5_sbgp: static BGP
              +

              available_zone

              +

              Yes

              +

              Array of strings

              +

              AZ where the load balancer is located.

              +

              This parameter is available only for dedicated load balancers.

              +

              l4_flavor_name

              +

              Yes

              +

              String

              +

              Flavor name of the layer-4 load balancer.

              +

              This parameter is available only for dedicated load balancers.

              +

              l7_flavor_name

              +

              No

              +

              String

              +

              Flavor name of the layer-7 load balancer.

              +

              This parameter is available only for dedicated load balancers.

              +

              elb_virsubnet_ids

              +

              No

              +

              Array of strings

              +

Subnet where the backend server of the load balancer is located. If this parameter is left blank, the default cluster subnet is used. Load balancers occupy a different number of subnet IP addresses depending on their specifications, so you are not advised to use the subnet CIDR blocks of other resources (such as clusters and nodes) as the load balancer CIDR block.

              +

              This parameter is available only for dedicated load balancers.

              +

              Example:

              +
              "elb_virsubnet_ids": [
              +   "14567f27-8ae4-42b8-ae47-9f847a4690dd"
              + ]
              +
              +
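For a dedicated load balancer, the autocreate value can also carry the dedicated-only fields listed above. The following sketch is illustrative only; the AZ and flavor names are placeholders that depend on your region.

metadata:
  annotations:
    kubernetes.io/elb.class: performance    # Dedicated load balancer
    # AZ and flavor names are placeholders; use values valid in your region.
    kubernetes.io/elb.autocreate: '{"type":"public","bandwidth_name":"cce-bandwidth-1551163379627","bandwidth_chargemode":"bandwidth","bandwidth_size":5,"bandwidth_sharetype":"PER","eip_type":"5_bgp","available_zone":["az-example-1"],"l4_flavor_name":"L4_flavor.elb.s1.small"}'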
              Table 3 Data structure description of the elb.health-check-option field

              Parameter

              +

              Mandatory

              +

              Type

              +

              Description

              +

              delay

              +

              No

              +

              String

              +

              Initial waiting time (in seconds) for starting the health check.

              +

              Value range: 1 to 50. Default value: 5

              +

              timeout

              +

              No

              +

              String

              +

              Health check timeout, in seconds.

              +

              Value range: 1 to 50. Default value: 10

              +

              max_retries

              +

              No

              +

              String

              +

              Maximum number of health check retries.

              +

              Value range: 1 to 10. Default value: 3

              +

              protocol

              +

              No

              +

              String

              +

              Health check protocol.

              +

              Default value: protocol of the associated Service

              +

              Value options: TCP, UDP, or HTTP

              +

              path

              +

              No

              +

              String

              +

              Health check URL. This parameter needs to be configured when the protocol is HTTP.

              +

              Default value: /

              +

              The value can contain 1 to 10,000 characters.

              +
              +
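For reference, a hedged example of a health check configuration that uses the fields above (the /healthz path is a placeholder; all values are strings):

metadata:
  annotations:
    kubernetes.io/elb.health-check-flag: 'on'
    kubernetes.io/elb.health-check-option: '{"protocol":"HTTP","delay":"5","timeout":"10","max_retries":"3","path":"/healthz"}'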
              Table 4 Data structure of the elb.session-affinity-option field

              Parameter

              +

              Mandatory

              +

              Type

              +

              Description

              +

              persistence_timeout

              +

              Yes

              +

              String

              +

              Sticky session timeout, in minutes. This parameter is valid only when elb.session-affinity-mode is set to SOURCE_IP.

              +

              Value range: 1 to 60. Default value: 60

              +
              +
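Similarly, a sticky-session configuration combining the two session affinity annotations might look as follows (the 30-minute timeout is illustrative):

metadata:
  annotations:
    kubernetes.io/elb.session-affinity-mode: SOURCE_IP                            # Sticky sessions based on source IP address
    kubernetes.io/elb.session-affinity-option: '{"persistence_timeout":"30"}'     # Timeout in minutes (1 to 60)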
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0386.html b/docs/cce/umn/cce_10_0386.html new file mode 100644 index 00000000..2482173b --- /dev/null +++ b/docs/cce/umn/cce_10_0386.html @@ -0,0 +1,60 @@ + + +

              Pod Labels and Annotations

              +

              Pod Annotations

              CCE allows you to add annotations to a YAML file to realize some advanced pod functions. The following table describes the annotations you can add.

              Table 1 Pod annotations

              Annotation

              +

              Description

              +

              Default Value

              +

              kubernetes.AOM.log.stdout

              +

Standard output parameter. If not specified, the stdout logs of all containers are reported to AOM. You can collect stdout logs from specific containers or skip collection entirely, as shown in the examples below and in the sketch after this table.

              +

              Example:

              +
              • Collecting none of the stdout logs:

                kubernetes.AOM.log.stdout: '[]'

                +
              • Collecting stdout logs of container-1 and container-2:

                kubernetes.AOM.log.stdout: '["container-1","container-2"]'

                +
              +

              -

              +

              metrics.alpha.kubernetes.io/custom-endpoints

              +

              Parameter for reporting AOM monitoring metrics that you specify.

              +

              For details, see Custom Monitoring.

              +

              -

              +
              +
              +
              +
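For instance, a minimal sketch of a Deployment whose pods report stdout logs only from a container named container-1 (the workload and container names here are placeholders):

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
      annotations:
        kubernetes.AOM.log.stdout: '["container-1"]'   # Collect stdout logs from container-1 only
    spec:
      containers:
        - name: container-1
          image: nginx:alpine
      imagePullSecrets:
        - name: default-secret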

              Pod Labels

              When you create a workload on the console, the following labels are added to the pod by default. The value of app is the workload name. You can add labels as required.

              +

              +

              The pod labels added here will be added to the selector.matchLabels parameter in the workload definition. The following is an example YAML file:

              +
              ...
              +spec:
              +  selector:
              +    matchLabels:
              +      app: nginx
              +      version: v1
              +  template:
              +    metadata:
              +      labels:
              +        app: nginx
              +        version: v1
              +    spec:
              +      ...
              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0388.html b/docs/cce/umn/cce_10_0388.html new file mode 100644 index 00000000..cd534ae4 --- /dev/null +++ b/docs/cce/umn/cce_10_0388.html @@ -0,0 +1,63 @@ + + +

              Cluster Secrets

              +

              By default, CCE creates the following secrets in each namespace:

              +
              • default-secret
              • paas.elb
              • default-token-xxxxx (xxxxx is a random number.)
              +

              The functions of these secrets are described as follows.

              +

              default-secret

              The type of default-secret is kubernetes.io/dockerconfigjson. The data is the credential for logging in to the SWR image repository and is used to pull images from SWR. If you need to pull an image from SWR when creating a workload on CCE, set imagePullSecrets to default-secret.

              +
              apiVersion: v1                      
              +kind: Pod                          
              +metadata:
              +  name: nginx                      
              +spec:                            
              +  containers:
              +  - image: nginx:alpine            
              +    name: container-0               
              +    resources:                      
              +      limits:
              +        cpu: 100m
              +        memory: 200Mi
              +      requests:
              +        cpu: 100m
              +        memory: 200Mi
              +  imagePullSecrets:
              +  - name: default-secret
              +

The data of default-secret is updated periodically, and the current data will expire after a certain period of time. You can run the describe command to view the expiration time of default-secret.

              +

              Use default-secret directly instead of copying the secret content to create a new one. The credential in the copied secret will expire and the image cannot be pulled.

              +
              +
              $ kubectl describe secret default-secret
              +Name:         default-secret
              +Namespace:    default
              +Labels:       secret-generated-by=cce
              +Annotations:  temporary-ak-sk-expires-at: 2021-11-26 20:55:31.380909 +0000 UTC
              +
              +Type:  kubernetes.io/dockerconfigjson
              +
              +Data
              +====
              +.dockerconfigjson:  347 bytes
              +
              +

              paas.elb

              The data of paas.elb is the temporary AK/SK data, which is used to create ELB load balancers during Service and ingress creation. The data of paas.elb is periodically updated and expires after a certain period of time.

              +

              In practice, you will not directly use paas.elb. However, do not delete it. Otherwise, ELB load balancers will fail to be created.

              +
              +

              default-token-xxxxx

              By default, Kubernetes creates a service account named default for each namespace. default-token-xxxxx is the key of the service account, and xxxxx is a random number.

              +
              $ kubectl get sa
              +NAME     SECRETS   AGE
              +default  1         30d
              +$ kubectl describe sa default
              +Name:                default
              +Namespace:           default
              +Labels:              <none>
              +Annotations:         <none>
              +Image pull secrets:  <none>
              +Mountable secrets:   default-token-vssmw
              +Tokens:              default-token-vssmw
              +Events:              <none>
              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0393.html b/docs/cce/umn/cce_10_0393.html new file mode 100644 index 00000000..05d4838f --- /dev/null +++ b/docs/cce/umn/cce_10_0393.html @@ -0,0 +1,24 @@ + + +

              Deployment Examples

              +

              +
              + + diff --git a/docs/cce/umn/cce_10_0396.html b/docs/cce/umn/cce_10_0396.html new file mode 100644 index 00000000..1519bd29 --- /dev/null +++ b/docs/cce/umn/cce_10_0396.html @@ -0,0 +1,72 @@ + + +

              Setting Basic Container Information

              +

              A workload is an abstract model of a group of pods. One pod can encapsulate one or more containers. You can click Add Container in the upper right corner to add multiple container images and set them separately.

              +

              Table 1 Image parameters

              Parameter

              +

              Description

              +

              Container Name

              +

              Name the container.

              +

              Image Name

              +

              Click Select Image and select the image used by the container.

              +

              If you need to use a third-party image, see Using a Third-Party Image.

              +

              Image Tag

              +

              Select the image tag to be deployed.

              +

              Pull Policy

              +

              Image update or pull policy. If you select Always, the image is pulled from the image repository each time. If you do not select Always, the existing image of the node is preferentially used. If the image does not exist, the image is pulled from the image repository.

              +

              CPU Quota

              +
              • Request: minimum number of CPU cores required by a container. The default value is 0.25 cores.
              • Limit: maximum number of CPU cores available for a container. Do not leave Limit unspecified. Otherwise, intensive use of container resources will occur and your workload may exhibit unexpected behavior.
              +

              Memory Quota

              +
              • Request: minimum amount of memory required by a container. The default value is 512 MiB.
              • Limit: maximum amount of memory available for a container. When memory usage exceeds the specified memory limit, the container will be terminated.
              +

              For more information about Request and Limit, see Setting Container Specifications.

              +

              GPU Quota

              +

              It is configurable only when the cluster contains GPU nodes.

              +
              • All: The GPU is not used.
              • Dedicated: GPU resources are exclusively used by the container.
              • Shared: percentage of GPU resources used by the container. For example, if this parameter is set to 10%, the container uses 10% of GPU resources.
              +

              Privileged Container

              +

              Programs in a privileged container have certain privileges.

              +

              If Privileged Container is enabled, the container is assigned privileges. For example, privileged containers can manipulate network devices on the host machine and modify kernel parameters.

              +

              Init Container

              +

              Indicates whether to use the container as an init container.

              +

An init container is a special container that runs before app containers in a pod. For details, see Init Container.

              +
              +
              +
              +
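These console settings map to standard fields in the container spec. The following minimal sketch shows one possible mapping; the image and quota values are placeholders rather than recommendations.

spec:
  containers:
    - name: container-1
      image: nginx:alpine             # Image name and tag selected on the console
      imagePullPolicy: IfNotPresent   # Pull policy: reuse a local image if it exists
      resources:
        requests:
          cpu: 250m                   # CPU request (0.25 cores)
          memory: 512Mi               # Memory request
        limits:
          cpu: 500m                   # CPU limit
          memory: 1Gi                 # Memory limit
      securityContext:
        privileged: true              # Corresponds to the Privileged Container switch
  imagePullSecrets:
    - name: default-secret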
              + +
              + diff --git a/docs/cce/umn/cce_10_0397.html b/docs/cce/umn/cce_10_0397.html new file mode 100644 index 00000000..f91b4cf7 --- /dev/null +++ b/docs/cce/umn/cce_10_0397.html @@ -0,0 +1,52 @@ + + +

              Configuring the Workload Upgrade Policy

              +

              In actual applications, upgrade is a common operation. A Deployment, StatefulSet, or DaemonSet can easily support application upgrade.

              +

              You can set different upgrade policies:

              +
              • Rolling upgrade: New pods are created gradually and then old pods are deleted. This is the default policy.
              • Replace upgrade: The current pods are deleted and then new pods are created.
              +

              +

              Upgrade Parameters

              • Max. Surge (maxSurge)

                Specifies the maximum number of pods that can exist over spec.replicas. The default value is 25%. For example, if spec.replicas is set to 4, no more than 5 pods can exist during the upgrade process, that is, the upgrade step is 1. The absolute number is calculated from the percentage by rounding up. The value can also be set to an absolute number.

                +

                This parameter is supported only by Deployments.

                +
              • Max. Unavailable Pods (maxUnavailable)

                Specifies the maximum number of pods that can be unavailable during the update process. The default value is 25%. For example, if spec.replicas is set to 4, at least 3 pods exist during the upgrade process, that is, the deletion step is 1. The value can also be set to an absolute number.

                +

                This parameter is supported only by Deployments.

                +
              • Min. Ready Seconds (minReadySeconds)

                A pod is considered available only when the minimum readiness time is exceeded without any of its containers crashing. The default value is 0 (the pod is considered available immediately after it is ready).

                +
              • Revision History Limit (revisionHistoryLimit)

                Specifies the number of old ReplicaSets to retain to allow rollback. These old ReplicaSets consume resources in etcd and crowd the output of kubectl get rs. The configuration of each Deployment revision is stored in its ReplicaSets. Therefore, once the old ReplicaSet is deleted, you lose the ability to roll back to that revision of Deployment. By default, 10 old ReplicaSets will be kept, but the ideal value depends on the frequency and stability of the new Deployments.

                +
              • Max. Upgrade Duration (progressDeadlineSeconds)

                Specifies the number of seconds that the system waits for a Deployment to make progress before reporting a Deployment progress failure. It is surfaced as a condition with Type=Progressing, Status=False, and Reason=ProgressDeadlineExceeded in the status of the resource. The Deployment controller will keep retrying the Deployment. In the future, once automatic rollback will be implemented, the Deployment controller will roll back a Deployment as soon as it observes such a condition.

                +

                If this parameter is specified, the value of this parameter must be greater than that of .spec.minReadySeconds.

                +
              • Scale-In Time Window (terminationGracePeriodSeconds)

                Graceful deletion time. The default value is 30 seconds. When a pod is deleted, a SIGTERM signal is sent and the system waits for the applications in the container to terminate. If the application is not terminated within the time specified by terminationGracePeriodSeconds, a SIGKILL signal is sent to forcibly terminate the pod.

                +
              +
              +
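Putting these parameters together, a Deployment might declare them as follows; this is a sketch with illustrative values only, not recommended settings.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 4
  minReadySeconds: 10                 # New pods count as available only after 10s without crashing
  revisionHistoryLimit: 10            # Keep 10 old ReplicaSets for rollback
  progressDeadlineSeconds: 600        # Report a progress failure after 600s without progress
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1                     # At most 1 extra pod above spec.replicas
      maxUnavailable: 0               # No pod may become unavailable during the upgrade
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      terminationGracePeriodSeconds: 30   # Scale-in time window (graceful deletion)
      containers:
        - name: container-1
          image: nginx:alpine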

              Upgrade Example

              The Deployment can be upgraded in a declarative mode. That is, you only need to modify the YAML definition of the Deployment. For example, you can run the kubectl edit command to change the Deployment image to nginx:alpine. After the modification, query the ReplicaSet and pod. The query result shows that a new ReplicaSet is created and the pod is re-created.

              +
              $ kubectl edit deploy nginx
              +
              +$ kubectl get rs
              +NAME               DESIRED   CURRENT   READY     AGE
              +nginx-6f9f58dffd   2         2         2         1m
              +nginx-7f98958cdf   0         0         0         48m
              +
              +$ kubectl get pods
              +NAME                     READY     STATUS    RESTARTS   AGE
              +nginx-6f9f58dffd-tdmqk   1/1       Running   0          1m
              +nginx-6f9f58dffd-tesqr   1/1       Running   0          1m
              +

              The Deployment can use the maxSurge and maxUnavailable parameters to control the proportion of pods to be re-created during the upgrade, which is useful in many scenarios. The configuration is as follows:

              +
              spec:
              +  strategy:
              +    rollingUpdate:
              +      maxSurge: 1
              +      maxUnavailable: 0
              +    type: RollingUpdate
              +

In the preceding example, the value of spec.replicas is 2. If both maxSurge and maxUnavailable were left at the default value of 25%, maxSurge would allow a maximum of three pods to exist (2 x 1.25 = 2.5, rounded up to 3), and maxUnavailable would require at least two pods to remain available (2 x 0.75 = 1.5, rounded up to 2). That is, during the upgrade process, there would always be two pods running. Each time a new pod is created, an old pod is deleted, until all pods are new.

              +
              +

              Rollback

              Rollback is to roll an application back to the earlier version when a fault occurs during the upgrade. A Deployment can be easily rolled back to the earlier version.

              +

              For example, if the upgraded image is faulty, you can run the kubectl rollout undo command to roll back the Deployment.

              +
              $ kubectl rollout undo deployment nginx
              +deployment.apps/nginx rolled back
              +

              A Deployment can be easily rolled back because it uses a ReplicaSet to control a pod. After the upgrade, the previous ReplicaSet still exists. The Deployment is rolled back by using the previous ReplicaSet to re-create the pod. The number of ReplicaSets stored in a Deployment can be restricted by the revisionHistoryLimit parameter. The default value is 10.

              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0398.html b/docs/cce/umn/cce_10_0398.html new file mode 100644 index 00000000..97e692cb --- /dev/null +++ b/docs/cce/umn/cce_10_0398.html @@ -0,0 +1,53 @@ + + +

              Headless Service

              +

              The preceding types of Services allow internal and external pod access, but not the following scenarios:

              +
              • Accessing all pods at the same time
              • Pods in a Service accessing each other
              +

This is where headless Services come in. A headless Service does not create a cluster IP address, and a DNS query on the Service returns the records of all pods, so the IP addresses of all pods can be obtained. StatefulSets use headless Services to support mutual access between pods.

              +
              apiVersion: v1
              +kind: Service       # Object type (Service)
              +metadata:
              +  name: nginx-headless
              +  labels:
              +    app: nginx
              +spec:
              +  ports:
+    - name: nginx        # Name of the port for communication between pods
              +      port: 80        # Port number for communication between pods
              +  selector:
              +    app: nginx        # Select the pod whose label is app:nginx.
              +  clusterIP: None     # Set this parameter to None, indicating that a headless Service is to be created.
              +

              Run the following command to create a headless Service:

              +
              # kubectl create -f headless.yaml 
              +service/nginx-headless created
              +

              After the Service is created, you can query the Service.

              +
              # kubectl get svc
              +NAME             TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
              +nginx-headless   ClusterIP   None         <none>        80/TCP    5s
              +

              Create a pod to query the DNS. You can view the records of all pods. In this way, all pods can be accessed.

              +
              $ kubectl run -i --tty --image tutum/dnsutils dnsutils --restart=Never --rm /bin/sh
              +If you do not see a command prompt, try pressing Enter.
              +/ # nslookup nginx-0.nginx
              +Server:         10.247.3.10
              +Address:        10.247.3.10#53
              +Name:   nginx-0.nginx.default.svc.cluster.local
              +Address: 172.16.0.31
              +
              +/ # nslookup nginx-1.nginx
              +Server:         10.247.3.10
              +Address:        10.247.3.10#53
              +Name:   nginx-1.nginx.default.svc.cluster.local
              +Address: 172.16.0.18
              +
              +/ # nslookup nginx-2.nginx
              +Server:         10.247.3.10
              +Address:        10.247.3.10#53
              +Name:   nginx-2.nginx.default.svc.cluster.local
              +Address: 172.16.0.19
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0399.html b/docs/cce/umn/cce_10_0399.html new file mode 100644 index 00000000..340595cf --- /dev/null +++ b/docs/cce/umn/cce_10_0399.html @@ -0,0 +1,54 @@ + + +

              Configuring Intra-VPC Access

              +

This section describes how to access intranet resources (outside the cluster but within a VPC) from a container, covering both intra-VPC access and cross-VPC access.

              +

              Intra-VPC Access

              The performance of accessing an intranet from a container varies depending on the container network models of a cluster.

              +
              • Container tunnel network

                The container tunnel network encapsulates network data packets through tunnels based on the node network. A container can access other resources in the same VPC as long as the node can access the resources. If the access fails, check whether the security group of the peer resource allows access from the node where the container is located.

                +
              • Cloud Native Network 2.0

                In the Cloud Native Network 2.0 model, a container is assigned an IP address from the CIDR block of a VPC. The container CIDR block is the subnet of the VPC where the node is located. The container can naturally communicate with other addresses in the VPC. If the access fails, check whether the security group of peer resources allows the access from the container CIDR block.

                +
              • VPC network

                The VPC network model uses VPC routes to forward container traffic. The container CIDR block and the node VPC are not in the same CIDR block. When a container accesses other resources in the same VPC, the security group of the peer resource must allow access of the container CIDR block.

                +

                For example, the CIDR block where the cluster node resides is 192.168.10.0/24, and the container CIDR block is 172.16.0.0/16.

                +

There is an ECS whose IP address is 192.168.10.25 in the VPC (outside the cluster). The security group of the ECS allows access only from the CIDR block of the cluster nodes.

                +

                +

In this case, if you ping 192.168.10.25 from the container, the ping operation fails.

                +
                kubectl exec test01-6cbbf97b78-krj6h  -it -- /bin/sh
                +/ # ping 192.168.10.25
                +PING 192.168.10.25 (192.168.10.25): 56 data bytes
                +^C
                +--- 192.168.10.25 ping statistics ---
                +104 packets transmitted, 0 packets received, 100% packet loss
                +

                Configure the security group to allow access from the container CIDR block 172.16.0.0/16.

                +

                +

In this case, 192.168.10.25 can be pinged from the container.

                +
                $ kubectl exec test01-6cbbf97b78-krj6h  -it -- /bin/sh
                +/ # ping 192.168.10.25
                +PING 192.168.10.25 (192.168.10.25): 56 data bytes
                +64 bytes from 192.168.10.25: seq=0 ttl=64 time=1.412 ms
                +64 bytes from 192.168.10.25: seq=1 ttl=64 time=1.400 ms
                +64 bytes from 192.168.10.25: seq=2 ttl=64 time=1.299 ms
                +64 bytes from 192.168.10.25: seq=3 ttl=64 time=1.283 ms
                +^C
                +--- 192.168.10.25 ping statistics ---
                +4 packets transmitted, 4 packets received, 0% packet loss
                +
              +
              +

              Cross-VPC Access

              Cross-VPC access is implemented by establishing a peering connection between VPCs.

              +
              • In the container tunnel network model, a container can access the peer VPC only when the communication is enabled between the node network and the peer VPC.
              • Cloud Native Network 2.0 is similar to the container tunnel network. You only need to enable the communication between the subnet where the container is located and the peer VPC.
              • Each VPC network has an independent container CIDR block. In addition to the VPC CIDR block, the container CIDR block also needs to be connected.
                Assume that there are two VPCs.
                • vpc-demo: Its CIDR block is 192.168.0.0/16, the cluster is in vpc-demo, and the container CIDR block is 10.0.0.0/16.
                • vpc-demo2: Its CIDR block is 10.1.0.0/16.
                +
                +

                Create a peering connection named peering-demo (the local VPC is vpc-demo and the peer VPC is vpc-demo2). Add the container CIDR block to the route of the peer VPC.

                +

After this configuration, the container CIDR block 10.0.0.0/16 can be accessed from vpc-demo2. During the access, ensure that the security groups of the peer resources allow the required ports.

                +
              +
              +

              Accessing Other Cloud Services

              Common services that communicate with CCE through an intranet include RDS, DCS, Kafka, RabbitMQ, and ModelArts.

              +

              In addition to the network configurations described in Intra-VPC Access and Cross-VPC Access, you also need to check whether these cloud services allow external access. For example, the DCS Redis instance can be accessed only by the IP addresses in its whitelist. Generally, these cloud services can be accessed by IP addresses in the same VPC. However, the container CIDR block in the VPC network model is different from the CIDR block of the VPC. Therefore, you must add the container CIDR block to the whitelist.

              +
              +

              What If a Container Fails to Access an Intranet?

              If an intranet cannot be accessed from a container, perform the following operations:

              +
              1. View the security group rule of the peer server to check whether the container is allowed to access the peer server.
                • For the container tunnel network model, allow access from the IP addresses of the nodes where the containers are located.
                • For the VPC network model, allow access from the container CIDR block.
                • For the Cloud Native Network 2.0 model, allow access from the subnet where the containers are located.
                +
              2. Check whether a whitelist is configured for the peer server. For example, the DCS Redis instance can be accessed only by the IP addresses in its whitelist. Add the container and node CIDR blocks to the whitelist.
              3. Check whether a container engine is installed on the peer server and whether its local container network conflicts with the container CIDR block in CCE. If the CIDR blocks conflict, the access fails.
              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0400.html b/docs/cce/umn/cce_10_0400.html new file mode 100644 index 00000000..69b75ac7 --- /dev/null +++ b/docs/cce/umn/cce_10_0400.html @@ -0,0 +1,28 @@ + + +

              Accessing Public Networks from a Container

              +

              Containers can access public networks in either of the following ways:

              +
              • Bind a public IP address to the node where the container is located if the network model is VPC network or tunnel network.
              • Bind a public IP address to the pod. (When the Cloud Native Network 2.0 model is used, manually bind an EIP to the ENI or sub-ENI of the pod on the VPC console. This method is not recommended because the IP address of a pod changes after the pod is rescheduled. As a result, the new pod cannot access the public network.)
              • Configure SNAT rules through NAT Gateway.
              +

              You can use NAT Gateway to enable container pods in a VPC to access public networks. NAT Gateway provides source network address translation (SNAT), which translates private IP addresses to a public IP address by binding an elastic IP address (EIP) to the gateway, providing secure and efficient access to the Internet. Figure 1 shows the SNAT architecture. The SNAT function allows the container pods in a VPC to access the Internet without being bound to an EIP. SNAT supports a large number of concurrent connections, which makes it suitable for applications involving a large number of requests and connections.

              +
              Figure 1 SNAT
              +

              To enable a container pod to access the Internet, perform the following steps:

              +
              1. Assign an EIP.

                1. Log in to the management console.
                2. Click in the upper left corner of the management console and select a region and a project.
                3. Click at the upper left corner and choose Networking > Elastic IP in the expanded list.
                4. On the EIPs page, click Create EIP.
                5. Set parameters as required.

                  Set Region to the region where container pods are located.

                  +
                  +
                +

              2. Create a NAT gateway.

                1. Log in to the management console.
                2. Click in the upper left corner of the management console and select a region and a project.
                3. Click at the upper left corner and choose Networking > NAT Gateway in the expanded list.
                4. On the displayed page, click Create Public NAT Gateway in the upper right corner.
                5. Set parameters as required.

                  Select the same VPC.

                  +
                  +
                +

              3. Configure an SNAT rule and bind the EIP to the subnet.

                1. Log in to the management console.
                2. Click in the upper left corner of the management console and select a region and a project.
                3. Click at the upper left corner and choose Networking > NAT Gateway in the expanded list.
                4. On the page displayed, click the name of the NAT gateway for which you want to add the SNAT rule.
                5. On the SNAT Rules tab page, click Add SNAT Rule.
                6. Set parameters as required.
                +

                SNAT rules take effect by CIDR block. As different container network models use different communication modes, the subnet needs to be selected according to the following rules:

                +
                • Tunnel network and VPC network: Select the subnet where the node is located, that is, the subnet selected during node creation.
                +

                If there are multiple CIDR blocks, you can create multiple SNAT rules or customize a CIDR block as long as the CIDR block contains the node subnet.

                +
                +

                After the SNAT rule is configured, workloads can access public networks from the container. Public networks can be pinged from the container.
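                For example, connectivity can be verified from inside a pod as follows. The pod name is a placeholder, and minimal images may not include the ping tool, in which case curl or wget can be used instead.

                $ kubectl exec -it <pod-name> -- /bin/sh
                / # ping -c 4 www.example.com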

                +

              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0402.html b/docs/cce/umn/cce_10_0402.html new file mode 100644 index 00000000..0e079431 --- /dev/null +++ b/docs/cce/umn/cce_10_0402.html @@ -0,0 +1,82 @@ + + +

              Host Network

              +

              Scenario

              Kubernetes allows pods to directly use the host/node network.

              +
              +

              Configuration

              Add hostNetwork: true to the pod definition.

              +
              apiVersion: apps/v1
              +kind: Deployment
              +metadata:
              +  name: nginx
              +spec:
              +  replicas: 1
              +  selector:
              +    matchLabels:
              +      app: nginx
              +  template:
              +    metadata:
              +      labels:
              +        app: nginx
              +    spec:
              +      hostNetwork: true
              +      containers:
              +      - image: nginx:alpine
              +        name: nginx
              +      imagePullSecrets:
              +      - name: default-secret
              +

If the pod IP address is the same as the node IP address, the host network is configured successfully.

              +
              $ kubectl get pod -owide
              +NAME                    READY   STATUS    RESTARTS   AGE     IP          NODE        NOMINATED NODE   READINESS GATES
              +nginx-6fdf99c8b-6wwft   1/1     Running   0          3m41s   10.1.0.55   10.1.0.55   <none>           <none>
              +
              +

              Precautions

If a pod uses the host network, it occupies a host port, and its IP address is the host IP address. To use the host network, ensure that pods do not conflict with each other over the host ports they occupy. Do not use the host network unless you know exactly which host port is used by which pod.

              +

When the host network is used, a pod is accessed through the IP address of the node it runs on. Therefore, the node's security group must allow access on the port the pod listens on. Otherwise, the access fails.

              +

In addition, using the host network requires you to reserve host ports for the pods. When using a Deployment to deploy hostNetwork pods, ensure that the number of pods does not exceed the number of nodes. Otherwise, multiple pods will be scheduled onto the same node and fail to start due to port conflicts. For example, with the preceding nginx YAML, if two replicas (replicas set to 2) are deployed in a cluster with only one node, one pod cannot be created. Its logs will show that Nginx cannot start because the port is already occupied.

              +

Do not schedule multiple pods that use the host network on the same node. Otherwise, when a ClusterIP Service is created to access such pods, the cluster IP address cannot be accessed. A minimal anti-affinity sketch for keeping such pods on different nodes is shown after the log output below.

              +
              +
              $ kubectl get deploy
              +NAME    READY   UP-TO-DATE   AVAILABLE   AGE
              +nginx   1/2     2            1           67m
              +$ kubectl get pod
              +NAME                    READY   STATUS             RESTARTS   AGE
              +nginx-6fdf99c8b-6wwft   1/1     Running            0          67m
              +nginx-6fdf99c8b-rglm7   0/1     CrashLoopBackOff   13         44m
              +$ kubectl logs nginx-6fdf99c8b-rglm7
              +/docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration
              +/docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/
              +/docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh
              +10-listen-on-ipv6-by-default.sh: info: Getting the checksum of /etc/nginx/conf.d/default.conf
              +10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf
              +/docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh
              +/docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh
              +/docker-entrypoint.sh: Configuration complete; ready for start up
              +2022/05/11 07:18:11 [emerg] 1#1: bind() to 0.0.0.0:80 failed (98: Address in use)
              +nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address in use)
              +2022/05/11 07:18:11 [emerg] 1#1: bind() to [::]:80 failed (98: Address in use)
              +nginx: [emerg] bind() to [::]:80 failed (98: Address in use)
              +2022/05/11 07:18:11 [emerg] 1#1: bind() to 0.0.0.0:80 failed (98: Address in use)
              +nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address in use)
              +2022/05/11 07:18:11 [emerg] 1#1: bind() to [::]:80 failed (98: Address in use)
              +nginx: [emerg] bind() to [::]:80 failed (98: Address in use)
              +2022/05/11 07:18:11 [emerg] 1#1: bind() to 0.0.0.0:80 failed (98: Address in use)
              +nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address in use)
              +2022/05/11 07:18:11 [emerg] 1#1: bind() to [::]:80 failed (98: Address in use)
              +nginx: [emerg] bind() to [::]:80 failed (98: Address in use)
              +2022/05/11 07:18:11 [emerg] 1#1: bind() to 0.0.0.0:80 failed (98: Address in use)
              +nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address in use)
              +2022/05/11 07:18:11 [emerg] 1#1: bind() to [::]:80 failed (98: Address in use)
              +nginx: [emerg] bind() to [::]:80 failed (98: Address in use)
              +2022/05/11 07:18:11 [emerg] 1#1: bind() to 0.0.0.0:80 failed (98: Address in use)
              +nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address in use)
              +2022/05/11 07:18:11 [emerg] 1#1: bind() to [::]:80 failed (98: Address in use)
              +nginx: [emerg] bind() to [::]:80 failed (98: Address in use)
              +2022/05/11 07:18:11 [emerg] 1#1: still could not bind()
              +nginx: [emerg] still could not bind()
              +
              +
              +
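              If multiple replicas of a hostNetwork workload are required, one way to avoid the port conflicts shown above is to spread the pods across nodes with pod anti-affinity. The following is a minimal sketch based on the preceding nginx Deployment; using a DaemonSet is another common option.

              apiVersion: apps/v1
              kind: Deployment
              metadata:
                name: nginx
              spec:
                replicas: 2
                selector:
                  matchLabels:
                    app: nginx
                template:
                  metadata:
                    labels:
                      app: nginx
                  spec:
                    hostNetwork: true
                    affinity:
                      podAntiAffinity:
                        requiredDuringSchedulingIgnoredDuringExecution:   # never place two of these pods on the same node
                        - labelSelector:
                            matchLabels:
                              app: nginx
                          topologyKey: kubernetes.io/hostname
                    containers:
                    - image: nginx:alpine
                      name: nginx
                    imagePullSecrets:
                    - name: default-secret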
              + +
              + diff --git a/docs/cce/umn/cce_10_0403.html b/docs/cce/umn/cce_10_0403.html new file mode 100644 index 00000000..51c352b8 --- /dev/null +++ b/docs/cce/umn/cce_10_0403.html @@ -0,0 +1,17 @@ + + +

              Changing Cluster Scale

              +

              Scenario

              CCE allows you to change the number of nodes managed in a cluster.

              +
              +

              Notes and Constraints

              • This function is supported for clusters of v1.15 and later versions.
              • Starting from v1.15.11, a cluster can be scaled out to manage up to 2,000 nodes. However, a cluster with a single master node cannot be changed to a flavor of 1,000 nodes or more.
              • Currently, a cluster can only be scaled out to a larger specification, but cannot be scaled in.
              • During the specification change, the master nodes will be powered off and on, and cluster management (the control plane) will be unavailable for a short period of time. Perform the change during off-peak hours.
              • Changing the cluster scale does not affect the services already running in the cluster. However, because the control plane is briefly interrupted, you are advised not to perform any other operations (such as creating workloads) during the change.
              • Change failures will trigger a cluster rollback to the normal state. If the rollback fails, submit a service ticket.
              +
              +

              Procedure

              1. Log in to the CCE console. In the navigation pane, choose Clusters.
              2. Click next to the cluster whose specifications need to be changed.
              3. On the page displayed, select a new flavor for Target Flavor as required.
              4. Click OK.

                You can click Operation Records in the upper left corner to view the cluster change history. The status changes from Executing to Successful, indicating that the cluster specifications are successfully changed.

                +

              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0423.html b/docs/cce/umn/cce_10_0423.html new file mode 100644 index 00000000..1aa96fa1 --- /dev/null +++ b/docs/cce/umn/cce_10_0423.html @@ -0,0 +1,19 @@ + + + +

              Volcano Scheduling

              + +

              +
              + + + diff --git a/docs/cce/umn/cce_10_0430.html b/docs/cce/umn/cce_10_0430.html new file mode 100644 index 00000000..15d8d07a --- /dev/null +++ b/docs/cce/umn/cce_10_0430.html @@ -0,0 +1,89 @@ + + +

              Basic Cluster Information

              +

Kubernetes allows you to easily deploy and manage containerized applications and facilitates container scheduling and orchestration.

              +

              For developers, Kubernetes is a cluster operating system. Kubernetes provides service discovery, scaling, load balancing, self-healing, and even leader election, freeing developers from infrastructure-related configurations.

              +

With Kubernetes, you can manage a large number of servers as if they were one, and deploy applications in the same way regardless of where they run.

              +

              Kubernetes Cluster Architecture

              A Kubernetes cluster consists of master nodes (Masters) and worker nodes (Nodes). Applications are deployed on worker nodes, and you can specify the nodes for deployment.

              +

              The following figure shows the architecture of a Kubernetes cluster.

              +
              Figure 1 Kubernetes cluster architecture
              +

              Master node

              +

              A master node is the machine where the control plane components run, including API server, Scheduler, Controller manager, and etcd.

              +
              • API server: functions as a transit station for components to communicate with each other, receives external requests, and writes information to etcd.
              • Controller manager: performs cluster-level functions, such as component replication, node tracking, and fault detection and recovery.
              • Scheduler: schedules containers to nodes based on various conditions (such as available resources and node affinity).
              • etcd: serves as a distributed data storage component that stores cluster configuration information.
              +

              In a production environment, multiple master nodes are deployed to ensure high cluster availability. For example, you can deploy three master nodes for your CCE cluster.

              +

              Worker node

              +

              A worker node is a compute node in a cluster, that is, a node running containerized applications. A worker node has the following components:

              +
              • kubelet: communicates with the container runtime, interacts with the API server, and manages containers on the node.
              • kube-proxy: serves as an access proxy between application components.
              • Container runtime: the software responsible for pulling images and running containers, such as Docker.
              +
              +

              Master Nodes and Cluster Scale

              When you create a cluster on CCE, you can have one or three master nodes. Three master nodes can create a cluster in HA mode.

              +

              The master node specifications decide the number of nodes that can be managed by a cluster. You can select the cluster management scale, for example, 50 or 200 nodes.

              +
              +

              Cluster Network

              From the perspective of the network, all nodes in a cluster are located in a VPC, and containers are running on the nodes. You need to configure node-node, node-container, and container-container communication.

              +

              A cluster network can be divided into three network types:

              +
              • Node network: IP addresses are assigned to nodes in a cluster.
              • Container network: IP addresses are assigned to containers in a cluster for communication. Currently, multiple container network models are supported, and each model has its own working mechanism.
              • Service network: A Service is a Kubernetes object used to access containers. Each Service has a fixed IP address.
              +

              When you create a cluster, select a proper CIDR block for each network. Ensure that the CIDR blocks do not conflict with each other and have sufficient available IP addresses. You cannot change the container network model after the cluster is created. Plan the container network model properly in advance.

              +

              You are advised to learn about the cluster network and container network models before creating a cluster. For details, see Container Network Models.

              +
              +

              Cluster Lifecycle

              +
              + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
              Table 1 Cluster status

              • Creating: A cluster is being created and is requesting for cloud resources.
              • Running: A cluster is running properly.
              • Scaling-out: A node is being added to a cluster.
              • Scaling-in: A node is being deleted from a cluster.
              • Hibernating: A cluster is hibernating.
              • Awaking: A cluster is being woken up.
              • Upgrading: A cluster is being upgraded.
              • Unavailable: A cluster is unavailable.
              • Deleting: A cluster is being deleted.

              +
              +
              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0461.html b/docs/cce/umn/cce_10_0461.html new file mode 100644 index 00000000..6ae92d5a --- /dev/null +++ b/docs/cce/umn/cce_10_0461.html @@ -0,0 +1,94 @@ + + +

              Precautions for Using a Node

              +

              Introduction

              A container cluster consists of a set of worker machines, called nodes, that run containerized applications. A node can be a virtual machine (VM) or a physical machine (PM), depending on your service requirements. The components on a node include kubelet, container runtime, and kube-proxy.

              +

              A Kubernetes cluster consists of master nodes and worker nodes. The nodes described in this section refer to worker nodes, the computing nodes of a cluster that run containerized applications.

              +
              +

              CCE uses high-performance Elastic Cloud Servers (ECSs) as nodes to build highly available Kubernetes clusters.

              +
              +

              Supported Node Specifications

              Different regions support different node flavors, and node flavors may be changed or sold out. You are advised to log in to the CCE console and check whether the required node flavors are supported on the page for creating nodes.

              +
              +

              Underlying File Storage System of Docker

              • In clusters of v1.15.6 or earlier, the underlying file storage system uses the XFS format.
              • In clusters of v1.15.11 or later, after a node is created or reset, the underlying file storage system uses the ext4 format.
              +

For containerized applications that depend on the XFS format, pay attention to the impact of this change in the underlying file storage format. (File ordering differs between file systems. For example, some Java applications reference a JAR package from a directory that contains multiple versions of that package. If no version is specified, the package actually loaded is determined by the file system's ordering.)

              +

Run docker info | grep "Backing Filesystem" to check the file system format used by the Docker storage driver on the current node.
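              The output looks similar to the following. The exact value depends on the storage driver and file system; for example, overlay2 on ext4 typically reports extfs (shown here for illustration).

              $ docker info | grep "Backing Filesystem"
               Backing Filesystem: extfs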

              +
              +

              Paas User and User Group

              When you create a node in a CCE cluster, a paas user or user group is created on the node by default. CCE components and CCE add-ons on a node run as a non-root user (paas user/user group) to minimize the running permission. If the paas user or user group is modified, CCE components and pods may fail to run properly.

              +

              The normal running of CCE components depends on the paas user or user group. Pay attention to the following requirements:

              +
              • Do not modify the directory permission and container directory permission on a node.
              • Do not change the GID and UID of the paas user or user group.
              • Do not directly use the paas user or user group to set the user and group to which the service file belongs.
              +
              +
              +

              Node Lifecycle

              A lifecycle indicates the node statuses recorded from the time when the node is created through the time when the node is deleted or released.

              + +
              + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
              Table 1 Node statuses

              • Running (stable state): The node is running properly and is connected to the cluster. Nodes in this state can provide services.
              • Unavailable (stable state): The node is not running properly. Instances in this state no longer provide services. In this case, perform the operations in Resetting a Node.
              • Creating (intermediate state): The node has been created but is not running.
              • Installing (intermediate state): The Kubernetes software is being installed on the node.
              • Deleting (intermediate state): The node is being deleted. If this state stays for a long time, an exception occurs.
              • Stopped (stable state): The node is stopped properly. A node in this state cannot provide services. You can start the node on the ECS console.
              • Error (stable state): The node is abnormal. Instances in this state no longer provide services. In this case, perform the operations in Resetting a Node.

              +
              +
              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0462.html b/docs/cce/umn/cce_10_0462.html new file mode 100644 index 00000000..a91b6cda --- /dev/null +++ b/docs/cce/umn/cce_10_0462.html @@ -0,0 +1,119 @@ + + +

              Container Engine

              +

              Introduction to Container Engines

              Container engines, one of the most important components of Kubernetes, manage the lifecycle of images and containers. The kubelet interacts with a container runtime through the Container Runtime Interface (CRI).

              +
              +

              Mapping between Node OSs and Container Engines

              +
              + + + + + + + + + + + + + + + + + + + + + + + + + +
              Table 1 Node OSs and container engines in CCE clusters

              • CentOS 7.x (kernel 3.x): Container engine: Docker; clusters of v1.23 and later also support containerd. Container storage rootfs: clusters of v1.19.16 and earlier use Device Mapper; clusters of v1.19.16 and later use OverlayFS. Container runtime: runC.
              • EulerOS 2.5 (kernel 3.x): Container engine: Docker. Container storage rootfs: Device Mapper. Container runtime: runC.
              • EulerOS 2.9 (kernel 4.x): Container engine: Docker; clusters of v1.23 and later also support containerd. Container storage rootfs: OverlayFS. Container runtime: runC.

              +
              +
              + +
              + + + + + + + + + + + + + + + + + + + + + + +
              Table 2 Node OSs and container engines in CCE Turbo clusters

              • Elastic Cloud Server (VM): OS: CentOS 7.x or EulerOS 2.9; kernel version: 3.x; container engine: Docker; container storage rootfs: OverlayFS; container runtime: runC.
              • Elastic Cloud Server (physical machine): OS: EulerOS 2.9; kernel version: 4.x; container engine: containerd; container storage rootfs: Device Mapper; container runtime: Kata.

              +
              +
              +
              +

              Differences in Tracing

              • Docker (Kubernetes 1.23 and earlier versions):

                kubelet --> docker shim (in the kubelet process) --> docker --> containerd

                +
              • Docker (community solution for Kubernetes v1.24 or later):

kubelet --> cri-dockerd (kubelet uses CRI to connect to cri-dockerd) --> docker --> containerd

                +
              • containerd:

                kubelet --> cri plugin (in the containerd process) --> containerd

                +
              +

Docker adds functions such as Swarm clustering, docker build, and the Docker API, but these extra features also introduce more potential bugs. Compared with containerd, Docker adds one more layer to the call chain. Therefore, containerd consumes fewer resources and is more secure.
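              To check which container engine a node actually uses, the CONTAINER-RUNTIME column of kubectl get nodes -o wide shows the runtime endpoint and its version. The node names and output below are illustrative only.

              $ kubectl get nodes -o wide
              NAME            STATUS   ...   CONTAINER-RUNTIME
              192.168.0.100   Ready    ...   docker://18.9.0
              192.168.0.101   Ready    ...   containerd://1.4.1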

              +
              +

              Container Engine Version Description

              • Docker
                • EulerOS/CentOS: docker-engine 18.9.0, a Docker version customized for CCE. Security vulnerabilities will be fixed in a timely manner.
                +
              • containerd: 1.4.1
              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0463.html b/docs/cce/umn/cce_10_0463.html new file mode 100644 index 00000000..f8a0771b --- /dev/null +++ b/docs/cce/umn/cce_10_0463.html @@ -0,0 +1,129 @@ + + +

              Kata Containers and Common Containers

              +

The most significant difference is that each Kata container (pod) runs in an independent micro-VM with its own OS kernel and is securely isolated at the virtualization layer. Kata containers therefore provide isolation that is more secure than that of independent private Kubernetes clusters. With isolated OS kernels, computing resources, and networks, pod resources and data cannot be preempted or stolen by other pods.

              +

              You can run common or Kata containers on a single node in a CCE Turbo cluster. The differences between them are as follows:

              + +
              + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

              Category: Kata container / common container (Docker) / common container (containerd)

              • Node type used to run containers: bare-metal server (BMS) / VM / VM
              • Container engine: containerd / Docker / containerd
              • Container runtime: Kata / runC / runC
              • Container kernel: exclusive kernel / shares the kernel with the host / shares the kernel with the host
              • Container isolation: lightweight VMs / cgroups and namespaces / cgroups and namespaces
              • Container engine storage driver: Device Mapper / OverlayFS2 / OverlayFS
              • Pod overhead: memory 100 MiB and CPU 0.1 cores / none / none. Pod overhead accounts for the resources consumed by the pod infrastructure on top of the container requests and limits. For example, if limits.cpu is set to 0.5 cores and limits.memory to 256 MiB, a Kata pod requests 0.6 cores and 356 MiB of memory.
              • Minimal specifications: memory 256 MiB and CPU 0.25 cores / none / none. For Kata containers, the ratio of CPU (in cores) to memory (in GiB) should be in the range of 1:1 to 1:8. For example, with 0.5 cores, the memory should range from 512 MiB to 4 GiB.
              • Container engine CLI: crictl / docker / crictl
              • Pod computing resources: the request and limit values must be the same for both CPU and memory / the request and limit values can be different / the request and limit values can be different
              • Host network: not supported / supported / supported

              +
              +
              +
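              In upstream Kubernetes, the container runtime used by a pod is selected through a RuntimeClass. The following is only a sketch that assumes a RuntimeClass for Kata exists in your cluster; the name kata is a placeholder, so check the available names with kubectl get runtimeclass first. Note that the CPU and memory requests equal the limits, as required for Kata containers in the table above.

              apiVersion: v1
              kind: Pod
              metadata:
                name: kata-demo               # placeholder name
              spec:
                runtimeClassName: kata        # placeholder; use a RuntimeClass name that exists in your cluster
                containers:
                - name: nginx
                  image: nginx:alpine
                  resources:
                    requests:                 # for Kata containers, requests must equal limits
                      cpu: 500m
                      memory: 512Mi
                    limits:
                      cpu: 500m
                      memory: 512Mi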
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0465.html b/docs/cce/umn/cce_10_0465.html new file mode 100644 index 00000000..96444536 --- /dev/null +++ b/docs/cce/umn/cce_10_0465.html @@ -0,0 +1,18 @@ + + +

              Pod Security

              +

              +
              + + diff --git a/docs/cce/umn/cce_10_0466.html b/docs/cce/umn/cce_10_0466.html new file mode 100644 index 00000000..d8316ec7 --- /dev/null +++ b/docs/cce/umn/cce_10_0466.html @@ -0,0 +1,107 @@ + + +

              Configuring Pod Security Admission

              +

              Before using Pod Security Admission, you need to understand Kubernetes Pod Security Standards. These standards define different isolation levels for pods. They let you define how you want to restrict the behavior of pods in a clear, consistent fashion. Kubernetes offers a built-in pod security admission controller to enforce the pod security standards. Pod security restrictions are applied at the namespace level when pods are created.

              +

              The pod security standard defines three security policy levels:

              + +
              + + + + + + + + + + + + + +
              Table 1 Pod security policy levels

              • privileged: Unrestricted policy, providing the widest possible level of permissions, typically aimed at system- and infrastructure-level workloads managed by privileged, trusted users, such as CNIs and storage drivers.
              • baseline: Minimally restrictive policy which prevents known privilege escalations, typically targeted at non-critical workloads. This policy disables capabilities such as hostNetwork and hostPID.
              • restricted: Heavily restricted policy, following current pod hardening best practices.

              +
              +
              +

              Pod security admission is applied at the namespace level. The controller restricts the security context and other parameters in the pod or container in the namespace. The privileged policy does not verify the securityContext field of the pod and container. The baseline and restricted policies have different requirements on securityContext. For details, see Pod Security Standards.

              +

              Setting security context: Configure a Security Context for a Pod or Container

              +

              Pod Security Admission Labels

              Kubernetes defines three types of labels for Pod Security Admission (see Table 2). You can set these labels in a namespace to define the pod security standard level to be used. However, do not change the pod security standard level in system namespaces such as kube-system. Otherwise, pods in the system namespace may be faulty.

              + +
              + + + + + + + + + + + + + + + + + +
              Table 2 Pod security admission labels

              • enforce (applies to pods): Policy violations will cause the pod to be rejected.
              • audit (applies to workloads, such as Deployments and jobs): Policy violations will trigger the addition of an audit annotation to the event recorded in the audit log, but are otherwise allowed.
              • warn (applies to workloads, such as Deployments and jobs): Policy violations will trigger a user-facing warning, but are otherwise allowed.

              +
              +
              +

              Pods are often created indirectly, by creating a workload object such as a Deployment or job. To help catch violations early, both the audit and warning modes are applied to the workload resources. However, the enforce mode is applied only to the resulting pod objects.

              +
              +
              +

              Enforcing Pod Security Admission with Namespace Labels

              You can label namespaces to enforce pod security standards. Assume that a namespace is configured as follows:

              +
              apiVersion: v1
              +kind: Namespace
              +metadata:
              +  name: my-baseline-namespace
              +  labels:
              +    pod-security.kubernetes.io/enforce: privileged
              +    pod-security.kubernetes.io/enforce-version: v1.25
              +    pod-security.kubernetes.io/audit: baseline
              +    pod-security.kubernetes.io/audit-version: v1.25
              +    pod-security.kubernetes.io/warn: restricted
              +    pod-security.kubernetes.io/warn-version: v1.25
              +
              +    # The label can be in either of the following formats:
              +    # pod-security.kubernetes.io/<MODE>: <LEVEL> 
              +    # pod-security.kubernetes.io/<MODE>-version: <VERSION>  
              +    # The audit and warn modes inform you of which security behaviors the workload violates.
              +

              Namespace labels indicate which policy level to apply for the mode. For each mode, there are two labels that determine the policy used:

              +
              • pod-security.kubernetes.io/<MODE>: <LEVEL>
                • <MODE>: must be enforce, audit, or warn. For details about the modes, see Table 2.
                • <LEVEL>: must be privileged, baseline, or restricted. For details about the levels, see Table 1.
                +
              • pod-security.kubernetes.io/<MODE>-version: <VERSION>

                Optional, which pins the policy to a given Kubernetes version.

                +
                • <MODE>: must be enforce, audit, or warn. For details about the modes, see Table 2.
                • <VERSION>: Kubernetes version number. For example, v1.25. You can also use latest.
                +
              +

              If pods are deployed in the preceding namespace, the following security restrictions apply:

              +
              1. The verification in the enforce mode is skipped (enforce mode + privileged level).
              2. Restrictions related to the baseline policy are verified (audit mode + baseline level). That is, if the pod or container violates the policy, the corresponding event is recorded into the audit log.
              3. Restrictions related to the restricted policy are verified (warn mode + restricted level). That is, if the pod or container violates the policy, the user will receive an alarm when creating the pod.
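              These labels can also be applied to an existing namespace with kubectl. The namespace name below is a placeholder; the second command uses a server-side dry run to report which existing pods would violate the restricted level before you actually enforce it.

              # Enforce the baseline level on an existing namespace (placeholder name).
              kubectl label --overwrite ns my-namespace pod-security.kubernetes.io/enforce=baseline

              # Preview the effect of the restricted level without changing the namespace.
              kubectl label --dry-run=server --overwrite ns my-namespace pod-security.kubernetes.io/enforce=restricted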
              +
              +

              Migrating from Pod Security Policy to Pod Security Admission

              If you use pod security policies in a cluster earlier than v1.25 and need to replace them with pod security admission in a cluster of v1.25 or later, follow the guide in Migrate from PodSecurityPolicy to the Built-In PodSecurity Admission Controller.

              +
              1. Pod security admission supports only three standard isolation levels, which makes it less flexible than pod security policies. If you require finer-grained control over specific constraints, you will need to use a validating admission webhook to enforce those policies.
              2. Pod security admission is a non-mutating admission controller, meaning it will not modify pods before validating them. If you were relying on this aspect of PSP, you will need to either modify the security context in your workloads, or use a Mutating Admission Webhook to make those changes.
              3. PSP lets you bind different policies to different service accounts. This approach has many pitfalls and is not recommended, but if you require this feature anyway you will need to use a third-party webhook instead.
              4. Do not apply pod security admission to namespaces where CCE components, such as kube-system, kube-public, and kube-node-lease, are deployed. Otherwise, CCE components and add-on functions will be abnormal.
              +
              +
              + +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0467.html b/docs/cce/umn/cce_10_0467.html new file mode 100644 index 00000000..ca7e4c2c --- /dev/null +++ b/docs/cce/umn/cce_10_0467.html @@ -0,0 +1,21 @@ + + +

              CCE Kubernetes 1.25 Release Notes

              +

              CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.25.

              +

              Resource Changes and Deprecations

              Kubernetes 1.25 Release Notes

              +
              • PodSecurityPolicy is replaced by Pod Security Admission. For details about the migration, see Migrate from PodSecurityPolicy to the Built-In PodSecurity Admission Controller.
              • SeccompDefault is in beta. To enable this feature, add the startup parameter --seccomp-default=true to kubelet. This sets the default seccomp profile to RuntimeDefault, improving system security. Clusters of v1.25 no longer use the seccomp.security.alpha.kubernetes.io/pod and container.seccomp.security.alpha.kubernetes.io/<container_name> annotations to configure seccomp. Replace them with the securityContext.seccompProfile field in the pod or container (see the sketch after this list). For details, see Configure a Security Context for a Pod or Container.

                After this feature is enabled, the system calls required by applications may be restricted by the runtime. Verify in a test environment that your applications are not affected.

                +
                +
              • EndPort in NetworkPolicy is stable. This feature was introduced in v1.21 and adds an endPort field to NetworkPolicy so that you can specify a port range.
              • Since clusters of v1.25, Kubernetes does not support certificate authentication generated using the SHA1WithRSA or ECDSAWithSHA1 algorithm. You are advised to use the SHA256 algorithm.
              +
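              For reference, a minimal pod spec that sets securityContext.seccompProfile, as mentioned in the SeccompDefault item above, looks like the following. The pod name and image are placeholders for illustration.

              apiVersion: v1
              kind: Pod
              metadata:
                name: seccomp-demo            # placeholder name
              spec:
                securityContext:
                  seccompProfile:
                    type: RuntimeDefault      # use the container runtime's default seccomp profile
                containers:
                - name: app
                  image: nginx:alpine         # placeholder image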

              Kubernetes 1.24 Release Notes

              +
              • Beta APIs are disabled by default. Experience from removing long-lived beta APIs shows that about 90% of cluster administrators do not use them, and beta features are not recommended in production environments. However, because beta APIs used to be enabled by default, they ended up running in production and incurring risks. Therefore, in v1.24 and later versions, new beta APIs are disabled by default, while beta APIs that are already enabled remain enabled.
              • The LegacyServiceAccountTokenNoAutoGeneration feature is in the beta state. This feature is enabled by default, and secret tokens are no longer automatically generated for service accounts. If you want to use a token that never expires, create a secret and mount it manually. For details, see Service account token secrets.
              • service.alpha.kubernetes.io/tolerate-unready-endpoints is replaced by Service.spec.publishNotReadyAddresses.
              • The Service.Spec.LoadBalancerIP field is deprecated and may be removed in later versions. Use a custom annotation instead.
              +
              +

              References

              For more details about the performance comparison and function evolution between Kubernetes 1.25 and other versions, see the following documents:

              + +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0468.html b/docs/cce/umn/cce_10_0468.html new file mode 100644 index 00000000..f2fd49d2 --- /dev/null +++ b/docs/cce/umn/cce_10_0468.html @@ -0,0 +1,22 @@ + + +

              CCE Kubernetes 1.23 Release Notes

              +

              CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.23.

              +

              Resource Changes and Deprecations

              Changes in CCE 1.23

              +
              • The web-terminal add-on is no longer supported. Use CloudShell or kubectl instead.
              +

              Kubernetes 1.23 Release Notes

              +
              • FlexVolume is deprecated. Use CSI.
              • The HorizontalPodAutoscaler autoscaling/v2 API is promoted to GA and becomes stable in v1.23. The v2beta2 API is no longer recommended; use the v2 API instead (a minimal example follows this list).
              • PodSecurity moves to beta, replacing the deprecated PodSecurityPolicy. PodSecurity is an admission controller that enforces pod security standards on pods in the namespace based on specific namespace labels that set the enforcement level. PodSecurity is enabled by default in version 1.23.
              +
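              As referenced above, a minimal HorizontalPodAutoscaler written against the stable autoscaling/v2 API looks like the following. The target Deployment name and the thresholds are placeholders.

              apiVersion: autoscaling/v2
              kind: HorizontalPodAutoscaler
              metadata:
                name: nginx-hpa               # placeholder name
              spec:
                scaleTargetRef:
                  apiVersion: apps/v1
                  kind: Deployment
                  name: nginx                 # placeholder target workload
                minReplicas: 1
                maxReplicas: 5
                metrics:
                - type: Resource
                  resource:
                    name: cpu
                    target:
                      type: Utilization
                      averageUtilization: 70  # scale out when average CPU usage exceeds 70%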

              Kubernetes 1.22 Release Notes

              +
              • Ingresses no longer support the networking.k8s.io/v1beta1 and extensions/v1beta1 APIs. If you use an earlier API version to manage ingresses, applications cannot be exposed to external services. Use networking.k8s.io/v1.
              • CustomResourceDefinitions no longer support the apiextensions.k8s.io/v1beta1 API. If you use an earlier API version to create a CRD, the creation will fail, which affects the controller that reconciles the CRD. Use apiextensions.k8s.io/v1.
              • ClusterRoles, ClusterRoleBindings, Roles, and RoleBindings no longer support the rbac.authorization.k8s.io/v1beta1 API. If you use an earlier API version to manage RBAC resources, permission control for applications is affected and may stop working in the cluster. Use rbac.authorization.k8s.io/v1.
              • The Kubernetes release cycle is changed from four releases a year to three releases a year.
              • StatefulSets support minReadySeconds.
              • During scale-in, pods are randomly selected and deleted based on the pod UID by default (LogarithmicScaleDown). This feature enhances the randomness of the pods to be deleted and alleviates the problems caused by pod topology spread constraints. For more information, see KEP-2185 and issue 96748.
              • The BoundServiceAccountTokenVolume feature is stable. This feature improves the token security of the service account and changes the method of mounting tokens to pods. Kubernetes clusters of v1.21 and later enable this approach by default.

                +
              +
              +

              References

              For more details about the performance comparison and function evolution between Kubernetes 1.23 and other versions, see the following documents:

              + +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0469.html b/docs/cce/umn/cce_10_0469.html new file mode 100644 index 00000000..5e120710 --- /dev/null +++ b/docs/cce/umn/cce_10_0469.html @@ -0,0 +1,19 @@ + + +

              CCE Kubernetes 1.21 Release Notes

              +

              CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.21.

              +

              Resource Changes and Deprecations

              Kubernetes 1.21 Release Notes

              +
              • CronJob is now in the stable state, and the version number changes to batch/v1.
              • Immutable Secrets and ConfigMaps are now stable. A new immutable field on these objects rejects changes, which protects clusters from accidental updates that could cause application outages. Because the resources are immutable, kubelet does not watch or poll them for changes, reducing the load on kube-apiserver and improving cluster scalability and performance. For more information, see Immutable ConfigMaps. A minimal example follows this list.
              • Graceful node shutdown has been upgraded to the beta state. With this update, kubelet can detect that a node is shutting down and gracefully terminate the pods on it. Previously, when a node was shut down, its pods did not follow the expected termination lifecycle, which caused workload problems. kubelet now uses systemd to detect an imminent system shutdown and notifies the running pods so that they can terminate gracefully.
              • For a pod with multiple containers, you can use the kubectl.kubernetes.io/default-container annotation to preselect the default container for kubectl commands.
              • PodSecurityPolicy is deprecated. For details, see https://kubernetes.io/blog/2021/04/06/podsecuritypolicy-deprecation-past-present-and-future/.
              • The BoundServiceAccountTokenVolume feature has entered the beta test. This feature improves the token security of the service account and changes the method of mounting tokens to pods. Kubernetes clusters of v1.21 and later enable this approach by default.
              +
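              As referenced in the list above, marking a ConfigMap immutable only requires the top-level immutable field. The name and data below are placeholders.

              apiVersion: v1
              kind: ConfigMap
              metadata:
                name: app-config              # placeholder name
              data:
                app.properties: |
                  log.level=info
              immutable: true                 # once set, the data cannot be updated; delete and recreate the ConfigMap to change it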

              Kubernetes 1.20 Release Notes

              +
              • The API priority and fairness have reached the test state and are enabled by default. This allows kube-apiserver to classify incoming requests by priority. For more information, see API Priority and Fairness.
              • The bug of exec probe timeouts is fixed. Before this fix, the exec probe ignored the timeoutSeconds field and ran indefinitely, even beyond its configured deadline, until a result was returned. Now, if no value is specified, the default of one second is used, and a probe that runs longer than one second may cause the application health check to fail. Update the timeoutSeconds field for applications that rely on the old behavior when you upgrade. The newly introduced ExecProbeTimeout feature gate lets cluster operators restore the previous behavior, but this option will be locked and removed in later versions.
              • RuntimeClass enters the stable state. RuntimeClass provides a mechanism to support multiple runtimes in a cluster and expose information about the container runtime to the control plane.
              • kubectl debugging has reached the test state. kubectl debugging provides support for common debugging workflows.
              • Dockershim was marked as deprecated in Kubernetes 1.20. Currently, you can continue to use Docker in the cluster. This change is irrelevant to the container image used by clusters. You can still use Docker to build your images. For more information, see Dockershim Deprecation FAQ.
              +
              +

              References

              For more details about the performance comparison and function evolution between Kubernetes 1.21 and other versions, see the following documents:

              + +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0470.html b/docs/cce/umn/cce_10_0470.html new file mode 100644 index 00000000..a26d6e44 --- /dev/null +++ b/docs/cce/umn/cce_10_0470.html @@ -0,0 +1,35 @@ + + +

              CCE Kubernetes 1.19 Release Notes

              +

              CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.19.

              +

              Resource Changes and Deprecations

              Kubernetes 1.19 Release Notes

              +
              • vSphere in-tree volumes can be migrated to vSphere CSI drivers. The in-tree vSphere Volume plugin is no longer used and will be deleted in later versions.
              • apiextensions.k8s.io/v1beta1 has been deprecated. You are advised to use apiextensions.k8s.io/v1.
              • apiregistration.k8s.io/v1beta1 has been deprecated. You are advised to use apiregistration.k8s.io/v1.
              • authentication.k8s.io/v1beta1 and authorization.k8s.io/v1beta1 have been deprecated and will be removed from Kubernetes 1.22. You are advised to use authentication.k8s.io/v1 and authorization.k8s.io/v1.
              • autoscaling/v2beta1 has been deprecated. You are advised to use autoscaling/v2beta2.
              • coordination.k8s.io/v1beta1 has been deprecated in Kubernetes 1.19 and will be removed from version 1.22. You are advised to use coordination.k8s.io/v1.
              • kube-apiserver: The componentstatus API has been deprecated.
              • kubeadm: The kubeadm config view command has been deprecated and will be deleted in later versions. Use kubectl get cm -o yaml -n kube-system kubeadm-config to directly obtain the kubeadm configuration.
              • kubeadm: The kubeadm alpha kubelet config enable-dynamic command has been deprecated.
              • kubeadm: The --use-api flag in the kubeadm alpha certs renew command has been deprecated.
              • Kubernetes no longer supports hyperkube image creation.
              • The --export flag is removed from the kubectl get command.
              • The alpha feature ResourceLimitsPriorityFunction has been deleted.
              • storage.k8s.io/v1beta1 has been deprecated. You are advised to use storage.k8s.io/v1.
              +

              Kubernetes 1.18 Release Notes

              +
              • kube-apiserver
                • All resources in the apps/v1beta1 and apps/v1beta2 API versions are no longer served. You can use the apps/v1 API version.
                • DaemonSets, Deployments, and ReplicaSets in the extensions/v1beta1 API version are no longer served. You can use the apps/v1 API version.
                • NetworkPolicies in the extensions/v1beta1 API version are no longer served. You can use the networking.k8s.io/v1 API version.
                • PodSecurityPolicies in the extensions/v1beta1 API version are no longer served. Migrate to use the policy/v1beta1 API version.
                +
              +
              • kubelet
                • --redirect-container-streaming is not recommended and will be deprecated in v1.20.
                • The resource measurement endpoint /metrics/resource/v1alpha1 and all measurement standards under this endpoint have been deprecated. Use the measurement standards under the endpoint /metrics/resource instead:
                  • scrape_error --> scrape_error
                  • node_cpu_usage_seconds_total --> node_cpu_usage_seconds
                  • node_memory_working_set_bytes --> node_memory_working_set_bytes
                  • container_cpu_usage_seconds_total --> container_cpu_usage_seconds
                  • container_memory_working_set_bytes --> container_memory_working_set_bytes
                  +
• In future releases, kubelet will no longer create the target directory for CSI NodePublishVolume, in accordance with the CSI specification. You may need to update your CSI driver accordingly so that it creates and processes the target path itself.
                +
              +
              • kube-proxy
                • You are not advised to use the --healthz-port and --metrics-port flags. Use --healthz-bind-address and --metrics-bind-address instead.
• The EndpointSliceProxying feature gate has been added to control whether kube-proxy uses EndpointSlices. This feature is disabled by default.
                +
              +
              • kubeadm
                • The --kubelet-version flag of kubeadm upgrade node has been deprecated and will be deleted in later versions.
                • The --use-api flag in the kubeadm alpha certs renew command has been deprecated.
                • kube-dns has been deprecated and will no longer be supported in future versions.
                • The ClusterStatus structure in the kubeadm-config ConfigMap has been deprecated and will be deleted in later versions.
                +
              +
              • kubectl
• You are not advised to use boolean or unset values for --dry-run; the new version expects server|client|none.
• --server-dry-run has been deprecated for kubectl apply and is replaced by --dry-run=server (see the example after this list).
                +
              +
• add-ons
  • The cluster-monitoring add-on is deleted.

              +
              • kube-scheduler
                • The scheduling_duration_seconds metric has been deprecated.
• The scheduling_algorithm_predicate_evaluation_seconds and scheduling_algorithm_priority_evaluation_seconds counter metrics are no longer used and are replaced by framework_extension_point_duration_seconds[extension_point="Filter"] and framework_extension_point_duration_seconds[extension_point="Score"], respectively.
• The scheduler policy AlwaysCheckAllPredicates has been deprecated.
                +
              +
              • Other changes
                • The k8s.io/node-api component is no longer updated. Instead, you can use the RuntimeClass type in k8s.io/api and the generated clients in k8s.io/client-go.
                • The client label has been deleted from apiserver_request_total.
                +
              +
              +
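As referenced in the kubectl item above, the --dry-run flag now takes an explicit value. A minimal sketch of the migration, assuming a local manifest named deployment.yaml:

# Old form (deprecated):
kubectl apply -f deployment.yaml --server-dry-run

# New form in kubectl 1.18 and later: choose none, client, or server explicitly
kubectl apply -f deployment.yaml --dry-run=server
kubectl apply -f deployment.yaml --dry-run=client -o yaml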

              References

              For more details about the performance comparison and function evolution between Kubernetes 1.19 and other versions, see the following documents:

              + +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0471.html b/docs/cce/umn/cce_10_0471.html new file mode 100644 index 00000000..12af1696 --- /dev/null +++ b/docs/cce/umn/cce_10_0471.html @@ -0,0 +1,18 @@ + + +

              CCE Kubernetes 1.17 Release Notes

              +

              CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.17.

              +

              Resource Changes and Deprecations

• All resources in the apps/v1beta1 and apps/v1beta2 API versions are no longer served. Migrate to use the apps/v1 API version (see the example after this list).
              • DaemonSets, Deployments, and ReplicaSets in the extensions/v1beta1 API version are no longer served. You can use the apps/v1 API version.
              • NetworkPolicies in the extensions/v1beta1 API version are no longer served. Migrate to use the networking.k8s.io/v1 API version.
              • PodSecurityPolicies in the extensions/v1beta1 API version are no longer served. Migrate to use the policy/v1beta1 API version.
              • Ingresses in the extensions/v1beta1 API version will no longer be served in v1.20. Migrate to use the networking.k8s.io/v1beta1 API version.
              • PriorityClass in the scheduling.k8s.io/v1beta1 and scheduling.k8s.io/v1alpha1 API versions is no longer served in v1.17. Migrate to use the scheduling.k8s.io/v1 API version.
              • The event series.state field in the events.k8s.io/v1beta1 API version has been deprecated and will be removed from v1.18.
              • CustomResourceDefinition in the apiextensions.k8s.io/v1beta1 API version has been deprecated and will no longer be served in v1.19. Use the apiextensions.k8s.io/v1 API version.
              • MutatingWebhookConfiguration and ValidatingWebhookConfiguration in the admissionregistration.k8s.io/v1beta1 API version have been deprecated and will no longer be served in v1.19. You can use the admissionregistration.k8s.io/v1 API version.
              • The rbac.authorization.k8s.io/v1alpha1 and rbac.authorization.k8s.io/v1beta1 API versions have been deprecated and will no longer be served in v1.20. Use the rbac.authorization.k8s.io/v1 API version.
              • The CSINode object of storage.k8s.io/v1beta1 has been deprecated and will be removed in later versions.
              +
              +
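As referenced in the first item of the list above, migrating a workload usually only requires updating the apiVersion field and adding the fields that apps/v1 makes mandatory, such as spec.selector for Deployments. A minimal sketch with hypothetical names:

apiVersion: apps/v1          # previously extensions/v1beta1 or apps/v1beta1
kind: Deployment
metadata:
  name: nginx-demo           # hypothetical name
spec:
  replicas: 2
  selector:                  # mandatory in apps/v1
    matchLabels:
      app: nginx-demo
  template:
    metadata:
      labels:
        app: nginx-demo
    spec:
      containers:
      - name: nginx
        image: nginx:1.20    # hypothetical image tag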

              Other Deprecations and Removals

              • OutOfDisk node condition is removed in favor of DiskPressure.
              • The scheduler.alpha.kubernetes.io/critical-pod annotation is removed in favor of priorityClassName.
              • beta.kubernetes.io/os and beta.kubernetes.io/arch have been deprecated in v1.14 and will be removed in v1.18.
              • Do not use --node-labels to set labels prefixed with kubernetes.io and k8s.io. The kubernetes.io/availablezone label in earlier versions is removed in v1.17 and changed to failure-domain.beta.kubernetes.io/zone.
• The beta.kubernetes.io/instance-type label is deprecated in favor of node.kubernetes.io/instance-type.
• The {kubelet_root_dir}/plugins path has been removed.
• The built-in cluster roles system:csi-external-provisioner and system:csi-external-attacher have been removed.
              +
              +

              References

              For more details about the performance comparison and function evolution between Kubernetes 1.17 and other versions, see the following documents:

              + +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0477.html b/docs/cce/umn/cce_10_0477.html new file mode 100644 index 00000000..e2153637 --- /dev/null +++ b/docs/cce/umn/cce_10_0477.html @@ -0,0 +1,23 @@ + + +

              Service Account Token Security Improvement

              +

In clusters earlier than v1.21, a token is obtained by mounting the secret of the service account to a pod. Tokens obtained this way are permanent. This approach is no longer recommended starting from version 1.21, and secrets are no longer automatically created for service accounts in clusters of version 1.25 and later.

              +

In clusters of version 1.21 or later, you can use the TokenRequest API to obtain a token and mount it to the pod through a projected volume. Such tokens are valid for a fixed period (one hour by default). Before expiration, kubelet refreshes the token to ensure that the pod always uses a valid one. When the pod that mounts the token is deleted, the token automatically becomes invalid. This approach is implemented by the BoundServiceAccountTokenVolume feature to improve the security of service account tokens. Kubernetes clusters of v1.21 and later enable this approach by default.

              +
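The following is a minimal sketch (hypothetical names and image) of requesting a bound, time-limited token through a projected volume. In clusters of v1.21 or later the default service account token mounted by kubelet already works this way, so an explicit declaration like this is only needed when you want a custom path, audience, or validity period:

apiVersion: v1
kind: Pod
metadata:
  name: token-demo                  # hypothetical name
spec:
  serviceAccountName: default
  containers:
  - name: app
    image: nginx                    # hypothetical image
    volumeMounts:
    - name: bound-token
      mountPath: /var/run/secrets/tokens
  volumes:
  - name: bound-token
    projected:
      sources:
      - serviceAccountToken:
          path: token
          expirationSeconds: 3600   # kubelet refreshes the token before it expires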

For a smooth transition, the community has extended the default token validity period to one year. After one year, the token becomes invalid, and clients that do not support token reloading cannot access the API server. It is recommended that clients of earlier versions be upgraded as soon as possible; otherwise, service faults may occur.

              +

If you use a Kubernetes client of an outdated version, token reloading may fail. The versions of officially supported Kubernetes client libraries that are able to reload tokens are as follows:

              +
              • Go: >= v0.15.7
              • Python: >= v12.0.0
              • Java: >= v9.0.0
              • Javascript: >= v0.10.3
              • Ruby: master branch
              • Haskell: v0.3.0.0
              • C#: >= 7.0.5
              +

              For details, visit https://github.com/kubernetes/enhancements/tree/master/keps/sig-auth/1205-bound-service-account-tokens.

              +

              If you need a token that never expires, you can also manually manage secrets for service accounts. Although a permanent service account token can be manually created, you are advised to use a short-lived token by calling the TokenRequest API for higher security.

              +
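If a long-lived token is unavoidable, a secret of type kubernetes.io/service-account-token can be created manually and the control plane fills in the token. A minimal sketch, assuming a service account named build-robot already exists:

apiVersion: v1
kind: Secret
metadata:
  name: build-robot-token                              # hypothetical name
  annotations:
    kubernetes.io/service-account.name: build-robot    # existing service account
type: kubernetes.io/service-account-token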
              +

              Diagnosis

              Run the following steps to check your CCE clusters of v1.21 and later:

              +
              1. Use kubectl to connect to the cluster and run the kubectl get --raw "/metrics" | grep stale command to query the metrics. Check the metric named serviceaccount_stale_tokens_total.

If the value is greater than 0, some workloads in the cluster may be using an earlier client-go version. In this case, check whether this problem occurs in your deployed applications. If it does, upgrade client-go to the version specified by the community as soon as possible, and ensure that the client library version is not too far behind your CCE cluster version. For example, if your cluster version is 1.23, the Kubernetes dependency library version must be at least 1.19.

                +

                +
              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0477_0.html b/docs/cce/umn/cce_10_0477_0.html new file mode 100644 index 00000000..53c90ce8 --- /dev/null +++ b/docs/cce/umn/cce_10_0477_0.html @@ -0,0 +1,23 @@ + + +

              Service Account Token Security Improvement

              +

In clusters earlier than v1.21, a token is obtained by mounting the secret of the service account to a pod. Tokens obtained this way are permanent. This approach is no longer recommended starting from version 1.21, and secrets are no longer automatically created for service accounts in clusters of version 1.25 and later.

              +

In clusters of version 1.21 or later, you can use the TokenRequest API to obtain a token and mount it to the pod through a projected volume. Such tokens are valid for a fixed period (one hour by default). Before expiration, kubelet refreshes the token to ensure that the pod always uses a valid one. When the pod that mounts the token is deleted, the token automatically becomes invalid. This approach is implemented by the BoundServiceAccountTokenVolume feature to improve the security of service account tokens. Kubernetes clusters of v1.21 and later enable this approach by default.

              +

For a smooth transition, the community has extended the default token validity period to one year. After one year, the token becomes invalid, and clients that do not support token reloading cannot access the API server. It is recommended that clients of earlier versions be upgraded as soon as possible; otherwise, service faults may occur.

              +

If you use a Kubernetes client of an outdated version, token reloading may fail. The versions of officially supported Kubernetes client libraries that are able to reload tokens are as follows:

              +
              • Go: >= v0.15.7
              • Python: >= v12.0.0
              • Java: >= v9.0.0
              • Javascript: >= v0.10.3
              • Ruby: master branch
              • Haskell: v0.3.0.0
              • C#: >= 7.0.5
              +

              For details, visit https://github.com/kubernetes/enhancements/tree/master/keps/sig-auth/1205-bound-service-account-tokens.

              +

              If you need a token that never expires, you can also manually manage secrets for service accounts. Although a permanent service account token can be manually created, you are advised to use a short-lived token by calling the TokenRequest API for higher security.

              +
              +

              Diagnosis

              Run the following steps to check your CCE clusters of v1.21 and later:

              +
              1. Use kubectl to connect to the cluster and run the kubectl get --raw "/metrics" | grep stale command to query the metrics. Check the metric named serviceaccount_stale_tokens_total.

If the value is greater than 0, some workloads in the cluster may be using an earlier client-go version. In this case, check whether this problem occurs in your deployed applications. If it does, upgrade client-go to the version specified by the community as soon as possible, and ensure that the client library version is not too far behind your CCE cluster version. For example, if your cluster version is 1.23, the Kubernetes dependency library version must be at least 1.19.

                +

                +
              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_10_0551.html b/docs/cce/umn/cce_10_0551.html new file mode 100644 index 00000000..93edd210 --- /dev/null +++ b/docs/cce/umn/cce_10_0551.html @@ -0,0 +1,19 @@ + + + +

              CPU Core Binding

              + +

              +
              + +
              + + + +
              + diff --git a/docs/cce/umn/cce_10_0553.html b/docs/cce/umn/cce_10_0553.html new file mode 100644 index 00000000..ea69ff6e --- /dev/null +++ b/docs/cce/umn/cce_10_0553.html @@ -0,0 +1,13 @@ + + +

              Logging

              +
              + + diff --git a/docs/cce/umn/cce_10_0557.html b/docs/cce/umn/cce_10_0557.html new file mode 100644 index 00000000..c5743b1f --- /dev/null +++ b/docs/cce/umn/cce_10_0557.html @@ -0,0 +1,16 @@ + + +

              Log Management Overview

              +

CCE allows you to configure policies for periodically collecting, managing, and analyzing workload logs to prevent logs from becoming oversized.

              +
              • Using ICAgent:

                By default, the ICAgent collects container standard outputs (stdout logs). No configuration required.

                +

                You can also configure the path for storing container logs when creating a workload so that the ICAgent collects logs from this path.

                +
You can select either of the following modes for container logs (a minimal mount example follows this list):
                • hostPath: A host path is mounted to the specified container path (mount path). In the node host path, you can view the container logs output into the mount path.
                • emptyDir: A temporary path of the node is mounted to the specified path (mount path). Log data that exists in the temporary path but is not reported by the collector to AOM will disappear after the pod is deleted.
                +
                +
              +
              +
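As mentioned in the list above, the two modes correspond to ordinary Kubernetes volume types. The following is a minimal sketch (hypothetical names and image, plain Kubernetes syntax rather than the CCE console wizard) that mounts an emptyDir volume at the container log path:

apiVersion: v1
kind: Pod
metadata:
  name: log-demo                    # hypothetical name
spec:
  containers:
  - name: app
    image: nginx                    # hypothetical image
    volumeMounts:
    - name: app-log
      mountPath: /var/log/app       # path from which logs are collected
  volumes:
  - name: app-log
    emptyDir: {}                    # use hostPath instead for the hostPath mode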
              + +
              + diff --git a/docs/cce/umn/cce_10_0602.html b/docs/cce/umn/cce_10_0602.html new file mode 100644 index 00000000..4e68567a --- /dev/null +++ b/docs/cce/umn/cce_10_0602.html @@ -0,0 +1,23 @@ + + +

              Cluster Overload Control

              +

              Scenario

              If overload control is enabled, concurrent requests are dynamically controlled based on the resource pressure of master nodes to keep them and the cluster available.

              +
              +

              Notes and Constraints

              The cluster version must be 1.23 or later.

              +
              +

              Enabling Overload Control

              Method 1: Enabling it when creating a cluster

              +

When creating a cluster of v1.23 or later, you can enable overload control during cluster creation.

              +

              +

              Method 2: Enabling it in an existing cluster

              +
              1. Log in to the CCE console and go to an existing cluster whose version is v1.23 or later.
              2. On the cluster information page, view the master node information. If overload control is not enabled, a message is displayed. You can click Enable to enable the function.

                +

              +
              +

              Disabling Cluster Overload Control

              1. Log in to the CCE console and go to an existing cluster whose version is v1.23 or later.
              2. On the Cluster Information page, click Manage in the upper right corner.
              3. Set support-overload to false under kube-apiserver.
              4. Click OK.
              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_bestpractice.html b/docs/cce/umn/cce_bestpractice.html new file mode 100644 index 00000000..73105f9e --- /dev/null +++ b/docs/cce/umn/cce_bestpractice.html @@ -0,0 +1,33 @@ + + + +

              Best Practice

              + +

              +
              + + + diff --git a/docs/cce/umn/cce_bestpractice_00002.html b/docs/cce/umn/cce_bestpractice_00002.html new file mode 100644 index 00000000..3f04189c --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_00002.html @@ -0,0 +1,74 @@ + + +

              Properly Allocating Container Computing Resources

              +

If a node has sufficient memory, a container on this node can use more memory than it requests, but no more than its limit. If a container uses memory beyond its limit, it becomes a candidate for termination. If the container continues to consume memory beyond its limit, it is terminated. If a terminated container can be restarted, kubelet restarts it, as with any other type of runtime failure.

              +

              Scenario 1

              The node's memory has reached the memory limit reserved for the node. As a result, OOM killer is triggered.

              +

              Solution

              +

              You can either scale up the node or migrate the pods on the node to other nodes.

              +
              +

              Scenario 2

              The upper limit of resources configured for the pod is too small. When the actual usage exceeds the limit, OOM killer is triggered.

              +

              Solution

              +

Set a higher upper limit for the workload, as shown in the sketch below.

              +
              +
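A minimal sketch (hypothetical names and values) of raising the memory limit in the container specification of the workload:

apiVersion: v1
kind: Pod
metadata:
  name: memory-demo                 # hypothetical name
spec:
  containers:
  - name: app
    image: nginx                    # hypothetical image
    resources:
      requests:
        memory: 256Mi
      limits:
        memory: 1Gi                 # raised from a smaller limit that was being exceeded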

              Example

In this example, a pod is created that attempts to allocate more memory than its limit. As shown in the following pod configuration file, the container requests 50 MiB of memory and its memory limit is set to 100 MiB.

              +

              Example YAML file (memory-request-limit-2.yaml):

              +
              apiVersion: v1
              +kind: Pod
              +metadata:
              +  name: memory-demo-2
              +spec:
              +  containers:
              +  - name: memory-demo-2-ctr
              +    image: vish/stress
              +    resources:
              +      requests:
              +        memory: 50Mi
              +      limits:
              +        memory: "100Mi"
              +    args:
              +    - -mem-total
              +    - 250Mi
              +    - -mem-alloc-size
              +    - 10Mi
              +    - -mem-alloc-sleep
              +    - 1s
              +
              +

The args parameters indicate that the container attempts to allocate 250 MiB of memory, which exceeds the container's limit (100 MiB).

              +

              Creating a pod:

              +
              kubectl create -f https://k8s.io/docs/tasks/configure-pod-container/memory-request-limit-2.yaml --namespace=mem-example 
              +

              Viewing the details about the pod:

              +
              kubectl get pod memory-demo-2 --namespace=mem-example 
              +

              In this stage, the container may be running or be killed. If the container is not killed, repeat the previous command until the container is killed.

              +
              NAME            READY     STATUS      RESTARTS   AGE 
              +memory-demo-2   0/1       OOMKilled   1          24s
              +

              Viewing detailed information about the container:

              +
              kubectl get pod memory-demo-2 --output=yaml --namespace=mem-example 
              +

              This output indicates that the container is killed because the memory limit is exceeded.

              +
              lastState:
              +   terminated:
              +     containerID: docker://7aae52677a4542917c23b10fb56fcb2434c2e8427bc956065183c1879cc0dbd2
              +     exitCode: 137
              +     finishedAt: 2020-02-20T17:35:12Z
              +     reason: OOMKilled
              +     startedAt: null
              +

              In this example, the container can be automatically restarted. Therefore, kubelet will start it again. You can run the following command several times to see how the container is killed and started:

              +
              kubectl get pod memory-demo-2 --namespace=mem-example
              +

The output shows that the container is repeatedly killed and restarted:

              +
              $ kubectl get pod memory-demo-2 --namespace=mem-example 
              +NAME            READY     STATUS      RESTARTS   AGE 
              +memory-demo-2   0/1       OOMKilled   1          37s
              +$ kubectl get pod memory-demo-2 --namespace=mem-example 
              +NAME            READY     STATUS    RESTARTS   AGE 
              +memory-demo-2   1/1       Running   2          40s
              +

              Viewing the historical information of the pod:

              +
              kubectl describe pod memory-demo-2 --namespace=mem-example 
              +

              The following command output indicates that the pod is repeatedly killed and started.

              +
              ... Normal  Created   Created container with id 66a3a20aa7980e61be4922780bf9d24d1a1d8b7395c09861225b0eba1b1f8511 
              +... Warning BackOff   Back-off restarting failed container
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_bestpractice_00004.html b/docs/cce/umn/cce_bestpractice_00004.html index 70d47857..9780e5bb 100644 --- a/docs/cce/umn/cce_bestpractice_00004.html +++ b/docs/cce/umn/cce_bestpractice_00004.html @@ -1,55 +1,61 @@ -

              Planning CIDR Blocks for a CCE Cluster

              -

              Before creating a cluster on CCE, determine the number of VPCs, number of subnets, container CIDR blocks, and Services for access based on service requirements.

              -

              This section describes the functions of various addresses in a CCE cluster in a VPC and how to plan CIDR blocks.

              -

              Basic Concepts

              VPC CIDR Block

              -

              Virtual Private Cloud (VPC) enables you to provision logically isolated, configurable, and manageable virtual networks for cloud servers, cloud containers, and cloud databases. You have complete control over your virtual network, including selecting your own CIDR block, creating subnets, and configuring security groups. You can also assign EIPs and allocate bandwidth in your VPC for secure and easy access to your business system.

              -

              Subnet CIDR Block

              -

              A subnet is a network that manages ECS network planes. It supports IP address management and DNS. The IP addresses of all ECSs in a subnet belong to the subnet.

              -
              Figure 1 VPC CIDR block architecture
              -

              By default, ECSs in all subnets of the same VPC can communicate with one another, while ECSs in different VPCs cannot communicate with each other.

              -

              You can create VPC peering connections to enable ECSs in different VPCs to communicate with one another.

              -

              Container (Pod) CIDR Block

              -

              Pod is a Kubernetes object. Each pod has an IP address.

              -

              When creating a cluster on CCE, you can specify the pod (container) CIDR block, which cannot overlap with the subnet CIDR block. For example, if the subnet CIDR block is 192.168.0.0/16, the container CIDR block cannot be 192.168.0.0/18 or 192.168.1.0/18, because these addresses are included in 192.168.0.0/16.

              -

              Service CIDR Block

              -

              Service is also a Kubernetes object. Each Service has an address. When creating a cluster on CCE, you can specify the Service CIDR block. Similarly, the Service CIDR block cannot overlap with the subnet CIDR block or the container CIDR block. The Service CIDR block can be used only within a cluster.

              -

              For details about the relationship between these CIDR blocks, see Figure 2.

              +

              Planning CIDR Blocks for a Cluster

              +

              Before creating a cluster on CCE, determine the number of VPCs, number of subnets, container CIDR blocks, and Services for access based on service requirements.

              +

              This topic describes the addresses in a CCE cluster in a VPC and how to plan CIDR blocks.

              +

              Notes and Constraints

              To access a CCE cluster through a VPN, ensure that the VPN does not conflict with the VPC CIDR block where the cluster resides and the container CIDR block.

              -

              How Do I Select a CIDR Block?

              Single-VPC Single-Cluster Scenarios

              -

              These are the simplest scenarios. The VPC CIDR block is determined when the VPC is created. When creating a CCE cluster, select a CIDR block different from that of the current VPC.

              -
              Figure 2 CIDR block in the single-VPC single-cluster scenario
              -

              Single-VPC Multi-Cluster Scenarios

              -

              Multiple CCE clusters are created in a VPC.

              -

              In the VPC network mode, pod packets are forwarded through VPC routes. CCE automatically configures a routing table on the VPC routes to each container CIDR block.

              -

              Pay attention to the following:

              -
              • The VPC address is determined during VPC creation. When creating a cluster, select a CIDR block for each cluster that does not overlap with the VPC CIDR block or other container CIDR blocks.
              • The container CIDR blocks of all clusters cannot overlap, but the Service CIDR blocks can. In this case, CCE clusters are partially interconnected. A pod of a cluster can directly access the pods of another cluster, but cannot access the Services of the cluster.
              • The network scale is limited by the VPC route table.
              -
              Figure 3 VPC network - multi-cluster scenario
              -

              In the tunnel network model, the container network is an overlay network plane deployed over the VPC network. Though at some cost of performance, the tunnel encapsulation enables higher interoperability and compatibility with advanced features (such as network policy-based isolation), meeting the requirements of most applications.

              -
              Figure 4 Tunnel network - multi-cluster scenario
              -

              Pay attention to the following:

              -
              • The VPC address is determined during VPC creation. When creating a cluster, select a CIDR block for each cluster that does not overlap with the VPC CIDR block or other container CIDR blocks.
              • The container CIDR blocks of all clusters can overlap, so do the Service CIDR blocks.
              • It is recommended that ELB be used for the cross-cluster access between containers.
              -

              VPC Interconnection Scenarios

              -

              When two VPC networks are interconnected, you can configure the packets to be sent to the peer VPC in the route table.

              -

              In the VPC network model, after creating a peering connection, you need to add routes for the peering connection to enable communication between the two VPCs.

              -
              Figure 5 VPC Network - VPC interconnection scenario
              -

              To interconnect cluster containers across VPCs, you need to create VPC peering connections.

              -

              Pay attention to the following:

              -
              • The VPC address is determined during VPC creation. When creating a cluster, select a CIDR block for each cluster that does not overlap with the VPC CIDR block or other container CIDR blocks.
              • The container CIDR blocks of all clusters cannot overlap, but the Service CIDR blocks can.
              • Add the peer container CIDR block to the route table of the VPC peering connection. The following is an example:
                Figure 6 Adding the peer container CIDR block to the local route on the VPC console
                +

                Basic Concepts

                • VPC CIDR Block

                  Virtual Private Cloud (VPC) enables you to provision logically isolated, configurable, and manageable virtual networks for cloud servers, cloud containers, and cloud databases. You have complete control over your virtual network, including selecting your own CIDR block, creating subnets, and configuring security groups. You can also assign EIPs and allocate bandwidth in your VPC for secure and easy access to your business system.

                  +
                • Subnet CIDR Block
                  A subnet is a network that manages ECS network planes. It supports IP address management and DNS. The IP addresses of all ECSs in a subnet belong to the subnet.
                  Figure 1 VPC CIDR block architecture
                  +
                  +

                  By default, ECSs in all subnets of the same VPC can communicate with one another, while ECSs in different VPCs cannot communicate with each other.

                  +

                  You can create a peering connection on VPC to enable ECSs in different VPCs to communicate with each other.

                  +
                • Container (Pod) CIDR Block

                  Pod is a Kubernetes concept. Each pod has an IP address.

                  +

                  When creating a cluster on CCE, you can specify the pod (container) CIDR block, which cannot overlap with the subnet CIDR block. For example, if the subnet CIDR block is 192.168.0.0/16, the container CIDR block cannot be 192.168.0.0/18 or 192.168.1.0/18, because these addresses are included in 192.168.0.0/16.

                  +
                • Container Subnet (Only for CCE Turbo Clusters)

                  In a CCE Turbo cluster, a container is assigned an IP address from the CIDR block of a VPC. The container subnet can overlap with the subnet CIDR block. Note that the subnet you select determines the maximum number of pods in the cluster. After a cluster is created, you can only add container subnets but cannot delete them.

                  +
                • Service CIDR Block

                  Service is also a Kubernetes concept. Each Service has an address. When creating a cluster on CCE, you can specify the Service CIDR block. Similarly, the Service CIDR block cannot overlap with the subnet CIDR block or the container CIDR block. The Service CIDR block can be used only within a cluster.

                -

                In the tunnel network model, after creating a peering connection, you need to add routes for the peering connection to enable communication between the two VPCs.

                -
                Figure 7 Tunnel network - VPC interconnection scenario
                -

                Pay attention to the following:

                -
                • The VPC address is determined during VPC creation. When creating a cluster, select a CIDR block for each cluster that does not overlap with the VPC CIDR block or other container CIDR blocks.
                • The container CIDR blocks of all clusters cannot overlap, but the Service CIDR blocks can.
                • Add the peer subnet CIDR block to the route table of the VPC peering connection. The following is an example:
                  Figure 8 Adding the subnet CIDR block of the peer cluster node to the local route on the VPC console
                  -
                -

                VPC-IDC Scenarios

                -

                Similar to the VPC interconnection scenario, some CIDR blocks in the VPC are routed to the IDC. The pod IP addresses of CCE clusters cannot overlap with the addresses within these CIDR blocks. To access the pod IP addresses in the cluster in the IDC, you need to configure the route table to the private line VBR on the IDC.

                +
                +

                Single-VPC Single-Cluster Scenarios

CCE clusters include clusters using the VPC network model and the container tunnel network model. Figure 2 shows the CIDR block planning of such a cluster.
                • VPC CIDR Block: specifies the VPC CIDR block where the cluster resides. The size of this CIDR block affects the maximum number of nodes that can be created in the cluster.
                • Subnet CIDR Block: specifies the subnet CIDR block where the node in the cluster resides. The subnet CIDR block is included in the VPC CIDR block. Different nodes in the same cluster can be allocated to different subnet CIDR blocks.
                • Container CIDR Block: cannot overlap with the subnet CIDR block.
                • Service CIDR Block: cannot overlap with the subnet CIDR block or the container CIDR block.
                +
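For example, the following plan (illustrative values only) satisfies all of the rules above because none of the CIDR blocks overlap:
• VPC CIDR block: 192.168.0.0/16
• Subnet CIDR block: 192.168.0.0/24
• Container CIDR block: 172.16.0.0/16
• Service CIDR block: 10.247.0.0/16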
                Figure 2 Network CIDR block planning in the single-VPC single-cluster scenario (CCE cluster)
                +
                +
                Figure 3 shows the CIDR block planning for a CCE Turbo cluster (cloud native network 2.0).
                • VPC CIDR Block: specifies the VPC CIDR block where the cluster resides. The size of this CIDR block affects the maximum number of nodes that can be created in the cluster.
                • Subnet CIDR Block: specifies the subnet CIDR block where the node in the cluster resides. The subnet CIDR block is included in the VPC CIDR block. Different nodes in the same cluster can be allocated to different subnet CIDR blocks.
                • Container Subnet CIDR Block: The container subnet is included in the VPC CIDR block and can overlap with the subnet CIDR block or even be the same as the subnet CIDR block. Note that the container subnet size determines the maximum number of containers in the cluster because IP addresses in the VPC are directly allocated to containers. After a cluster is created, you can only add container subnets but cannot delete them. You are advised to set a larger IP address segment for the container subnet to prevent insufficient container IP addresses.
                • Service CIDR Block: cannot overlap with the subnet CIDR block or the container CIDR block.
                +
                Figure 3 CIDR block planning in the single-VPC single-cluster scenario (CCE Turbo cluster)
                +
                +
                +

                Single-VPC Multi-Cluster Scenarios

                VPC network model

                +
Pod packets are forwarded through VPC routes. CCE automatically adds a route to each container CIDR block in the VPC route table. The network scale is limited by the VPC route table. Figure 4 shows the CIDR block planning of the cluster.
                • VPC CIDR Block: specifies the VPC CIDR block where the cluster resides. The size of this CIDR block affects the maximum number of nodes that can be created in the cluster.
                • Subnet CIDR Block: The subnet CIDR block in each cluster cannot overlap with the container CIDR block.
                • Container CIDR Block: If multiple VPC network model clusters exist in a single VPC, the container CIDR blocks of all clusters cannot overlap because the clusters use the same routing table. In this case, CCE clusters are partially interconnected. A pod of a cluster can directly access the pods of another cluster, but cannot access the Services of the cluster.
• Service CIDR Block: can be used only within a cluster. Therefore, the Service CIDR blocks of different clusters can overlap, but they cannot overlap with the subnet CIDR block or container CIDR block of their own cluster.
                +
                Figure 4 VPC network - multi-cluster scenario
                +
                +

                Tunnel Network

                +
                Though at some cost of performance, the tunnel encapsulation enables higher interoperability and compatibility with advanced features (such as network policy-based isolation), meeting the requirements of most applications. Figure 5 shows the CIDR block planning of the cluster.
                • VPC CIDR Block: specifies the VPC CIDR block where the cluster resides. The size of this CIDR block affects the maximum number of nodes that can be created in the cluster.
                • Subnet CIDR Block: The subnet CIDR block in each cluster cannot overlap with the container CIDR block.
                • Container CIDR Block: The container CIDR blocks of all clusters can overlap. In this case, pods in different clusters cannot be directly accessed using IP addresses. It is recommended that ELB be used for the cross-cluster access between containers.
• Service CIDR Block: can be used only within a cluster. Therefore, the Service CIDR blocks of different clusters can overlap, but they cannot overlap with the subnet CIDR block or container CIDR block of their own cluster.
                +
                Figure 5 Tunnel network - multi-cluster scenario
                +
                +

                Cloud native network 2.0 network model (CCE Turbo cluster)

                +
                In this mode, container IP addresses are allocated from the VPC CIDR block. ELB passthrough networking is supported to direct access requests to containers. Security groups and multiple types of VPC networks can be bound to deliver high performance.
                • VPC CIDR Block: specifies the VPC CIDR block where the cluster resides. In a CCE Turbo cluster, the CIDR block size affects the total number of nodes and containers that can be created in the cluster.
                • Subnet CIDR Block: There is no special restriction on the subnet CIDR blocks in CCE Turbo clusters.
                • Container Subnet: The CIDR block of the container subnet is included in the VPC CIDR block. Container subnets in different clusters can overlap with each other or overlap with the subnet CIDR block. However, you are advised to stagger the container CIDR blocks of different clusters and ensure that the container subnet CIDR blocks have sufficient IP addresses. In this case, pods in different clusters can directly access each other through IP addresses.
                • Service CIDR Block: can be used only in clusters. Therefore, the service CIDR blocks of different clusters can overlap, but cannot overlap with the subnet CIDR block or container CIDR block.
                +
                Figure 6 Cloud native network 2.0 network model - multi-cluster scenario
                +
                +

                Coexistence of Clusters in Multi-Network

                +

                When a VPC contains clusters created with different network models, comply with the following rules when creating a cluster:

                +
                • VPC CIDR Block: In this scenario, all clusters are located in the same VPC CIDR block. Ensure that there are sufficient available IP addresses in the VPC.
• Subnet CIDR Block: Ensure that the subnet CIDR block does not overlap with the container CIDR block. In some scenarios (for example, coexistence with CCE Turbo clusters), the subnet CIDR block is allowed to overlap with the container (subnet) CIDR block, but this is not recommended.
                • Container CIDR Block: Ensure that the container CIDR blocks of clusters in VPC network model do not overlap.
                • Service CIDR Block: The service CIDR blocks of all clusters can overlap, but cannot overlap with the subnet CIDR block and container CIDR block of the cluster.
                +
                +

                Cross-VPC Cluster Interconnection

                When two VPC networks are interconnected, you can configure the packets to be sent to the peer VPC in the route table.

                +

                In the VPC network model, after creating a peering connection, you need to add routes for the peering connection to enable communication between the two VPCs.

                +
                Figure 7 VPC Network - VPC interconnection scenario
                +

                When creating a VPC peering connection between containers across VPCs, pay attention to the following points:

                +
• The CIDR blocks of the VPCs to which the clusters belong must not overlap. In each cluster, the subnet CIDR block cannot overlap with the container CIDR block.
                • The container CIDR blocks of clusters cannot overlap, but the Service CIDR blocks can.
                • You need to add not only the peer VPC CIDR block but also the peer container CIDR block to the VPC routing tables at both ends. Note that this operation must be performed in the VPC route tables of the clusters.
                +
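For example (illustrative values), if cluster A runs in VPC A (192.168.0.0/16, container CIDR block 10.0.0.0/16) and cluster B runs in VPC B (172.16.0.0/16, container CIDR block 10.1.0.0/16), the route table of VPC A needs routes to 172.16.0.0/16 and 10.1.0.0/16 through the peering connection, and the route table of VPC B needs routes to 192.168.0.0/16 and 10.0.0.0/16.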

                In the tunnel network model, after creating a peering connection, you need to add routes for the peering connection to enable communication between the two VPCs.

                +
                Figure 8 Tunnel network - VPC interconnection scenario
                +

                Pay attention to the following:

                +
• The CIDR blocks of the VPCs to which the clusters belong must not overlap.
• The container CIDR blocks of all clusters can overlap, and so can the Service CIDR blocks.
                • Add the peer subnet CIDR block to the route table of the VPC peering connection.
                +

In Cloud Native Network 2.0 mode, after creating a VPC peering connection, you only need to add routes for the VPC peering connection to enable communication between the two VPCs. Ensure that the CIDR blocks of the VPCs to which the clusters belong do not overlap.

                +
                +

                VPC-IDC Scenarios

                Similar to the VPC interconnection scenario, some CIDR blocks in the VPC are routed to the IDC. The pod IP addresses of CCE clusters cannot overlap with the addresses within these CIDR blocks. To access the pod IP addresses in the cluster in the IDC, you need to configure the route table to the private line VBR on the IDC.

              diff --git a/docs/cce/umn/cce_bestpractice_00006.html b/docs/cce/umn/cce_bestpractice_00006.html new file mode 100644 index 00000000..171079eb --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_00006.html @@ -0,0 +1,186 @@ + + +

              Checklist for Deploying Containerized Applications in the Cloud

              +

              Overview

              Security, efficiency, stability, and availability are common requirements on all cloud services. To meet these requirements, the system availability, data reliability, and O&M stability must be perfectly coordinated. This checklist describes the check items for deploying containerized applications on the cloud to help you efficiently migrate services to CCE, reducing potential cluster or application exceptions caused by improper use.

              +
              +

              Check Items

              +
              + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
              Table 1 System availability

              Category

              +

              Check Item

              +

              Type

              +

              Impact

              +

              Cluster

              +

              Before creating a cluster, properly plan the node network and container network based on service requirements to allow subsequent service expansion.

              +

              Network planning

              +

              If the subnet or container CIDR block where the cluster resides is small, the number of available nodes supported by the cluster may be less than required.

              +

              Before creating a cluster, properly plan CIDR blocks for the related Direct Connect, peering connection, container network, service network, and subnet to avoid IP address conflicts.

              +

              Network planning

              +

              If CIDR blocks are not properly set and IP address conflicts occur, service access will be affected.

              +

              When a cluster is created, the default security group is automatically created and bound to the cluster. You can set custom security group rules based on service requirements.

              +

              Deployment

              +

              Security groups are key to security isolation. Improper security policy configuration may cause security risks and service connectivity problems.

              +

              Enable the multi-master node mode, and set the number of master nodes to 3 when creating a cluster.

              +

              Reliability

              +

              After the multi-master node mode is enabled, three master nodes will be created. If a master node is faulty, the cluster can still be available without affecting service functions. In commercial scenarios, it is advised to enable the multi-master node mode.

              +

              When creating a cluster, select a proper network model, such as container tunnel network or VPC network.

              +

              Deployment

              +

              After a cluster is created, the network model cannot be changed. Exercise caution when selecting a network model.

              +

              Workload

              +

When creating a workload, you need to set the CPU and memory limits to improve service robustness (see the manifest sketch after this table).

              +

              Deployment

              +

              When multiple applications are deployed on the same node, if the upper and lower resource limits are not set for an application, resource leakage occurs. As a result, resources cannot be allocated to other applications, and the application monitoring information will be inaccurate.

              +

              When creating a workload, you can set probes for container health check, including liveness probe and readiness probe.

              +

              Reliability

              +

              If the health check function is not configured, a pod cannot detect service exceptions or automatically restart the service to restore it. This results in a situation where the pod status is normal but the service in the pod is abnormal.

              +

              When creating a workload, select a proper access mode (Service). Currently, the following types of Services are supported: ClusterIP, NodePort, and LoadBalancer.

              +

              Deployment

              +

              Improper Service configuration may cause logic confusion for internal and external access and resource waste.

              +

When creating a workload, do not run the workload with only a single pod replica. Set a proper node scheduling policy based on your service requirements.

              +

              Reliability

              +

For example, if a workload runs only a single pod replica, the service will become abnormal when the node or pod running it becomes abnormal. To ensure that your pods can be successfully scheduled, ensure that the node has idle resources for container scheduling after you set the scheduling rule.

              +

              Properly set affinity and anti-affinity.

              +

              Reliability

              +

              If affinity and anti-affinity are both configured for an application that provides Services externally, Services may fail to be accessed after the application is upgraded or restarted.

              +

              When creating a workload, set the pre-stop processing command (Lifecycle > Pre-Stop) to ensure that the services running in the pods can be completed in advance in the case of application upgrade or pod deletion.

              +

              Reliability

              +

              If the pre-stop processing command is not configured, the pod will be directly killed and services will be interrupted during application upgrade.

              +
              +
              + +
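Several check items in Table 1, such as resource limits, health check probes, and the pre-stop hook, map directly to fields in the workload manifest. The following is a minimal sketch with hypothetical names, paths, and values:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-demo                        # hypothetical name
spec:
  replicas: 2                           # avoid single-replica workloads
  selector:
    matchLabels:
      app: web-demo
  template:
    metadata:
      labels:
        app: web-demo
    spec:
      containers:
      - name: web
        image: nginx:1.20               # hypothetical image
        resources:
          requests:
            cpu: 250m
            memory: 256Mi
          limits:
            cpu: 500m
            memory: 512Mi
        livenessProbe:                  # restart the container if it stops responding
          httpGet:
            path: /healthz              # hypothetical health path
            port: 80
          initialDelaySeconds: 10
        readinessProbe:                 # remove the pod from Service endpoints until it is ready
          httpGet:
            path: /ready                # hypothetical readiness path
            port: 80
        lifecycle:
          preStop:                      # allow in-flight requests to finish before termination
            exec:
              command: ["sh", "-c", "sleep 10"]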
              + + + + + + + + + + + + + + + + +
              Table 2 Data reliability

              Category

              +

              Check Item

              +

              Type

              +

              Impact

              +

              Container data persistency

              +

              Select a proper data volume type based on service requirements.

              +

              Reliability

              +

              When a node is faulty and cannot be recovered, data in the local disk cannot be recovered. Therefore, you are advised to use cloud storage volumes to ensure data reliability.

              +

              Backup

              +

              Back up application data.

              +

              Reliability

              +

              Data cannot be restored after being lost.

              +
              +
              + +
              + + + + + + + + + + + + + + + + + + + + + + + + +
              Table 3 O&M reliability

              Category

              +

              Check Item

              +

              Type

              +

              Impact

              +

              Project

              +

              The quotas of ECS, VPC, subnet, EIP, and EVS resources must meet customer requirements.

              +

              Deployment

              +

              If the quota is insufficient, resources will fail to be created. Specifically, users who have configured auto scaling must have sufficient resource quotas.

              +

              You are not advised to modify kernel parameters, system configurations, cluster core component versions, security groups, and ELB-related parameters on cluster nodes, or install software that has not been verified.

              +

              Deployment

              +

              Exceptions may occur on CCE clusters or Kubernetes components on the node, making the node unavailable for application deployment.

              +

              Do not modify information about resources created by CCE, such as security groups and EVS disks. Resources created by CCE are labeled cce.

              +

              Deployment

              +

              CCE cluster functions may be abnormal.

              +

              Proactive O&M

              +

              CCE provides multi-dimensional monitoring and alarm reporting functions, and supports basic resource monitoring based on fine-grained metrics by interconnecting with Application Operations Management (AOM). Alarms allow users to locate and rectify faults as soon as possible.

              +

              Monitoring

              +

              If the alarms are not configured, the standard of container cluster performance cannot be established. When an exception occurs, you cannot receive alarms and will need to manually locate the fault.

              +
              +
              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_bestpractice_00035.html b/docs/cce/umn/cce_bestpractice_00035.html new file mode 100644 index 00000000..6446f86d --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_00035.html @@ -0,0 +1,37 @@ + + +

              Obtaining the Client Source IP Address for a Container

              +

              Background

              There may be different types of proxy servers between a client and a container server. How can a container obtain the real source IP address of the client? This section describes several scenarios you may encounter.

              +
              +

              Principles

              +

              Layer-7 forwarding:

              +

              Ingress: If this access mode is used, the client source IP address is saved in the X-Forwarded-For HTTP header field by default. No other configuration is required.

              +
              • ELB ingress: A self-developed ingress to implement layer-7 network access between the internet and intranet (in the same VPC) based on ELB. If the backend Service type is NodePort, set Service Affinity to Node level.
              +

              Layer-4 forwarding:

              +
              • LoadBalancer: Use ELB to achieve load balancing. You can manually enable the Obtain Client IP Address option for TCP and UDP listeners of shared load balancers. By default, the Obtain Client IP Address option is enabled for TCP and UDP listeners of dedicated load balancers. You do not need to manually enable it.
              • NodePort: In this access mode, the container port is mapped to the node port. If cluster-level affinity is configured, access requests will be forwarded through the node and the client source IP address cannot be obtained. If node-level affinity is configured, access requests are not forwarded and the client source IP address can be obtained.
              +
              +

              Ingress

              Configure the application server and obtain the IP address of a client from the HTTP header.

              +

              The real IP address is placed in the X-Forwarded-For HTTP header field by the load balancer in the following format:

              +
              X-Forwarded-For: IP address of the client,Proxy server 1-IP address,Proxy server 2-IP address,...
              +

              If you use this method, the first IP address obtained is the IP address of the client.

              +

              For details, see How Can I Obtain the IP Address of a Client?

              +
              • When adding an ingress, if the backend service is of the NodePort type, set Service Affinity to Node level, that is, set spec.externalTrafficPolicy to Local. For details, see NodePort.
              +
              +
              +

              LoadBalancer

              For a LoadBalancer Service, different types of clusters obtain source IP addresses in different scenarios. In some scenarios, source IP addresses cannot be obtained currently.

              +

              VPC and Container Tunnel Network Models

              +

              To obtain source IP addresses, perform the following steps:

              +
              1. When creating a LoadBalancer Service on the CCE console, set Service Affinity to Node level instead of Cluster level.

                +

              2. Go to the ELB console and enable the function of obtaining the client IP address of the listener corresponding to the load balancer. Transparent transmission of source IP addresses is enabled for dedicated load balancers by default. You do not need to manually enable this function.

                1. Log in to the ELB console.
                2. Click in the upper left corner to select the desired region and project.
                3. Click Service List. Under Networking, click Elastic Load Balance.
                4. On the Load Balancers page, click the name of the load balancer.
                5. Click Listeners.
                6. To add a listener, click Add Listener.
7. To modify a listener, locate the listener and click the edit icon on the right of its name.
                8. Enable Obtain Client IP Address.
                +
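For reference, a minimal YAML sketch of a LoadBalancer Service with Node-level affinity, assuming an existing shared load balancer (the load balancer ID and names are placeholders); the annotations follow the kubernetes.io/elb.* convention used elsewhere in this guide:

apiVersion: v1
kind: Service
metadata:
  name: nginx-lb                                   # Illustrative Service name
  annotations:
    kubernetes.io/elb.class: union                 # Shared load balancer
    kubernetes.io/elb.id: <your-load-balancer-id>  # Placeholder: ID of an existing load balancer
spec:
  selector:
    app: nginx
  type: LoadBalancer
  externalTrafficPolicy: Local                     # Node-level service affinity; preserves the client source IP
  ports:
    - name: http
      port: 80
      targetPort: 80
      protocol: TCP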

              +

              +
              +

              NodePort

              Set the service affinity of a NodePort Service to Node level instead of Cluster level. That is, set spec.externalTrafficPolicy of the Service to Local.
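A minimal sketch of such a NodePort Service (names and ports are illustrative):

apiVersion: v1
kind: Service
metadata:
  name: nginx-nodeport           # Illustrative Service name
spec:
  selector:
    app: nginx
  type: NodePort
  externalTrafficPolicy: Local   # Node level: requests are not forwarded across nodes, so the source IP is preserved
  ports:
    - port: 80
      targetPort: 80
      protocol: TCP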

              +
              +
              +
              + +
              + diff --git a/docs/cce/umn/cce_bestpractice_00162.html b/docs/cce/umn/cce_bestpractice_00162.html index d7a1cd6d..e29f7a89 100644 --- a/docs/cce/umn/cce_bestpractice_00162.html +++ b/docs/cce/umn/cce_bestpractice_00162.html @@ -1,97 +1,107 @@ -

              Selecting a Network Model When Creating a Cluster on CCE

              -

              CCE uses high-performance container networking add-ons, which support the tunnel network and VPC network models.

              -

              After a cluster is created, the network model cannot be changed. Exercise caution when selecting a network model.

              +

              Selecting a Network Model

              +

CCE uses proprietary, high-performance container networking add-ons to support the tunnel network, Cloud Native Network 2.0, and VPC network models.

              +

              After a cluster is created, the network model cannot be changed. Exercise caution when selecting a network model.

              -
              • Tunnel network: The container network is an overlay tunnel network on top of a VPC network and uses the VXLAN technology. This network model is applicable when there is no high requirements on performance. VXLAN encapsulates Ethernet packets as UDP packets for tunnel transmission. Though at some cost of performance, the tunnel encapsulation enables higher interoperability and compatibility with advanced features (such as network policy-based isolation), meeting the requirements of most applications.
                Figure 1 Container tunnel network
                -
              • VPC network: The container network uses VPC routing to integrate with the underlying network. This network model is applicable to performance-intensive scenarios. The maximum number of nodes allowed in a cluster depends on the route quota in a VPC network. Each node is assigned a CIDR block of a fixed size. VPC networks are free from tunnel encapsulation overhead and outperform container tunnel networks. In addition, as VPC routing includes routes to node IP addresses and the container CIDR block, container pods in the cluster can be directly accessed from outside the cluster.
                Figure 2 VPC network
                +
• Tunnel network: The container network is an overlay tunnel network on top of a VPC network and uses the VXLAN technology. This network model is applicable when there are no high performance requirements. VXLAN encapsulates Ethernet packets as UDP packets for tunnel transmission. Though at some cost to performance, tunnel encapsulation enables higher interoperability and compatibility with advanced features (such as network policy-based isolation), meeting the requirements of most applications.
                  Figure 1 Container tunnel network
                  +
• VPC network: The container network uses VPC routing to integrate with the underlying network. This network model is applicable to performance-intensive scenarios. The maximum number of nodes allowed in a cluster depends on the route quota in a VPC network. Each node is assigned a CIDR block of a fixed size. VPC networks are free from tunnel encapsulation overhead and outperform container tunnel networks. In addition, as VPC routing includes routes to node IP addresses and the container network segment, container pods in the cluster can be directly accessed from outside the cluster.
                  Figure 2 VPC network
                  +
                • Cloud Native Network 2.0: The container network deeply integrates the elastic network interface (ENI) capability of VPC, uses the VPC CIDR block to allocate container addresses, and supports passthrough networking to containers through a load balancer.
                  Figure 3 Cloud Native Network 2.0
                -

                The following table lists the differences between the network models.

                +

                The following table lists the differences between the network models.

                -
                Table 1 Network comparison

                Dimension

                +
                Table 1 Networking model comparison

                Dimension

                Tunnel Network

                +

                Tunnel Network

                VPC Network

                +

                VPC Network

                +

                Cloud Native Network 2.0

                Core component

                +

                Core technology

                OVS

                +

                OVS

                IPVlan

                +

                IPvlan and VPC route

                +

                VPC ENI/sub-ENI

                Applicable clusters

                +

                Applicable Clusters

                Hybrid cluster

                -

                VM cluster

                +

                CCE cluster

                Hybrid cluster

                -

                VM cluster

                +

                CCE cluster

                +

                CCE Turbo cluster

                Support for network policies

                -

                (networkpolicy)

                +

                Network isolation

                Yes

                +

                Kubernetes native NetworkPolicy for pods

                No

                +

                No

                +

                Pods support security group isolation.

                Support for ENI

                +

                Passthrough networking

                No

                +

                No

                Yes. The container network is deeply integrated with the VPC network, and ENI is used for pods to communicate.

                +

                No

                +

                Yes

                IP address management

                +

                IP address management

                IP addresses can be migrated.

                +
                • The container CIDR block is allocated separately.
                • CIDR blocks are divided by node and can be dynamically allocated (CIDR blocks can be dynamically added after being allocated.)
                • Each node is allocated with a small subnet.
                • A static route is added on the VPC router with the next hop set to the node IP address.
                +
                • The container CIDR block is allocated separately.
                • CIDR blocks are divided by node and statically allocated (the CIDR block cannot be changed after a node is created).
                +

                The container CIDR block is divided from the VPC subnet and does not need to be allocated separately.

                Network performance

                +

                Performance

                Performance loss due to VXLAN tunnel encapsulation

                +

                Performance loss due to VXLAN encapsulation

                • No performance loss as no tunnel encapsulation is required; performance comparable to bare metal networks
                • Data forwarded across nodes through the VPC router
                +

                No tunnel encapsulation. Cross-node packets are forwarded through VPC routers, delivering performance equivalent to that of the host network.

                +

                The container network is integrated with the VPC network, eliminating performance loss.

                Networking scale

                +

                Networking scale

                A maximum of 2,000 nodes are supported.

                +

                A maximum of 2,000 nodes are supported.

                Limited by the VPC route table.

                +

                By default, 200 nodes are supported.

                +

                Each time a node is added to the cluster, a route is added to the VPC routing table. Therefore, the cluster scale is limited by the VPC route table.

                +

                A maximum of 2,000 nodes are supported.

                External dependency

                +

                Scenario

                None

                +
                • Common container services
                • Scenarios that do not have high requirements on network latency and bandwidth

                Static route table of the VPC router

                +
                • Scenarios that have high requirements on network latency and bandwidth
                • Containers communicate with VMs using a microservice registration framework, such as Dubbo and CSE.

                Application scenarios

                -
                • Common container service scenarios
                • Scenarios that do not have high requirements on network latency and bandwidth
                -
                • Scenarios that have high requirements on network latency and bandwidth
                • Containers can communicate with VMs using a microservice registration framework, such as Dubbo and CSE.
                +
                • Scenarios that have high requirements on network latency, bandwidth, and performance
                • Containers communicate with VMs using a microservice registration framework, such as Dubbo and CSE.
                -
                1. The actual cluster scale is limited by the quota of custom routes of the VPC. Therefore, estimate the number of required nodes before creating a VPC.
                2. By default, the VPC network model supports direct communication between containers and hosts in the same VPC. If a peering connection policy is configured between the VPC and another VPC, the containers can directly communicate with hosts on the peer VPC. In addition, in hybrid networking scenarios such as Direct Connect and VPN, communication between containers and hosts on the peer end can also be achieved with proper planning.
                +
1. The scale of a cluster that uses the VPC network model is limited by the custom route quota of the VPC. Therefore, estimate the number of required nodes before creating a cluster.
2. The scale of a cluster that uses the Cloud Native Network 2.0 model depends on the size of the VPC subnet CIDR block selected for the network attachment definition. Before creating a cluster, evaluate the scale of your cluster.
3. By default, the VPC network model supports direct communication between containers and hosts in the same VPC. If a peering connection policy is configured between the VPC and another VPC, the containers can directly communicate with hosts in the peer VPC. In addition, in hybrid networking scenarios such as Direct Connect and VPN, communication between containers and hosts on the peer end can also be achieved with proper planning.
                diff --git a/docs/cce/umn/cce_bestpractice_00190.html b/docs/cce/umn/cce_bestpractice_00190.html new file mode 100644 index 00000000..68748bbb --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_00190.html @@ -0,0 +1,88 @@ + + +

                Adding a Second Data Disk to a Node in a CCE Cluster

                +

                You can use the pre-installation script feature to configure CCE cluster nodes (ECSs).

                +
                • When creating a node in a cluster of v1.13.10 or later, if a data disk is not managed by LVM, follow instructions in this section to format the data disk before adding the disk. Otherwise, the data disk will still be managed by LVM.
                • When creating a node in a cluster earlier than v1.13.10, you must format the data disks that are not managed by LVM. Otherwise, either these data disks or the first data disk will be managed by LVM.
                +
                +

                Before using this feature, write a script that can format data disks and save it to your OBS bucket. This script must be executed by user root.

                +

                Input Parameters

                +
                1. Set the script name to formatdisk.sh, save the script to your OBS bucket, and obtain the address of the script in OBS.
                2. You need to specify the size of the Docker data disk (the data disk managed by LVM is called the Docker data disk). The size of the Docker disk must be different from that of the second disk. For example, the Docker data disk is 100 GB and the new disk is 110 GB.
                3. Set the mount path of the second data disk, for example, /data/code.
                +

                Run the following command in the pre-installation script to format the disk:

                +
                cd /tmp;curl -k -X GET OBS bucket address /formatdisk.sh -1 -O;fdisk -l;sleep 30;bash -x formatdisk.sh 100 /data/code;fdisk -l
                +

                Example script (formatdisk.sh):

                +
                dockerdisksize=$1
                +mountdir=$2
                +systemdisksize=40
                +i=0
                +while [ 20 -gt $i ]; do 
                +    echo $i; 
                +    if [ $(lsblk -o KNAME,TYPE | grep disk | grep -v nvme | awk '{print $1}' | awk '{ print "/dev/"$1}' |wc -l) -ge 3 ]; then 
                +        break 
                +    else 
                +        sleep 5 
                +    fi; 
                +    i=$[i+1] 
                +done 
                +all_devices=$(lsblk -o KNAME,TYPE | grep disk | grep -v nvme | awk '{print $1}' | awk '{ print "/dev/"$1}')
                +for device in ${all_devices[@]}; do
                +    isRawDisk=$(lsblk -n $device 2>/dev/null | grep disk | wc -l)
                +    if [[ ${isRawDisk} > 0 ]]; then
                +        # is it partitioned ?
                +        match=$(lsblk -n $device 2>/dev/null | grep -v disk | wc -l)
                +        if [[ ${match} > 0 ]]; then
+            # already partitioned
+            [[ -n "${DOCKER_BLOCK_DEVICES}" ]] && echo "Raw disk ${device} has been partitioned, will skip this device"
                +            continue
                +        fi
                +    else
                +        isPart=$(lsblk -n $device 2>/dev/null | grep part | wc -l)
                +        if [[ ${isPart} -ne 1 ]]; then
+            # not partitioned
+            [[ -n "${DOCKER_BLOCK_DEVICES}" ]] && echo "Disk ${device} has not been partitioned, will skip this device"
                +            continue
                +        fi
                +        # is used ?
                +        match=$(lsblk -n $device 2>/dev/null | grep -v part | wc -l)
                +        if [[ ${match} > 0 ]]; then
                +            # already used
                +            [[ -n "${DOCKER_BLOCK_DEVICES}" ]] && echo "Disk ${device} has been used, will skip this device"
                +            continue
                +        fi
                +        isMount=$(lsblk -n -o MOUNTPOINT $device 2>/dev/null)
                +        if [[ -n ${isMount} ]]; then
                +            # already used
                +            [[ -n "${DOCKER_BLOCK_DEVICES}" ]] && echo "Disk ${device} has been used, will skip this device"
                +            continue
                +        fi
                +        isLvm=$(sfdisk -lqL 2>>/dev/null | grep $device | grep "8e.*Linux LVM")
                +        if [[ ! -n ${isLvm} ]]; then
                +            # part system type is not Linux LVM
                +            [[ -n "${DOCKER_BLOCK_DEVICES}" ]] && echo "Disk ${device} system type is not Linux LVM, will skip this device"
                +            continue
                +        fi
                +    fi
                +    block_devices_size=$(lsblk -n -o SIZE $device 2>/dev/null | awk '{ print $1}')
                +    if [[ ${block_devices_size}"x" != "${dockerdisksize}Gx" ]] && [[ ${block_devices_size}"x" != "${systemdisksize}Gx" ]]; then
                +echo "n
                +p
                +1
                +
                +
                +w
                +" | fdisk $device
                +        mkfs -t ext4 ${device}1
                +        mkdir -p $mountdir
                +	uuid=$(blkid ${device}1 |awk '{print $2}')
                +	echo "${uuid}  $mountdir ext4  noatime  0 0" | tee -a /etc/fstab >/dev/null
                +        mount $mountdir
                +    fi
                +done
                +

                If the preceding example cannot be executed, use the dos2unix tool to convert the format.
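For example (assuming dos2unix is available in the environment where you edit the script):

dos2unix formatdisk.sh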

                +
                +
                +
                + +
                + diff --git a/docs/cce/umn/cce_bestpractice_00198.html b/docs/cce/umn/cce_bestpractice_00198.html new file mode 100644 index 00000000..b4384058 --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_00198.html @@ -0,0 +1,48 @@ + + +

                Expanding Node Disk Capacity

                +

                System Disk

                1. Expand the capacity of the system disk on the EVS console.
                2. Restart the node on the ECS console.
                3. Log in to the CCE console and click the cluster. In the navigation pane, choose Nodes. Click More > Sync Server Data at the row containing the target node.
                +
                +

                Node Data Disk (Dedicated for Docker)

                1. Expand the capacity of the data disk on the EVS console.
                2. Log in to the CCE console and click the cluster. In the navigation pane, choose Nodes. Click More > Sync Server Data at the row containing the target node.
                3. Log in to the target node.
                4. Run the lsblk command to check the block device information of the node.

How a data disk is partitioned depends on the container storage Rootfs:

                  +
                  • Overlayfs: No independent thin pool is allocated. Image data is stored in the dockersys disk.
                    # lsblk
                    +NAME                MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
                    +sda                   8:0    0   50G  0 disk 
                    +└─sda1                8:1    0   50G  0 part /
                    +sdb                   8:16   0  200G  0 disk 
                    +├─vgpaas-dockersys  253:0    0   90G  0 lvm  /var/lib/docker               # Space used by Docker.
                    +└─vgpaas-kubernetes 253:1    0   10G  0 lvm  /mnt/paas/kubernetes/kubelet  # Space used by Kubernetes.
                    +

                    Run the following commands on the node to add the new disk capacity to the dockersys disk:

                    +
                    pvresize /dev/sdb 
                    +lvextend -l+100%FREE -n vgpaas/dockersys
                    +resize2fs /dev/vgpaas/dockersys
                    +
                  • Devicemapper: A thin pool is allocated to store image data.
                    # lsblk
                    +NAME                                MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
                    +sda                                   8:0    0   50G  0 disk 
                    +└─sda1                                8:1    0   50G  0 part /
                    +sdb                                   8:16   0  200G  0 disk 
                    +├─vgpaas-dockersys                  253:0    0   18G  0 lvm  /var/lib/docker    
                    +├─vgpaas-thinpool_tmeta             253:1    0    3G  0 lvm                   
                    +│ └─vgpaas-thinpool                 253:3    0   67G  0 lvm                   # Thin pool space.
                    +│   ...
                    +├─vgpaas-thinpool_tdata             253:2    0   67G  0 lvm  
                    +│ └─vgpaas-thinpool                 253:3    0   67G  0 lvm  
                    +│   ...
                    +└─vgpaas-kubernetes                 253:4    0   10G  0 lvm  /mnt/paas/kubernetes/kubelet
                    +

                    Run the following commands on the node to add the new disk capacity to the thinpool disk:

                    +
                    pvresize /dev/sdb 
                    +lvextend -l+100%FREE -n vgpaas/thinpool
                    +
                  +

                +
                +

                Node Data Disk (Kubernetes)

                1. Expand the capacity of the data disk on the EVS console.
                2. Log in to the CCE console and click the cluster. In the navigation pane, choose Nodes. Click More > Sync Server Data at the row containing the target node.
                3. Log in to the target node.
                4. Run the following commands on the node to add the new disk capacity to the Kubernetes disk:

                  pvresize /dev/sdb
                  +lvextend -l+100%FREE -n vgpaas/kubernetes
                  +resize2fs /dev/vgpaas/kubernetes
                  +
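To verify that the expansion took effect, you can check the device and file system sizes, for example (device names and mount paths may differ on your node):

lsblk /dev/sdb
df -h /mnt/paas/kubernetes/kubelet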

                +
                +
                +
                + +
                + diff --git a/docs/cce/umn/cce_bestpractice_00199.html b/docs/cce/umn/cce_bestpractice_00199.html new file mode 100644 index 00000000..2f6e241f --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_00199.html @@ -0,0 +1,123 @@ + + +

                Mounting an Object Storage Bucket of a Third-Party Tenant

                +

                This section describes how to mount OBS buckets and OBS parallel file systems (preferred) of third-party tenants.

                +

                Scenario

                The CCE cluster of a SaaS service provider needs to be mounted with the OBS bucket of a third-party tenant, as shown in Figure 1.

                +
                Figure 1 Mounting an OBS bucket of a third-party tenant
                +
                1. The third-party tenant authorizes the SaaS service provider to access the OBS buckets or parallel file systems by setting the bucket policy and bucket ACL.
                2. The SaaS service provider statically imports the OBS buckets and parallel file systems of the third-party tenant.
                3. The SaaS service provider processes the service and writes the processing result (result file or result data) back to the OBS bucket of the third-party tenant.
                +
                +

                Precautions

                • Only parallel file systems and OBS buckets of third-party tenants in the same region can be mounted.
• OBS buckets of third-party tenants can be mounted only in clusters of v1.15 or later where the everest add-on of v1.1.11 or later has been installed.
                • The service platform of the SaaS service provider needs to manage the lifecycle of the third-party bucket PVs. When a PVC is deleted separately, the PV is not deleted. Instead, it will be retained. To do so, you need to call the native Kubernetes APIs to create and delete static PVs.
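For example, the platform can manage these static PVs directly with kubectl (the file and PV names below are placeholders):

kubectl create -f third-party-bucket-pv.yaml    # Create the static PV from a manifest such as the examples below
kubectl delete pv <pv-name>                     # Delete only the retained PV; the OBS bucket itself is not affected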
                +
                +

                Authorizing the SaaS Service Provider to Access the OBS Buckets

                The following uses an OBS bucket as an example to describe how to set a bucket policy and bucket ACL to authorize the SaaS service provider. The configuration for an OBS parallel file system is the same.

                +
                1. Log in to the OBS console. In the navigation pane, choose Buckets.
                2. In the bucket list, click a bucket name to access the Overview page.
3. In the navigation pane, choose Permissions > Bucket Policy. On the displayed page, click Create to create a bucket policy.

                  Set the parameters as shown in the following figure.
                  Figure 2 Creating a bucket policy
                  +
                  • Policy Mode: Select Customized.
                  • Effect: Select Allow.
                  • Principal: Select Other account, and enter the account ID and user ID. The bucket policy takes effect for the specified users.
                  • Resources: Select the resources that can be operated.
                  • Actions: Select the actions that can be operated.
                  +
                  +

4. In the navigation pane, choose Permissions > Bucket ACLs. In the right pane, click Add. Enter the account ID or account name of the authorized user, select Read and Write for Access to Bucket, select Read and Write for Access to ACL, and click OK.
                +
                +

                Statically Importing OBS Buckets and Parallel File Systems

                • Static PV of an OBS bucket:
                  apiVersion: v1
                  +kind: PersistentVolume
                  +metadata:
                  +  name: objbucket      #Replace the name with the actual PV name of the bucket.
                  +  annotations:
                  +    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
                  +spec:
                  +  accessModes:
                  +  - ReadWriteMany
                  +  capacity:
                  +    storage: 1Gi
                  +  mountOptions:  
                  +  - default_acl=bucket-owner-full-control      #New OBS mounting parameters
                  +  csi:
                  +    driver: obs.csi.everest.io
                  +    fsType: obsfs
                  +    volumeAttributes:
                  +      everest.io/obs-volume-type: STANDARD
                  +      everest.io/region:   eu-de     #Set it to the ID of the current region.
                  +      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
                  +    volumeHandle: objbucket             #Replace the name with the actual bucket name of the third-party tenant.
                  +  persistentVolumeReclaimPolicy: Retain    #This parameter must be set to Retain to ensure that the bucket will not be deleted when a PV is deleted.
                  +  storageClassName: csi-obs-mountoption    #You can associate a new custom OBS storage class or the built-in csi-obs of the cluster.
                  +
                  • mountOptions: This field contains the new OBS mounting parameters that allow the bucket owner to have full access to the data in the bucket. This field solves the problem that the bucket owner cannot read the data written into a mounted third-party bucket. If the object storage of a third-party tenant is mounted, default_acl must be set to bucket-owner-full-control. For details about other values of default_acl, see Bucket ACLs and Object ACLs.
                  • persistentVolumeReclaimPolicy: When the object storage of a third-party tenant is mounted, this field must be set to Retain. In this way, the OBS bucket will not be deleted when a PV is deleted. The service platform of the SaaS service provider needs to manage the lifecycle of the third-party bucket PVs. When a PVC is deleted separately, the PV is not deleted. Instead, it will be retained. To do so, you need to call the native Kubernetes APIs to create and delete static PVs.
                  • storageClassName: You can associate a new custom OBS storage class (click here) or the built-in csi-obs of the cluster.
                  +
                  PVC of a bound OBS bucket:
                  apiVersion: v1
                  +kind: PersistentVolumeClaim
                  +metadata:
                  +  annotations:
                  +    csi.storage.k8s.io/fstype: obsfs
                  +    everest.io/obs-volume-type: STANDARD
                  +    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
                  +  name: objbucketpvc      #Replace the name with the actual PVC name of the bucket.
                  +  namespace: default
                  +spec:
                  +  accessModes:
                  +  - ReadWriteMany
                  +  resources:
                  +    requests:
                  +      storage: 1Gi
                  +  storageClassName: csi-obs-mountoption     #The value must be the same as the storage class associated with the bound PV.
                  +  volumeName: objbucket       #Replace the name with the actual PV name of the bucket to be bound.
                  +
                  +
                • Static PV of an OBS parallel file system:
                  apiVersion: v1
                  +kind: PersistentVolume
                  +metadata:
                  +  name: obsfscheck   #Replace the name with the actual PV name of the parallel file system.
                  +  annotations:
                  +    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
                  +spec:
                  +  accessModes:
                  +  - ReadWriteMany
                  +  capacity:
                  +    storage: 1Gi
                  +  mountOptions:
                  +  - default_acl=bucket-owner-full-control     #New OBS mounting parameters
                  +  csi:
                  +    driver: obs.csi.everest.io
                  +    fsType: obsfs
                  +    volumeAttributes:
                  +      everest.io/obs-volume-type: STANDARD
                  +      everest.io/region:   eu-de
                  +      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
                  +    volumeHandle: obsfscheck               #Replace the name with the actual name of the parallel file system of the third-party tenant.
                  +  persistentVolumeReclaimPolicy: Retain        #This parameter must be set to Retain to ensure that the bucket will not be deleted when a PV is deleted.
                  +  storageClassName: csi-obs-mountoption       #You can associate a new custom OBS storage class or the built-in csi-obs of the cluster.
                  +
                  • mountOptions: This field contains the new OBS mounting parameters that allow the bucket owner to have full access to the data in the bucket. This field solves the problem that the bucket owner cannot read the data written into a mounted third-party bucket. If the object storage of a third-party tenant is mounted, default_acl must be set to bucket-owner-full-control. For details about other values of default_acl, see Bucket ACLs and Object ACLs.
                  • persistentVolumeReclaimPolicy: When the object storage of a third-party tenant is mounted, this field must be set to Retain. In this way, the OBS bucket will not be deleted when a PV is deleted. The service platform of the SaaS service provider needs to manage the lifecycle of the third-party bucket PVs. When a PVC is deleted separately, the PV is not deleted. Instead, it will be retained. To do so, you need to call the native Kubernetes APIs to create and delete static PVs.
                  • storageClassName: You can associate a new custom OBS storage class (click here) or the built-in csi-obs of the cluster.
                  +
                  PVC of a bound OBS parallel file system:
                  apiVersion: v1
                  +kind: PersistentVolumeClaim
                  +metadata:
                  +  annotations:
                  +    csi.storage.k8s.io/fstype: obsfs
                  +    everest.io/obs-volume-type: STANDARD
                  +    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
                  +  name: obsfscheckpvc   #Replace the name with the actual PVC name of the parallel file system.
                  +  namespace: default
                  +spec:
                  +  accessModes:
                  +  - ReadWriteMany
                  +  resources:
                  +    requests:
                  +      storage: 1Gi
                  +  storageClassName: csi-obs-mountoption    #The value must be the same as the storage class associated with the bound PV.
                  +  volumeName: obsfscheck     #Replace the name with the actual PV name of the parallel file system.
                  +
                  +
                • (Optional) Creating a custom OBS storage class to associate with a static PV:
                  apiVersion: storage.k8s.io/v1
                  +kind: StorageClass
                  +metadata:
                  +  name: csi-obs-mountoption
                  +mountOptions:
                  +  - default_acl=bucket-owner-full-control
                  +parameters:
                  +  csi.storage.k8s.io/csi-driver-name: obs.csi.everest.io
                  +  csi.storage.k8s.io/fstype: obsfs
                  +  everest.io/obs-volume-type: STANDARD
                  +provisioner: everest-csi-provisioner
                  +reclaimPolicy: Retain
                  +volumeBindingMode: Immediate
                  +
                  • csi.storage.k8s.io/fstype: File type. The value can be obsfs or s3fs. If the value is s3fs, an OBS bucket is created and mounted using s3fs. If the value is obsfs, an OBS parallel file system is created and mounted using obsfs.
                  • reclaimPolicy: Reclaim policy of a PV. The value will be set in PV.spec.persistentVolumeReclaimPolicy dynamically created based on the new PVC associated with the storage class. If the value is Delete, the external OBS bucket and the PV will be deleted when the PVC is deleted. If the value is Retain, the PV and external storage are retained when the PVC is deleted. In this case, you need to clear the PV separately. In the scenario where an imported third-party bucket is associated, the storage class is used only for associating static PVs (with this field set to Retain). Dynamic creation is not involved.
                  +
                +
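Once the static PV and PVC above are created, the PVC can be mounted into a workload like any other PVC. A minimal pod sketch (the pod name and mount path are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: obs-demo
  namespace: default
spec:
  containers:
    - name: app
      image: nginx:alpine
      volumeMounts:
        - name: obs-volume
          mountPath: /mnt/obs       # Illustrative mount path inside the container
  imagePullSecrets:
    - name: default-secret
  volumes:
    - name: obs-volume
      persistentVolumeClaim:
        claimName: objbucketpvc     # PVC created above for the third-party bucket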
                +
                +
                + +
                + diff --git a/docs/cce/umn/cce_bestpractice_00220.html b/docs/cce/umn/cce_bestpractice_00220.html new file mode 100644 index 00000000..921efc41 --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_00220.html @@ -0,0 +1,99 @@ + + +

                Implementing High Availability for Containers in CCE

                +

                Basic Principles

                To achieve high availability for your CCE containers, you can do as follows:

                +
                1. Deploy three master nodes for the cluster.
                2. When nodes are deployed across AZs, set custom scheduling policies based on site requirements to maximize resource utilization.
                3. Create multiple node pools in different AZs and use them for node scaling.
                4. Set the number of pods to be greater than 2 when creating a workload.
5. Set pod anti-affinity rules to distribute pods to different AZs and nodes.
                +
                +

                Procedure

                Assume that there are four nodes in a cluster distributed in the following AZs:

                +
                $ kubectl get node -L topology.kubernetes.io/zone,kubernetes.io/hostname
                +NAME            STATUS   ROLES    AGE   VERSION                      ZONE       HOSTNAME
                +192.168.5.112   Ready    <none>   42m   v1.21.7-r0-CCE21.11.1.B007   eu-de-01   192.168.5.112
                +192.168.5.179   Ready    <none>   42m   v1.21.7-r0-CCE21.11.1.B007   eu-de-01   192.168.5.179
                +192.168.5.252   Ready    <none>   37m   v1.21.7-r0-CCE21.11.1.B007   eu-de-02   192.168.5.252
                +192.168.5.8     Ready    <none>   33h   v1.21.7-r0-CCE21.11.1.B007   eu-de-03   192.168.5.8
                +

                Create workloads according to the following two podAntiAffinity rules:

                +
                • The first one is the pod anti-affinity in an AZ. Set the parameters as follows:
                  • weight: A larger weight value indicates a higher priority. In this example, set it to 50.
                  • topologyKey: a default or custom key for the node label that the system uses to denote a topology domain. A topology key determines the scope where the pod should be scheduled to. In this example, set this parameter to topology.kubernetes.io/zone, which is the label for identifying the AZ where the node is located.
                  • labelSelector: Select the label of the workload to realize the anti-affinity between this container and the workload.
                  +
                • The second one is the pod anti-affinity in the node host name. Set the parameters as follows:
                  • weight: Set it to 50.
                  • topologyKey: Set it to kubernetes.io/hostname.
• labelSelector: Select the label of the workload that the pods should be anti-affined with.
                  +
                +
                kind: Deployment
                +apiVersion: apps/v1
                +metadata:
                +  name: nginx
                +  namespace: default
                +spec:
                +  replicas: 2
                +  selector:
                +    matchLabels:
                +      app: nginx
                +  template:
                +    metadata:
                +      labels:
                +        app: nginx
                +    spec:
                +      containers:
                +        - name: container-0
                +          image: nginx:alpine
                +          resources:
                +            limits:
                +              cpu: 250m
                +              memory: 512Mi
                +            requests:
                +              cpu: 250m
                +              memory: 512Mi
                +      affinity:
                +        podAntiAffinity:
                +          preferredDuringSchedulingIgnoredDuringExecution:
                +            - weight: 50
                +              podAffinityTerm:
                +                labelSelector:                       # Select the label of the workload to realize the anti-affinity between this container and the workload.
                +                  matchExpressions:
                +                    - key: app
                +                      operator: In
                +                      values:
                +                        - nginx
                +                namespaces:
                +                  - default
                +                topologyKey: topology.kubernetes.io/zone   # It takes effect in the same AZ.
                +            - weight: 50
                +              podAffinityTerm:
                +                labelSelector:                       # Select the label of the workload to realize the anti-affinity between this container and the workload.
                +                  matchExpressions:
                +                    - key: app
                +                      operator: In
                +                      values:
                +                        - nginx
                +                namespaces:
                +                  - default
                +                topologyKey: kubernetes.io/hostname     # It takes effect on the node.
                +      imagePullSecrets:
                +        - name: default-secret
                +

                Create a workload and view the node where the pod is located.

                +
                $ kubectl get pod -owide
                +NAME                     READY   STATUS    RESTARTS   AGE   IP           NODE
                +nginx-6fffd8d664-dpwbk   1/1     Running   0          17s   10.0.0.132   192.168.5.112
                +nginx-6fffd8d664-qhclc   1/1     Running   0          17s   10.0.1.133   192.168.5.252
                +

Increase the number of pods to 3. The new pod is scheduled to another node, and the three pods now run on nodes in three different AZs.

                +
                $ kubectl scale --replicas=3 deploy/nginx
                +deployment.apps/nginx scaled
                +$ kubectl get pod -owide
                +NAME                     READY   STATUS    RESTARTS   AGE     IP           NODE
                +nginx-6fffd8d664-8t7rv   1/1     Running   0          3s      10.0.0.9     192.168.5.8
                +nginx-6fffd8d664-dpwbk   1/1     Running   0          2m45s   10.0.0.132   192.168.5.112
                +nginx-6fffd8d664-qhclc   1/1     Running   0          2m45s   10.0.1.133   192.168.5.252
                +

Increase the number of pods to 4. The new pod is scheduled to the remaining node. With podAntiAffinity rules, pods can be evenly distributed to AZs and nodes.

                +
                $ kubectl scale --replicas=4 deploy/nginx
                +deployment.apps/nginx scaled
                +$ kubectl get pod -owide
                +NAME                     READY   STATUS    RESTARTS   AGE     IP           NODE
                +nginx-6fffd8d664-8t7rv   1/1     Running   0          2m30s   10.0.0.9     192.168.5.8
                +nginx-6fffd8d664-dpwbk   1/1     Running   0          5m12s   10.0.0.132   192.168.5.112
                +nginx-6fffd8d664-h796b   1/1     Running   0          78s     10.0.1.5     192.168.5.179
                +nginx-6fffd8d664-qhclc   1/1     Running   0          5m12s   10.0.1.133   192.168.5.252
                +
                +
                +
                + +
                + diff --git a/docs/cce/umn/cce_bestpractice_00226.html b/docs/cce/umn/cce_bestpractice_00226.html new file mode 100644 index 00000000..4c20a58f --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_00226.html @@ -0,0 +1,157 @@ + + +

                Using hostAliases to Configure /etc/hosts in a Pod

                +

                Scenario

If DNS or other related settings are inappropriate, you can use hostAliases to override host name resolution at the pod level by adding entries to the /etc/hosts file of the pod.

                +
                +

                Procedure

                1. Use kubectl to connect to the cluster.
                2. Create the hostaliases-pod.yaml file.

                  vi hostaliases-pod.yaml

                  +

The image field in the YAML file specifies the image name and tag. You can replace the example value as required.

                  +
                  apiVersion: v1
                  +kind: Pod
                  +metadata:
                  +  name: hostaliases-pod
                  +spec:
                  +  hostAliases:
                  +  - ip: 127.0.0.1
                  +    hostnames:
                  +    - foo.local
                  +    - bar.local
                  +  - ip: 10.1.2.3
                  +    hostnames:
                  +    - foo.remote
                  +    - bar.remote
                  +  containers:
                  +    - name: cat-hosts
                  +      image: tomcat:9-jre11-slim
                  +      lifecycle:
                  +        postStart:
                  +          exec:
                  +            command:
                  +              - cat
                  +              - /etc/hosts
                  +  imagePullSecrets:
                  +    - name: default-secret
                  + +
                  Table 1 pod field description

                  Parameter

                  +

                  Mandatory/Optional

                  +

                  Description

                  +

                  apiVersion

                  +

                  Mandatory

                  +

                  API version number

                  +

                  kind

                  +

                  Mandatory

                  +

                  Type of the object to be created

                  +

                  metadata

                  +

                  Mandatory

                  +

                  Metadata definition of a resource object

                  +

                  name

                  +

                  Mandatory

                  +

                  Name of a pod

                  +

                  spec

                  +

                  Mandatory

                  +

                  Detailed description of the pod. For details, see Table 2.

                  +
                  +
                  + +
                  Table 2 spec field description

                  Parameter

                  +

                  Mandatory/Optional

                  +

                  Description

                  +

                  hostAliases

                  +

                  Mandatory

                  +

                  Host alias

                  +

                  containers

                  +

                  Mandatory

                  +

                  For details, see Table 3.

                  +
                  +
                  + +
                  Table 3 containers field description

                  Parameter

                  +

                  Mandatory/Optional

                  +

                  Description

                  +

                  name

                  +

                  Mandatory

                  +

                  Container name

                  +

                  image

                  +

                  Mandatory

                  +

                  Container image name

                  +

                  lifecycle

                  +

                  Optional

                  +

                  Lifecycle

                  +
                  +
                  +

                3. Create a pod.

                  kubectl create -f hostaliases-pod.yaml

                  +

                  If information similar to the following is displayed, the pod is created.

                  +
                  pod/hostaliases-pod created
                  +

                4. Query the pod status.

                  kubectl get pod hostaliases-pod

                  +

                  If the pod is in the Running state, the pod is successfully created.

                  +
                  NAME                  READY          STATUS       RESTARTS      AGE
                  +hostaliases-pod       1/1            Running      0             16m
                  +

5. Check whether hostAliases functions properly.

                  docker ps |grep hostaliases-pod

                  +

                  docker exec -ti Container ID /bin/sh

                  +

                  +
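After entering the container, checking /etc/hosts should show the aliases defined above (a sketch; the default pod entries generated by Kubernetes are abbreviated):

cat /etc/hosts
# ...default pod entries...
# Entries added by HostAliases.
127.0.0.1    foo.local    bar.local
10.1.2.3     foo.remote   bar.remote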

                +
                +
                +
                + +
                + diff --git a/docs/cce/umn/cce_bestpractice_00227.html b/docs/cce/umn/cce_bestpractice_00227.html new file mode 100644 index 00000000..040ae3fc --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_00227.html @@ -0,0 +1,73 @@ + + +

                Modifying Kernel Parameters Using a Privileged Container

                +

                Prerequisites

                To access a Kubernetes cluster from a client, you can use the Kubernetes command line tool kubectl.

                +
                +

                Procedure

                1. Create a DaemonSet in the background, select the Nginx image, enable the Privileged Container, configure the lifecycle, and add the hostNetwork field (value: true).

                  1. Create a DaemonSet file.

                    vi daemonSet.yaml

                    +

                    An example YAML file is provided as follows:

                    +

The spec.template.spec.containers.lifecycle field indicates the command that will be run after the container is started.

                    +
                    +
                    kind: DaemonSet
                    +apiVersion: apps/v1
                    +metadata:
                    +  name: daemonset-test
                    +  labels:
                    +    name: daemonset-test
                    +spec:
                    +  selector:
                    +    matchLabels:
                    +      name: daemonset-test
                    +  template:
                    +    metadata:
                    +      labels:
                    +        name: daemonset-test
                    +    spec:
                    +      hostNetwork: true
                    +      containers:
                    +      - name: daemonset-test
                    +        image: nginx:alpine-perl
                    +        command:
                    +        - "/bin/sh"
                    +        args:
                    +        - "-c"
                    +        - while :; do  time=$(date);done
                    +        imagePullPolicy: IfNotPresent
                    +        lifecycle:
                    +          postStart:
                    +            exec:
                    +              command:
                    +              - sysctl
                    +              - "-w"
                    +              - net.ipv4.tcp_tw_reuse=1
                    +        securityContext:
                    +          privileged: true
                    +      imagePullSecrets:
                    +      - name: default-secret
                    +
                  2. Create a DaemonSet.

kubectl create -f daemonSet.yaml

                    +
                  +

                2. Check whether the DaemonSet is successfully created.

                  kubectl get daemonset DaemonSet name

                  +

                  In this example, run the following command:

                  +

                  kubectl get daemonset daemonset-test

                  +

                  Information similar to the following is displayed:

                  +
NAME               DESIRED    CURRENT   READY    UP-TO-DATE    AVAILABLE     NODE SELECTOR   AGE
+daemonset-test     2          2         2        2             2             <none>          2h
                  +

                3. Query the container ID of DaemonSet on the node.

                  docker ps -a|grep DaemonSet name

                  +

                  In this example, run the following command:

                  +

                  docker ps -a|grep daemonset-test

                  +

                  Information similar to the following is displayed:

                  +
                  897b99faa9ce        3e094d5696c1                           "/bin/sh  -c while..."     31 minutes ago     Up  30 minutes  ault_fa7cc313-4ac1-11e9-a716-fa163e0aalba_0
                  +

                4. Access the container.

                  docker exec -it containerid /bin/sh

                  +

                  In this example, run the following command:

                  +

                  docker exec -it 897b99faa9ce /bin/sh

                  +

                5. Check whether the configured command is executed after the container is started.

                  sysctl -a |grep net.ipv4.tcp_tw_reuse

                  +

                  If the following information is displayed, the system parameters are modified successfully:

                  +
                  net.ipv4.tcp_tw_reuse=1
                  +
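Alternatively, the same check can be run without logging in to the node, through kubectl (a sketch; replace the pod name with one of the daemonset-test pods returned by the first command):

kubectl get pod -l name=daemonset-test -o wide
kubectl exec -it <daemonset-test-pod-name> -- sysctl net.ipv4.tcp_tw_reuse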

                +
                +
                +
                + +
                + diff --git a/docs/cce/umn/cce_bestpractice_00228.html b/docs/cce/umn/cce_bestpractice_00228.html new file mode 100644 index 00000000..0cef5e72 --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_00228.html @@ -0,0 +1,61 @@ + + +

                Initializing a Container

                +

                Concepts

Before the application containers in a pod are started, one or more init containers are started first. If there are multiple init containers, they are started in the defined sequence. The application containers are started only after all init containers run to completion and exit. Storage volumes in a pod are shared. Therefore, the data generated in the init containers can be used by the application containers.

                +

                Init containers can be used in multiple Kubernetes resources, such as Deployments, DaemonSets, and jobs. They perform initialization before application containers are started.

                +
                +

                Scenario

Before deploying a service, you can use an init container to make preparations before the pod where the service runs is deployed. After the preparations are complete, the init container runs to completion and exits, and the application container to be deployed is started.

                +
                • Scenario 1: Wait for other modules to be ready. For example, an application contains two containerized services: web server and database. The web server service needs to access the database service. However, when the application is started, the database service may have not been started. Therefore, web server may fail to access database. To solve this problem, you can use an init container in the pod where web server is running to check whether database is ready. The init container runs to completion only when database is accessible. Then, web server is started and initiates a formal access request to database.
                • Scenario 2: Initialize the configuration. For example, the init container can check all existing member nodes in the cluster and prepare the cluster configuration information for the application container. After the application container is started, it can be added to the cluster using the configuration information.
                • Other scenarios: For example, register a pod with a central database and download application dependencies.
                +

                For details, see Init Containers.
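For example, in scenario 1, the init container can simply poll the database Service until it resolves. A minimal sketch (the pod name and the Service name mysql are assumptions for illustration):

apiVersion: v1
kind: Pod
metadata:
  name: web-server                # Hypothetical application pod
spec:
  initContainers:
    - name: wait-for-mysql
      image: busybox
      # Block until the assumed Service name "mysql" resolves in the cluster DNS.
      command: ['sh', '-c', 'until nslookup mysql; do echo waiting for mysql; sleep 2; done']
  containers:
    - name: web-server
      image: nginx:alpine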

                +
                +

                Procedure

                1. Edit the YAML file of the init container workload.

                  vi deployment.yaml

                  +

                  An example YAML file is provided as follows:

                  +
                  apiVersion: apps/v1
                  +kind: Deployment
                  +metadata:
                  +  name: mysql
                  +spec:
                  +  replicas: 1
                  +  selector:
                  +    matchLabels:
                  +      name: mysql
                  +  template:
                  +    metadata:
                  +      labels:
                  +        name: mysql
                  +    spec:
                  +      initContainers:
                  +      - name: getresource
                  +        image: busybox
+        command: ['sleep', '20']
                  +      containers:
                  +      - name: mysql
                  +        image: percona:5.7.22
                  +        imagePullPolicy: Always
                  +        ports:
                  +        - containerPort: 3306
                  +        resources:
                  +          limits:
                  +            memory: "500Mi"
                  +            cpu: "500m"
                  +          requests:
                  +            memory: "500Mi"
                  +            cpu: "250m"
                  +        env:
                  +        - name: MYSQL_ROOT_PASSWORD
                  +          value: "mysql"
                  +

                2. Create an init container workload.

                  kubectl create -f deployment.yaml

                  +

                  Information similar to the following is displayed:

                  +
                  deployment.apps/mysql created
                  +

                3. Query the created Docker container on the node where the workload is running.

                  docker ps -a|grep mysql

                  +

The init container exits after it runs to completion. The Exited (0) state in the query result indicates that the init container exited successfully.

                  +

                  +
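If you prefer not to log in to the node, a rough kubectl-based check is also possible (a sketch that assumes the example Deployment above, whose pods carry the label name=mysql and whose init container is named getresource):

  # The STATUS column shows Init:0/1 while the init container is still running
  # and Running once it has exited successfully.
  kubectl get pod -l name=mysql
  # Inspect the recorded state of the init container directly.
  kubectl get pod -l name=mysql -o jsonpath='{.items[0].status.initContainerStatuses[0].state}'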

diff --git a/docs/cce/umn/cce_bestpractice_00231.html b/docs/cce/umn/cce_bestpractice_00231.html
new file mode 100644
index 00000000..2eb46737
--- /dev/null
+++ b/docs/cce/umn/cce_bestpractice_00231.html
@@ -0,0 +1,147 @@

                Implementing Sticky Session Through Load Balancing

                +

                Concepts

Session persistence is one of the most common yet most complex problems in load balancing.

                +

Session persistence is also called sticky session. After sticky session is enabled, the load balancer distributes requests from the same client to the same backend ECS for better continuity.

                +

In load balancing and sticky sessions, connection and session are two key concepts. When only load balancing is involved, session and connection refer to the same thing.

                +

Simply put, if a user needs to log in, the interaction can be regarded as a session; otherwise, it is just a connection.

                +

The sticky session mechanism conflicts with the basic function of load balancing. A load balancer distributes requests from clients across multiple backend servers to avoid overloading any single server, whereas sticky sessions require that certain requests be forwarded to the same server. Therefore, select a sticky session mechanism that suits your application environment.

                +
                +

                Layer-4 Load Balancing (Service)

                In layer-4 load balancing, source IP address-based sticky session (Hash routing based on the client IP address) can be enabled. To enable source IP address-based sticky session on Services, the following conditions must be met:

                +
                1. Service Affinity of the Service is set to Node level (that is, the value of the externalTrafficPolicy field of the Service is Local).
                2. Enable the source IP address-based sticky session in the load balancing configuration of the Service.
                  apiVersion: v1
                  +kind: Service
                  +metadata:
                  +  name: svc-example
                  +  namespace: default
                  +  annotations:
                  +    kubernetes.io/elb.class: union
                  +    kubernetes.io/elb.id: 56dcc1b4-8810-480c-940a-a44f7736f0dc
                  +    kubernetes.io/elb.lb-algorithm: ROUND_ROBIN
                  +    kubernetes.io/elb.session-affinity-mode: SOURCE_IP
                  +spec:
                  +  selector: 
                  +    app: nginx
                  +  externalTrafficPolicy: Local
                  +  ports:
                  +    - name: cce-service-0
                  +      targetPort: 80
                  +      nodePort: 32633
                  +      port: 80
                  +      protocol: TCP
                  +  type: LoadBalancer
                  +
                3. Anti-affinity is enabled for the backend application corresponding to the Service.
                +
                +
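As an optional sanity check (not part of the original procedure), you can confirm from kubectl that the sticky session annotation and the node-level affinity were applied to the Service created above (assuming it is named svc-example):

  kubectl get svc svc-example -o yaml | grep -E 'session-affinity|externalTrafficPolicy'
  # Expected to show kubernetes.io/elb.session-affinity-mode: SOURCE_IP
  # and externalTrafficPolicy: Local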

                Layer-7 Load Balancing (Ingress)

                In layer-7 load balancing, sticky session based on HTTP cookies and app cookies can be enabled. To enable such sticky session, the following conditions must be met:

                +
                1. The application (workload) corresponding to the ingress is enabled with workload anti-affinity.
                2. Node affinity is enabled for the Service corresponding to the ingress.
                +

                Procedure

                +
                1. Create a Nginx workload.

                  Set the number of pods to 3 and set the podAntiAffinity.
                  kind: Deployment
                  +apiVersion: apps/v1
                  +metadata:
                  +  name: nginx
                  +  namespace: default
                  +spec:
                  +  replicas: 3
                  +  selector:
                  +    matchLabels:
                  +      app: nginx
                  +  template:
                  +    metadata:
                  +      labels:
                  +        app: nginx
                  +    spec:
                  +      containers:
                  +        - name: container-0
                  +          image: 'nginx:perl'
                  +          resources:
                  +            limits:
                  +              cpu: 250m
                  +              memory: 512Mi
                  +            requests:
                  +              cpu: 250m
                  +              memory: 512Mi
                  +      imagePullSecrets:
                  +        - name: default-secret
                  +      affinity:
                  +        podAntiAffinity:                   # Pod anti-affinity.
                  +          requiredDuringSchedulingIgnoredDuringExecution:
                  +            - labelSelector:
                  +                matchExpressions:
                  +                  - key: app
                  +                    operator: In
                  +                    values:
                  +                      - nginx
                  +              topologyKey: kubernetes.io/hostname
                  +
                  +

2. Create a NodePort Service.

                  Configure the sticky session in a Service. An ingress can connect to multiple Services, and each Service can have different sticky sessions.
                  apiVersion: v1
                  +kind: Service
                  +metadata:
                  +  name: nginx
                  +  namespace: default
                  +  annotations:
                  +    kubernetes.io/elb.lb-algorithm: ROUND_ROBIN
                  +    kubernetes.io/elb.session-affinity-mode: HTTP_COOKIE      # HTTP cookie type.
                  +    kubernetes.io/elb.session-affinity-option: '{"persistence_timeout":"1440"}'   # Session stickiness duration, in minutes. The value ranges from 1 to 1440.
                  +spec:
                  +  selector:
                  +    app: nginx
                  +  ports:
                  +    - name: cce-service-0
                  +      protocol: TCP
                  +      port: 80
                  +      targetPort: 80
                  +      nodePort: 32633            # Node port number.
                  +  type: NodePort
                  +  externalTrafficPolicy: Local   # Node-level forwarding.
                  +
                  +

                  You can also select APP_COOKIE.

                  +
                  apiVersion: v1
                  +kind: Service
                  +metadata:
                  +  name: nginx
                  +  namespace: default
                  +  annotations:
                  +    kubernetes.io/elb.lb-algorithm: ROUND_ROBIN
                  +    kubernetes.io/elb.session-affinity-mode: APP_COOKIE     # Select APP_COOKIE.
                  +    kubernetes.io/elb.session-affinity-option: '{"app_cookie_name":"test"}'  # Application cookie name.
                  +...
                  +

                3. Create an ingress and associate it with a Service. The following example describes how to automatically create a shared load balancer. For details about how to specify other types of load balancers, see Using kubectl to Create an ELB Ingress.

                  apiVersion: networking.k8s.io/v1
                  +kind: Ingress 
                  +metadata: 
                  +  name: ingress-test
                  +  namespace: default
                  +  annotations: 
                  +    kubernetes.io/elb.class: union
                  +    kubernetes.io/elb.port: '80'
                  +    kubernetes.io/elb.autocreate: 
                  +      '{
                  +          "type":"public",
                  +          "bandwidth_name":"cce-bandwidth-test",
                  +          "bandwidth_chargemode":"traffic",
                  +          "bandwidth_size":1,
                  +          "bandwidth_sharetype":"PER",
                  +          "eip_type":"5_bgp"
                  +        }'
                  +spec:
                  +  rules: 
                  +  - host: 'www.example.com'
                  +    http: 
                  +      paths: 
                  +      - path: '/'
                  +        backend: 
                  +          service:
                  +            name: nginx     #Service name
                  +            port: 
                  +              number: 80
                  +        property:
                  +          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
                  +        pathType: ImplementationSpecific
                  +  ingressClassName: cce
                  +

                4. Log in to the ELB console, access the load balancer details page, and check whether the sticky session feature is enabled.
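In addition to checking the ELB console, a rough command-line check is possible once the ingress is reachable (a sketch that assumes the example domain www.example.com resolves to the created load balancer): requests that reuse the cookie issued by the load balancer should keep landing on the same backend pod.

  # Store the cookie issued on the first request, then replay it on later requests.
  curl -c cookie.txt -o /dev/null -s http://www.example.com/
  curl -b cookie.txt -o /dev/null -s -w "%{http_code}\n" http://www.example.com/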
diff --git a/docs/cce/umn/cce_bestpractice_00237.html b/docs/cce/umn/cce_bestpractice_00237.html
new file mode 100644
index 00000000..a755d3ea
--- /dev/null
+++ b/docs/cce/umn/cce_bestpractice_00237.html
@@ -0,0 +1,15 @@

                Migration

                +
diff --git a/docs/cce/umn/cce_bestpractice_00253_0.html b/docs/cce/umn/cce_bestpractice_00253_0.html
new file mode 100644
index 00000000..77ab416c
--- /dev/null
+++ b/docs/cce/umn/cce_bestpractice_00253_0.html
@@ -0,0 +1,171 @@

                Dynamically Creating and Mounting Subdirectories of an SFS Turbo File System

                +

                Background

The minimum capacity of an SFS Turbo file system is 500 GB, and the SFS Turbo file system cannot be billed by usage. By default, the root directory of an SFS Turbo file system is mounted to a container which, in most cases, does not require such a large capacity.

                +

                The everest add-on allows you to dynamically create subdirectories in an SFS Turbo file system and mount these subdirectories to containers. In this way, an SFS Turbo file system can be shared by multiple containers to increase storage efficiency.

                +
                +

                Notes and Constraints

                • Only clusters of v1.15 and later are supported.
                • The cluster must use the everest add-on of version 1.1.13 or later.
                • Kata containers are not supported.
• A maximum of 10 PVCs can be created concurrently using the subdirectory function.
                +
                +

                Creating an SFS Turbo Volume of the subpath Type

The CCE console does not yet support operations related to this feature, such as expanding, disassociating, and deleting subPath volumes.

                +
                +
                1. Import an SFS Turbo file system that is located in the same VPC and subnet as the cluster.
                2. Create a StorageClass YAML file, for example, sfsturbo-sc-test.yaml.

                  Configuration example:

                  +
                  apiVersion: storage.k8s.io/v1
                  +allowVolumeExpansion: true
                  +kind: StorageClass
                  +metadata:
                  +  name: sfsturbo-sc-test
                  +mountOptions:
                  +- lock
                  +parameters:
                  +  csi.storage.k8s.io/csi-driver-name: sfsturbo.csi.everest.io
                  +  csi.storage.k8s.io/fstype: nfs
                  +  everest.io/archive-on-delete: "true"
                  +  everest.io/share-access-to: 7ca2dba2-1234-1234-1234-626371a8fb3a
                  +  everest.io/share-expand-type: bandwidth
                  +  everest.io/share-export-location: 192.168.1.1:/sfsturbo/
                  +  everest.io/share-source: sfs-turbo
                  +  everest.io/share-volume-type: STANDARD
                  +  everest.io/volume-as: subpath
                  +  everest.io/volume-id: 0d773f2e-1234-1234-1234-de6a35074696
                  +provisioner: everest-csi-provisioner
                  +reclaimPolicy: Delete
                  +volumeBindingMode: Immediate
                  +

                  In this example:

                  +
                  • name: name of the StorageClass.
                  • mountOptions: mount options. This field is optional.
                    • In versions later than everest 1.1.13 and earlier than everest 1.2.8, only the nolock parameter can be configured. By default, the nolock parameter is used for the mount operation and does not need to be configured. If nolock is set to false, the lock field is used.
                    • Starting from everest 1.2.8, more parameters are supported. The default parameter configurations are shown below. For details, see Setting Mount Options. Do not set nolock to true. Otherwise, the mount operation fails.
                      mountOptions:
                      +- vers=3
                      +- timeo=600
                      +- nolock
                      +- hard
                      +
                    +
                  • everest.io/volume-as: Set this parameter to subpath.
                  • everest.io/share-access-to: This parameter is optional. In subpath mode, set this parameter to the ID of the VPC where the SFS Turbo file system is located.
                  • everest.io/share-expand-type: This parameter is optional. If the type of the SFS Turbo file system is SFS Turbo Standard – Enhanced or SFS Turbo Performance – Enhanced, set this parameter to bandwidth.
                  • everest.io/share-export-location: root directory to be mounted. It consists of the SFS Turbo shared path and sub-directory. The shared path can be queried on the SFS Turbo console. The sub-directory is user-defined. The PVCs created by the StorageClass are located in the sub-directory.
                  • everest.io/share-volume-type: This parameter is optional. It specifies the SFS Turbo file system type. The value can be STANDARD or PERFORMANCE. For enhanced types, this parameter must be used together with everest.io/share-expand-type (whose value should be bandwidth).
                  • everest.io/zone: This parameter is optional. Set it to the AZ where the SFS Turbo file system is located.
                  • everest.io/volume-id: ID of the SFS Turbo volume. You can query the volume ID on the SFS Turbo page.
                  • everest.io/archive-on-delete: If this parameter is set to true and the recycling policy is set to Delete, the original PV file will be archived when the PVC is deleted. The archive directory is named in the format of archived-$PV name.timestamp. If this parameter is set to false, the SFS Turbo sub-directory corresponding to the PV will be deleted. The default value is true.
                  +

3. Run the kubectl create -f sfsturbo-sc-test.yaml command to create a StorageClass.
4. Create a PVC YAML file named sfs-turbo-test.yaml.

                  Configuration example:

                  +
                  apiVersion: v1
                  +kind: PersistentVolumeClaim
                  +metadata:
                  +  name: sfs-turbo-test
                  +  namespace: default
                  +spec:
                  +  accessModes:
                  +  - ReadWriteMany
                  +  resources:
                  +    requests:
                  +      storage: 50Gi
                  +  storageClassName: sfsturbo-sc-test
                  +  volumeMode: Filesystem
                  +

                  In this example:

                  +
                  • name: name of the PVC.
                  • storageClassName: name of the StorageClass created in the previous step.
                  • storage: In the subpath mode, this parameter is invalid. The storage capacity is limited by the total capacity of the SFS Turbo file system. If the total capacity of the SFS Turbo file system is insufficient, expand the capacity on the SFS Turbo page in a timely manner.
                  +

5. Run the kubectl create -f sfs-turbo-test.yaml command to create a PVC.
                +

Expanding the capacity of an SFS Turbo volume created in subpath mode has no effect, because the operation does not expand the capacity of the underlying SFS Turbo file system. Ensure that the total capacity of the SFS Turbo file system is not used up.

                +
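A quick way to confirm the result (optional; assumes the StorageClass and PVC names used in this example):

  kubectl get sc sfsturbo-sc-test
  kubectl get pvc sfs-turbo-test
  # The PVC should reach the Bound state once its subdirectory has been created
  # in the SFS Turbo file system.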
                +
                +

                Creating a Deployment and Mounting an Existing Volume

                1. Create a Deployment YAML file named deployment-test.yaml.

                  Configuration example:
                  apiVersion: apps/v1
                  +kind: Deployment
                  +metadata:
                  +  name: test-turbo-subpath-example
                  +  namespace: default
                  +  generation: 1
                  +  labels:
                  +    appgroup: ''
                  +spec: 
                  +  replicas: 1 
                  +  selector: 
                  +    matchLabels: 
                  +      app: test-turbo-subpath-example 
                  +  template: 
                  +    metadata: 
                  +      labels: 
                  +        app: test-turbo-subpath-example 
                  +    spec: 
                  +      containers: 
                  +      - image: nginx:latest 
                  +        name: container-0 
                  +        volumeMounts: 
                  +        - mountPath: /tmp
                  +          name: pvc-sfs-turbo-example 
                  +      restartPolicy: Always 
                  +      imagePullSecrets:
                  +      - name: default-secret
                  +      volumes: 
                  +      - name: pvc-sfs-turbo-example 
                  +        persistentVolumeClaim: 
                  +          claimName: sfs-turbo-test
                  +
                  +

                  In this example:

                  +
                  • name: name of the Deployment.
                  • image: image used by the Deployment.
                  • mountPath: mount path of the container. In this example, the volume is mounted to the /tmp directory.
                  • claimName: name of an existing PVC.
                  +

2. Run the kubectl create -f deployment-test.yaml command to create a Deployment.
                +
                +
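To verify that the subdirectory is mounted (optional; a sketch that assumes the Deployment name and mount path used above):

  kubectl get pod -l app=test-turbo-subpath-example
  # Check the mount inside the container; /tmp should be backed by the SFS Turbo NFS export.
  POD=$(kubectl get pod -l app=test-turbo-subpath-example -o jsonpath='{.items[0].metadata.name}')
  kubectl exec "$POD" -- df -h /tmp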

                Creating a StatefulSet That Uses a Volume Dynamically Created in subpath Mode

                1. Create a StatefulSet YAML file named statefulset-test.yaml.

                  Configuration example:

                  +
                  apiVersion: apps/v1
                  +kind: StatefulSet
                  +metadata:
                  +  name: test-turbo-subpath
                  +  namespace: default
                  +  generation: 1
                  +  labels:
                  +    appgroup: ''
                  +spec:
                  +  replicas: 2
                  +  selector:
                  +    matchLabels:
                  +      app: test-turbo-subpath
                  +  template:
                  +    metadata:
                  +      labels:
                  +        app: test-turbo-subpath
                  +      annotations:
                  +        metrics.alpha.kubernetes.io/custom-endpoints: '[{"api":"","path":"","port":"","names":""}]'
                  +        pod.alpha.kubernetes.io/initialized: 'true'
                  +    spec:
                  +      containers:
                  +        - name: container-0
                  +          image: 'nginx:latest'
                  +          env:
                  +            - name: PAAS_APP_NAME
                  +              value: deploy-sfs-nfs-rw-in
                  +            - name: PAAS_NAMESPACE
                  +              value: default
                  +            - name: PAAS_PROJECT_ID
                  +              value: 8190a2a1692c46f284585c56fc0e2fb9
                  +          resources: {}
                  +          volumeMounts:
                  +            - name: sfs-turbo-160024548582479676
                  +              mountPath: /tmp
                  +          terminationMessagePath: /dev/termination-log
                  +          terminationMessagePolicy: File
                  +          imagePullPolicy: IfNotPresent
                  +      restartPolicy: Always
                  +      terminationGracePeriodSeconds: 30
                  +      dnsPolicy: ClusterFirst
                  +      securityContext: {}
                  +      imagePullSecrets:
                  +        - name: default-secret
                  +      affinity: {}
                  +      schedulerName: default-scheduler
                  +  volumeClaimTemplates:
                  +    - metadata:
                  +        name: sfs-turbo-160024548582479676
                  +        namespace: default
                  +        annotations: {}
                  +      spec:
                  +        accessModes:
                  +          - ReadWriteOnce
                  +        resources:
                  +          requests:
                  +            storage: 10Gi
                  +        storageClassName: sfsturbo-sc-test
                  +  serviceName: wwww
                  +  podManagementPolicy: OrderedReady
                  +  updateStrategy:
                  +    type: RollingUpdate
                  +  revisionHistoryLimit: 10
                  +

                  In this example:

                  +
                  • name: name of the StatefulSet.
                  • image: image used by the StatefulSet.
                  • mountPath: mount path of the container. In this example, the volume is mounted to the /tmp directory.
                  • spec.template.spec.containers.volumeMounts.name and spec.volumeClaimTemplates.metadata.name must be consistent because they have a mapping relationship.
                  • storageClassName: name of the created StorageClass.
                  +

2. Run the kubectl create -f statefulset-test.yaml command to create a StatefulSet.
                +
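Each StatefulSet replica gets its own PVC generated from volumeClaimTemplates, named in the format <template name>-<StatefulSet name>-<ordinal>. An optional check (assuming the names used above):

  # Expect one Bound PVC per replica, for example
  # sfs-turbo-160024548582479676-test-turbo-subpath-0 and ...-1.
  kubectl get pvc | grep test-turbo-subpath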
diff --git a/docs/cce/umn/cce_bestpractice_00254.html b/docs/cce/umn/cce_bestpractice_00254.html
new file mode 100644
index 00000000..7c10cf2a
--- /dev/null
+++ b/docs/cce/umn/cce_bestpractice_00254.html
@@ -0,0 +1,248 @@

                Connecting to Multiple Clusters Using kubectl

                +

Pain Point

                When you have multiple CCE clusters, you may find it difficult to efficiently connect to all of them.

                +
                +

                Solution

                This section describes how to configure access to multiple clusters by modifying kubeconfig.json. The file describes multiple clusters, users, and contexts. To access different clusters, run the kubectl config use-context command to switch between contexts.

                +
                Figure 1 Using kubectl to connect to multiple clusters
                +
                +

                Prerequisites

                kubectl can access multiple clusters.

                +
                +

                Introduction to kubeconfig.json

                kubeconfig.json is the configuration file of kubectl. You can download it on the cluster details page.


                The content of kubeconfig.json is as follows:

                +
                {
                +    "kind": "Config",
                +    "apiVersion": "v1",
                +    "preferences": {},
                +    "clusters": [{
                +        "name": "internalCluster",
                +        "cluster": {
                +            "server": "https://192.168.0.85:5443",
                +            "certificate-authority-data": "LS0tLS1CRUULIE..."
                +        }
                +    }, {
                +        "name": "externalCluster",
                +        "cluster": {
                +            "server": "https://xxx.xxx.xxx.xxx:5443",
                +            "insecure-skip-tls-verify": true
                +        }
                +    }],
                +    "users": [{
                +        "name": "user",
                +        "user": {
                +            "client-certificate-data": "LS0tLS1CRUdJTiBDRVJ...",
                +            "client-key-data": "LS0tLS1CRUdJTiBS..."
                +        }
                +    }],
                +    "contexts": [{
                +        "name": "internal",
                +        "context": {
                +            "cluster": "internalCluster",
                +            "user": "user"
                +        }
                +    }, {
                +        "name": "external",
                +        "context": {
                +            "cluster": "externalCluster",
                +            "user": "user"
                +        }
                +    }],
                +    "current-context": "external"
                +}
                +

                It mainly consists of three sections.

                +
                • clusters: describes the cluster information, mainly the access address of the cluster.
                • users: describes information about the users who access the cluster. It includes the client-certificate-data and client-key-data certificate files.
                • contexts: describes the configuration contexts. You switch between contexts to access different clusters. A context is associated with user and cluster, that is, it defines which user accesses which cluster.
                +

                The preceding kubeconfig.json defines the private network address and public network address of the cluster as two clusters with two different contexts. You can switch the context to use different addresses to access the cluster.

                +
                +

                Configuring Access to Multiple Clusters

                The following steps walk you through the procedure of configuring access to two clusters by modifying kubeconfig.json.

                +

                This example configures only the public network access to the clusters. If you want to access multiple clusters over private networks, retain the clusters field and ensure that the clusters can be accessed over private networks. Its configuration is similar to that described in this example.

                +
                1. Download kubeconfig.json of the two clusters and delete the lines related to private network access, as shown in the following figure.

                  • Cluster A:
                    {
                    +    "kind": "Config",
                    +    "apiVersion": "v1",
                    +    "preferences": {},
                    +    "clusters": [ {
                    +        "name": "externalCluster",
                    +        "cluster": {
                    +            "server": "https://119.xxx.xxx.xxx:5443",
                    +            "insecure-skip-tls-verify": true
                    +        }
                    +    }],
                    +    "users": [{
                    +        "name": "user",
                    +        "user": {
                    +            "client-certificate-data": "LS0tLS1CRUdJTxM...",
                    +            "client-key-data": "LS0tLS1CRUdJTiB...."
                    +        }
                    +    }],
                    +    "contexts": [{
                    +        "name": "external",
                    +        "context": {
                    +            "cluster": "externalCluster",
                    +            "user": "user"
                    +        }
                    +    }],
                    +    "current-context": "external"
                    +}
                    +
                  • Cluster B:
                    {
                    +    "kind": "Config",
                    +    "apiVersion": "v1",
                    +    "preferences": {},
                    +    "clusters": [ {
                    +        "name": "externalCluster",
                    +        "cluster": {
                    +            "server": "https://124.xxx.xxx.xxx:5443",
                    +            "insecure-skip-tls-verify": true
                    +        }
                    +    }],
                    +    "users": [{
                    +        "name": "user",
                    +        "user": {
                    +            "client-certificate-data": "LS0tLS1CRUdJTxM...",
                    +            "client-key-data": "LS0rTUideUdJTiB...."
                    +        }
                    +    }],
                    +    "contexts": [{
                    +        "name": "external",
                    +        "context": {
                    +            "cluster": "externalCluster",
                    +            "user": "user"
                    +        }
                    +    }],
                    +    "current-context": "external"
                    +}
                    +

                    The preceding files have the same structure except that the client-certificate-data and client-key-data fields of user and the clusters.cluster.server field are different.

                    +
                  +

                2. Modify the name field as follows:

                  • Cluster A:
                    {
                    +    "kind": "Config",
                    +    "apiVersion": "v1",
                    +    "preferences": {},
                    +    "clusters": [ {
                    +        "name": "Cluster-A",
                    +        "cluster": {
                    +            "server": "https://119.xxx.xxx.xxx:5443",
                    +            "insecure-skip-tls-verify": true
                    +        }
                    +    }],
                    +    "users": [{
                    +        "name": "Cluster-A-user",
                    +        "user": {
                    +            "client-certificate-data": "LS0tLS1CRUdJTxM...",
                    +            "client-key-data": "LS0tLS1CRUdJTiB...."
                    +        }
                    +    }],
                    +    "contexts": [{
                    +        "name": "Cluster-A-Context",
                    +        "context": {
                    +            "cluster": "Cluster-A",
                    +            "user": "Cluster-A-user"
                    +        }
                    +    }],
                    +    "current-context": "Cluster-A-Context"
                    +}
                    +
                  • Cluster B:
                    {
                    +    "kind": "Config",
                    +    "apiVersion": "v1",
                    +    "preferences": {},
                    +    "clusters": [ {
                    +        "name": "Cluster-B",
                    +        "cluster": {
                    +            "server": "https://124.xxx.xxx.xxx:5443",
                    +            "insecure-skip-tls-verify": true
                    +        }
                    +    }],
                    +    "users": [{
                    +        "name": "Cluster-B-user",
                    +        "user": {
                    +            "client-certificate-data": "LS0tLS1CRUdJTxM...",
                    +            "client-key-data": "LS0rTUideUdJTiB...."
                    +        }
                    +    }],
                    +    "contexts": [{
                    +        "name": "Cluster-B-Context",
                    +        "context": {
                    +            "cluster": "Cluster-B",
                    +            "user": "Cluster-B-user"
                    +        }
                    +    }],
                    +    "current-context": "Cluster-B-Context"
                    +}
                    +
                  +

                3. Combine these two files.

                  The file structure remains unchanged. Combine the contents of clusters, users, and contexts as follows:

                  +
                  {
                  +    "kind": "Config",
                  +    "apiVersion": "v1",
                  +    "preferences": {},
                  +    "clusters": [ {
                  +        "name": "Cluster-A",
                  +        "cluster": {
                  +            "server": "https://119.xxx.xxx.xxx:5443",
                  +            "insecure-skip-tls-verify": true
                  +        }
                  +    },
                  +     {
                  +        "name": "Cluster-B",
                  +        "cluster": {
                  +            "server": "https://124.xxx.xxx.xxx:5443",
                  +            "insecure-skip-tls-verify": true
                  +        }
                  +    }],
                  +    "users": [{
                  +        "name": "Cluster-A-user",
                  +        "user": {
                  +            "client-certificate-data": "LS0tLS1CRUdJTxM...",
                  +            "client-key-data": "LS0tLS1CRUdJTiB...."
                  +        }
                  +    },
                  +    {
                  +        "name": "Cluster-B-user",
                  +        "user": {
                  +            "client-certificate-data": "LS0tLS1CRUdJTxM...",
                  +            "client-key-data": "LS0rTUideUdJTiB...."
                  +        }
                  +    }],
                  +    "contexts": [{
                  +        "name": "Cluster-A-Context",
                  +        "context": {
                  +            "cluster": "Cluster-A",
                  +            "user": "Cluster-A-user"
                  +        }
                  +    },
                  +    {
                  +        "name": "Cluster-B-Context",
                  +        "context": {
                  +            "cluster": "Cluster-B",
                  +            "user": "Cluster-B-user"
                  +        }
                  +    }],
                  +    "current-context": "Cluster-A-Context"
                  +}
                  +
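If you would rather not merge the JSON by hand, kubectl can combine kubeconfig files for you. This is standard kubectl behavior rather than part of the original procedure; the file names below are placeholders for the two modified files:

  # Merge both files and flatten the result into a single kubeconfig.
  KUBECONFIG=cluster-a.json:cluster-b.json kubectl config view --flatten > $HOME/.kube/config
  kubectl config get-contexts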

                +
                +

                Verification

                Run the following commands to copy the file to the kubectl configuration path:

                +

                mkdir -p $HOME/.kube

                +

                mv -f kubeconfig.json $HOME/.kube/config

                +

                Run the kubectl commands to check whether the two clusters can be connected.

                +
                # kubectl config use-context Cluster-A-Context
                +Switched to context "Cluster-A-Context".
                +# kubectl cluster-info
                +Kubernetes control plane is running at https://119.xxx.xxx.xxx:5443
                +CoreDNS is running at https://119.xxx.xxx.xxx:5443/api/v1/namespaces/kube-system/services/coredns:dns/proxy
                +
                +To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
                +
                +# kubectl config use-context Cluster-B-Context
                +Switched to context "Cluster-B-Context".
                +# kubectl cluster-info
                +Kubernetes control plane is running at https://124.xxx.xxx.xxx:5443
                +CoreDNS is running at https://124.xxx.xxx.xxx:5443/api/v1/namespaces/kube-system/services/coredns:dns/proxy
                +
                +To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
                +
diff --git a/docs/cce/umn/cce_bestpractice_00281_0.html b/docs/cce/umn/cce_bestpractice_00281_0.html
new file mode 100644
index 00000000..7bf0c936
--- /dev/null
+++ b/docs/cce/umn/cce_bestpractice_00281_0.html
@@ -0,0 +1,230 @@

                Custom Storage Classes

                +

                Challenges

                When using storage resources in CCE, the most common method is to specify storageClassName to define the type of storage resources to be created when creating a PVC. The following configuration shows how to use a PVC to apply for an SAS (high I/O) EVS disk (block storage).

                +
                apiVersion: v1
                +kind: PersistentVolumeClaim
                +metadata:
                +  name: pvc-evs-example
                +  namespace: default
                +  annotations:
                +    everest.io/disk-volume-type: SAS
                +spec:
                +  accessModes:
                +  - ReadWriteOnce
                +  resources:
                +    requests:
                +      storage: 10Gi
                +  storageClassName: csi-disk
                +

If you need to specify the EVS disk type, you can set the everest.io/disk-volume-type field. The value SAS used here indicates the high I/O EVS disk type. You can also choose SATA (common I/O) or SSD (ultra-high I/O).

                +

                This configuration method may not work if you want to:

                +
                • Set storageClassName only, which is simpler than specifying the EVS disk type by using everest.io/disk-volume-type.
                • Avoid modifying YAML files or Helm charts. Some users switch from self-built or other Kubernetes services to CCE and have written YAML files of many applications. In these YAML files, different types of storage resources are specified by different StorageClassNames. When using CCE, they need to modify a large number of YAML files or Helm charts to use storage resources, which is labor-consuming and error-prone.
                • Set the default storageClassName for all applications to use the default storage class. In this way, you can create storage resources of the default type without needing to specify storageClassName in the YAML file.
                +
                +

                Solution

                This section describes how to set a custom storage class in CCE and how to set the default storage class. You can specify different types of storage resources by setting storageClassName.

                +
                • For the first scenario, you can define custom storageClassNames for SAS and SSD EVS disks. For example, define a storage class named csi-disk-sas for creating SAS disks. The following figure shows the differences before and after you use a custom storage class.

                  +
                • For the second scenario, you can define a storage class with the same name as that in the existing YAML file without needing to modify storageClassName in the YAML file.
                • For the third scenario, you can set the default storage class as described below to create storage resources without specifying storageClassName in YAML files.
                  apiVersion: v1
                  +kind: PersistentVolumeClaim
                  +metadata:
                  +  name: pvc-evs-example
                  +  namespace: default
                  +spec:
                  +  accessModes:
                  +  - ReadWriteOnce
                  +  resources:
                  +    requests:
                  +      storage: 10Gi
                  +
                +
                +

                Storage Classes in CCE

                Run the following command to query the supported storage classes.

                +
                # kubectl get sc
                +NAME                PROVISIONER                     AGE
                +csi-disk            everest-csi-provisioner         17d          # Storage class for EVS disks
                +csi-disk-topology   everest-csi-provisioner         17d          # Storage class for EVS disks with delayed association
                +csi-nas             everest-csi-provisioner         17d          # Storage class for SFS file systems
                +csi-obs             everest-csi-provisioner         17d          # Storage Class for OBS buckets
                +csi-sfsturbo        everest-csi-provisioner         17d          # Storage class for SFS Turbo file systems
                +

                Check the details of csi-disk. You can see that the type of the disk created by csi-disk is SAS by default.

                +
                # kubectl get sc csi-disk -oyaml
                +allowVolumeExpansion: true
                +apiVersion: storage.k8s.io/v1
                +kind: StorageClass
                +metadata:
                +  creationTimestamp: "2021-03-17T02:10:32Z"
                +  name: csi-disk
                +  resourceVersion: "760"
                +  selfLink: /apis/storage.k8s.io/v1/storageclasses/csi-disk
                +  uid: 4db97b6c-853b-443d-b0dc-41cdcb8140f2
                +parameters:
                +  csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io
                +  csi.storage.k8s.io/fstype: ext4
                +  everest.io/disk-volume-type: SAS
                +  everest.io/passthrough: "true"
                +provisioner: everest-csi-provisioner
                +reclaimPolicy: Delete
                +volumeBindingMode: Immediate
                +
                +

                Custom Storage Classes

                You can customize a high I/O storage class in a YAML file. For example, the name csi-disk-sas indicates that the disk type is SAS (high I/O).

                +
                apiVersion: storage.k8s.io/v1
                +kind: StorageClass
                +metadata:
                +  name: csi-disk-sas                          # Name of the high I/O storage class, which can be customized.
                +parameters:
                +  csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io
                +  csi.storage.k8s.io/fstype: ext4
                +  everest.io/disk-volume-type: SAS            # High I/O EVS disk type, which cannot be customized.
                +  everest.io/passthrough: "true"
                +provisioner: everest-csi-provisioner
                +reclaimPolicy: Delete
                +volumeBindingMode: Immediate
                +allowVolumeExpansion: true                    # true indicates that capacity expansion is allowed.
                +

For an ultra-high I/O storage class, you can set the class name to csi-disk-ssd to create SSD EVS disks (ultra-high I/O).

                +
                apiVersion: storage.k8s.io/v1
                +kind: StorageClass
                +metadata:
                +  name: csi-disk-ssd                       # Name of the ultra-high I/O storage class, which can be customized.
                +parameters:
                +  csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io
                +  csi.storage.k8s.io/fstype: ext4
                +  everest.io/disk-volume-type: SSD         # Ultra-high I/O EVS disk type, which cannot be customized.
                +  everest.io/passthrough: "true"
                +provisioner: everest-csi-provisioner
                +reclaimPolicy: Delete
                +volumeBindingMode: Immediate
                +allowVolumeExpansion: true
                +

reclaimPolicy: specifies the reclaim policy of the underlying cloud storage. The value can be Delete or Retain.

                +
                • Delete: When a PVC is deleted, both the PV and the EVS disk are deleted.
                • Retain: When a PVC is deleted, the PV and underlying storage resources are not deleted. Instead, you must manually delete these resources. After that, the PV resource is in the Released state and cannot be bound to the PVC again.
                +

                The reclamation policy set here has no impact on the SFS Turbo storage. Therefore, the yearly/monthly SFS Turbo resources will not be reclaimed when the cluster or PVC is deleted.

                +
                +

                If high data security is required, you are advised to select Retain to prevent data from being deleted by mistake.

                +

                After the definition is complete, run the kubectl create commands to create storage resources.

                +
                # kubectl create -f sas.yaml
                +storageclass.storage.k8s.io/csi-disk-sas created
                +# kubectl create -f ssd.yaml
                +storageclass.storage.k8s.io/csi-disk-ssd created
                +

                Query the storage class again. Two more types of storage classes are displayed in the command output, as shown below.

                +
                # kubectl get sc
                +NAME                PROVISIONER                     AGE
                +csi-disk            everest-csi-provisioner         17d
                +csi-disk-sas        everest-csi-provisioner         2m28s
                +csi-disk-ssd        everest-csi-provisioner         16s
                +csi-disk-topology   everest-csi-provisioner         17d
                +csi-nas             everest-csi-provisioner         17d
                +csi-obs             everest-csi-provisioner         17d
                +csi-sfsturbo        everest-csi-provisioner         17d
                +

Other types of storage resources can be defined in a similar way. You can use kubectl to obtain the YAML file and modify it as required.

                +
                • File storage
                  # kubectl get sc csi-nas -oyaml
                  +kind: StorageClass
                  +apiVersion: storage.k8s.io/v1
                  +metadata:
                  +  name: csi-nas
                  +provisioner: everest-csi-provisioner
                  +parameters:
                  +  csi.storage.k8s.io/csi-driver-name: nas.csi.everest.io
                  +  csi.storage.k8s.io/fstype: nfs
                  +  everest.io/share-access-level: rw
                  +  everest.io/share-access-to: 5e3864c6-e78d-4d00-b6fd-de09d432c632   # ID of the VPC to which the cluster belongs
                  +  everest.io/share-is-public: 'false'
                  +  everest.io/zone: xxxxx          # AZ
                  +reclaimPolicy: Delete
                  +allowVolumeExpansion: true
                  +volumeBindingMode: Immediate
                  +
                • Object storage
                  # kubectl get sc csi-obs -oyaml
                  +kind: StorageClass
                  +apiVersion: storage.k8s.io/v1
                  +metadata:
                  +  name: csi-obs
                  +provisioner: everest-csi-provisioner
                  +parameters:
                  +  csi.storage.k8s.io/csi-driver-name: obs.csi.everest.io
                  +  csi.storage.k8s.io/fstype: s3fs           # Object storage type. s3fs indicates an object bucket, and obsfs indicates a parallel file system.
                  +  everest.io/obs-volume-type: STANDARD      # Storage class of the OBS bucket
                  +reclaimPolicy: Delete
                  +volumeBindingMode: Immediate
                  +
                +
                +

                Setting a Default Storage Class

                You can specify a storage class as the default class. In this way, if you do not specify storageClassName when creating a PVC, the PVC is created using the default storage class.

                +

                For example, to specify csi-disk-ssd as the default storage class, edit your YAML file as follows:

                +
                apiVersion: storage.k8s.io/v1
                +kind: StorageClass
                +metadata:
                +  name: csi-disk-ssd
                +  annotations:
                +    storageclass.kubernetes.io/is-default-class: "true"   # Specifies the default storage class in a cluster. A cluster can have only one default storage class.
                +parameters:
                +  csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io
                +  csi.storage.k8s.io/fstype: ext4
                +  everest.io/disk-volume-type: SSD
                +  everest.io/passthrough: "true"
                +provisioner: everest-csi-provisioner
                +reclaimPolicy: Delete
                +volumeBindingMode: Immediate
                +allowVolumeExpansion: true
                +
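If the csi-disk-ssd storage class already exists, you can also add the annotation in place with kubectl patch (a standard Kubernetes operation, shown here as an optional alternative to deleting and recreating the storage class as described below):

  kubectl patch storageclass csi-disk-ssd \
    -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}'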

Delete the existing csi-disk-ssd storage class, run the kubectl create command to create it again, and then query the storage classes. The following information is displayed.

                +
                # kubectl delete sc csi-disk-ssd
                +storageclass.storage.k8s.io "csi-disk-ssd" deleted
                +# kubectl create -f ssd.yaml
                +storageclass.storage.k8s.io/csi-disk-ssd created
                +# kubectl get sc
                +NAME                     PROVISIONER                     AGE
                +csi-disk                 everest-csi-provisioner         17d
                +csi-disk-sas             everest-csi-provisioner         114m
                +csi-disk-ssd (default)   everest-csi-provisioner         9s
                +csi-disk-topology        everest-csi-provisioner         17d
                +csi-nas                  everest-csi-provisioner         17d
                +csi-obs                  everest-csi-provisioner         17d
                +csi-sfsturbo             everest-csi-provisioner         17d
                +
                +

                Verification

                • Use csi-disk-sas to create a PVC.
                  apiVersion: v1
                  +kind: PersistentVolumeClaim
                  +metadata:
                  +  name:  sas-disk
                  +spec:
                  +  accessModes:
                  +  - ReadWriteOnce
                  +  resources:
                  +    requests:
                  +      storage: 10Gi
                  +  storageClassName: csi-disk-sas
                  +

Create the PVC and view its details. As shown below, the PVC is created successfully and its STORAGECLASS is csi-disk-sas.

                  +
                  # kubectl create -f sas-disk.yaml 
                  +persistentvolumeclaim/sas-disk created
                  +# kubectl get pvc
                  +NAME       STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
                  +sas-disk   Bound    pvc-6e2f37f9-7346-4419-82f7-b42e79f7964c   10Gi       RWO            csi-disk-sas   24s
                  +# kubectl get pv
                  +NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                     STORAGECLASS   REASON   AGE
                  +pvc-6e2f37f9-7346-4419-82f7-b42e79f7964c   10Gi       RWO            Delete           Bound       default/sas-disk          csi-disk-sas            30s
                  +

                  View the PVC details on the CCE console. On the PV details page, you can see that the disk type is high I/O.

                  +

                  +
                • If storageClassName is not specified, the default configuration is used, as shown below.
                  apiVersion: v1
                  +kind: PersistentVolumeClaim
                  +metadata:
                  +  name:  ssd-disk
                  +spec:
                  +  accessModes:
                  +  - ReadWriteOnce
                  +  resources:
                  +    requests:
                  +      storage: 10Gi
                  +

                  Create and view the storage resource. You can see that the storage class of PVC ssd-disk is csi-disk-ssd, indicating that csi-disk-ssd is used by default.

                  +
                  # kubectl create -f ssd-disk.yaml 
                  +persistentvolumeclaim/ssd-disk created
                  +# kubectl get pvc
                  +NAME       STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
                  +sas-disk   Bound    pvc-6e2f37f9-7346-4419-82f7-b42e79f7964c   10Gi       RWO            csi-disk-sas   16m
                  +ssd-disk   Bound    pvc-4d2b059c-0d6c-44af-9994-f74d01c78731   10Gi       RWO            csi-disk-ssd   10s
                  +# kubectl get pv
                  +NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                     STORAGECLASS   REASON   AGE
                  +pvc-4d2b059c-0d6c-44af-9994-f74d01c78731   10Gi       RWO            Delete           Bound       default/ssd-disk          csi-disk-ssd            15s
                  +pvc-6e2f37f9-7346-4419-82f7-b42e79f7964c   10Gi       RWO            Delete           Bound       default/sas-disk          csi-disk-sas            17m
                  +

                  View the PVC details on the CCE console. On the PV details page, you can see that the disk type is ultra-high I/O.

                  +

                  +
diff --git a/docs/cce/umn/cce_bestpractice_00282.html b/docs/cce/umn/cce_bestpractice_00282.html
new file mode 100644
index 00000000..77c9e5fc
--- /dev/null
+++ b/docs/cce/umn/cce_bestpractice_00282.html
@@ -0,0 +1,227 @@

                Using HPA and CA for Auto Scaling of Workloads and Nodes

                +

                Scenario

                The best way to handle surging traffic is to automatically adjust the number of machines based on the traffic volume or resource usage, which is called scaling.

                +

In CCE, the resources that a container can use are fixed when the application is deployed. Therefore, auto scaling scales pods first: node resource usage rises only after the number of pods increases, and nodes can then be scaled based on that usage. So how do you configure auto scaling in CCE?

                +
                +

                Solution

The two major auto scaling policies are HPA (Horizontal Pod Autoscaler) and CA (Cluster Autoscaler). HPA handles workload auto scaling, and CA handles node auto scaling.

                +

                HPA and CA work with each other. HPA requires sufficient cluster resources for successful scaling. When the cluster resources are insufficient, CA is needed to add nodes. If HPA reduces workloads, the cluster will have a large number of idle resources. In this case, CA needs to release nodes to avoid resource waste.

                +
                As shown in Figure 1, HPA performs scale-out based on the monitoring metrics. When cluster resources are insufficient, newly created pods are in Pending state. CA then checks these pending pods and selects the most appropriate node pool based on the configured scaling policy to scale out the node pool.
                Figure 1 HPA and CA working flows
                +
                +

                Using HPA and CA can easily implement auto scaling in most scenarios. In addition, the scaling process of nodes and pods can be easily observed.

                +

                This section uses an example to describe the auto scaling process using HPA and CA policies together.

                +
                +

                Preparations

                1. Create a cluster with one node. The node should have at least 2 vCPUs and 4 GB of memory, as well as an EIP to allow external access. If no EIP is bound to the node during node creation, you can manually bind one on the ECS console after the node is created.

                  +

                2. Install add-ons for the cluster.

                  • autoscaler: node scaling add-on
                  • metrics-server: an aggregator of resource usage data in a Kubernetes cluster. It can collect measurement data of major Kubernetes resources, such as pods, nodes, containers, and Services. After installation, you can verify that it is working as shown below.
                  +
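                  After both add-ons are running, a quick check (a sketch; run it from any host with kubectl access to the cluster) confirms that metrics-server is serving resource metrics, which HPA depends on:

                  # kubectl top node
                  # kubectl top pod --all-namespaces

                  If both commands return CPU and memory figures instead of an error, the metrics API is available.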

                3. Log in to the cluster node and prepare a computing-intensive application. When a user sends a request, the application has to perform calculations before returning a result.

                  1. Create a PHP file named index.php. For each request, the script calculates the square root 1,000,000 times and then returns OK!.
                    vi index.php
                    +
                    Example file content:
                    <?php
                    +  $x = 0.0001;
                    +  for ($i = 0; $i <= 1000000; $i++) {
                    +    $x += sqrt($x);
                    +  }
                    +  echo "OK!";
                    +?>
                    +
                    +
                  2. Compile a Dockerfile to build an image.
                    vi Dockerfile
                    +
                    Example Dockerfile:
                    FROM php:5-apache
                    +COPY index.php /var/www/html/index.php
                    +RUN chmod a+rx index.php
                    +
                    +
                  3. Run the following command to build an image named hpa-example with the tag latest.
                    docker build -t hpa-example:latest .
                    +
                  4. (Optional) Log in to the SWR console, choose Organization Management in the navigation pane, and click Create Organization in the upper right corner to create an organization.

                    Skip this step if you already have an organization.

                    +
                  5. In the navigation pane, choose My Images and then click Upload Through Client. On the page displayed, click Generate a temporary login command and copy the generated command.
                  6. Run the login command copied in the previous step on the cluster node. If the login is successful, the message "Login Succeeded" is displayed.
                  7. Tag the hpa-example image.

                    docker tag [Image name 1:Tag 1] [Image repository address]/[Organization name]/[Image name 2:Tag 2]

                    +
                    • [Image name 1:Tag 1]: name and tag of the local image to be uploaded.
                    • [Image repository address]: The domain name at the end of the login command in 5 is the image repository address, which can be obtained on the SWR console.
                    • [Organization name]: name of the organization created in 4.
                    • [Image name 2:Tag 2]: desired image name and tag to be displayed on the SWR console.
                    +

                    Example:

                    +

                    docker tag hpa-example:latest swr.eu-de.otc.t-systems.com/group/hpa-example:latest

                    +
                  8. Push the image to the image repository.

                    docker push [Image repository address]/[Organization name]/[Image name 2:Tag 2]

                    +

                    Example:

                    +

                    docker push swr.eu-de.otc.t-systems.com/group/hpa-example:latest

                    +

                    The following information will be returned upon a successful push:

                    +
                    6d6b9812c8ae: Pushed 
                    +... 
                    +fe4c16cbf7a4: Pushed 
                    +latest: digest: sha256:eb7e3bbd*** size: **
                    +

                    To view the pushed image, go to the SWR console and refresh the My Images page.

                    +
                  +

                +
                +

                Creating a Node Pool and a Node Scaling Policy

                1. Log in to the CCE console, access the created cluster, click Nodes on the left, click the Node Pools tab, and click Create Node Pool in the upper right corner.
                2. Set node pool parameters, add a node with 2 vCPUs and 4 GB memory, and enable auto scaling.

                  • Nodes: Set it to 1, indicating that one node is created by default when a node pool is created.
                  • Auto Scaling: Enable the option, meaning that nodes will be automatically created or deleted in the node pool based on the cluster loads.
                  • Max. Nodes: Set it to 5, indicating the maximum number of nodes in a node pool.
                  • Specifications: 2 vCPUs | 4 GiB
                  +

                  Retain the defaults for other parameters. For details, see Creating a Node Pool.

                  +

                  +

                3. Click Add-ons on the left of the cluster console, click Edit under the autoscaler add-on, modify the add-on configuration, enable Auto node scale-in, and configure scale-in parameters. For example, trigger scale-in when the node resource utilization is less than 50%.

                  +

                  +

                  After the preceding configurations, scale-out is performed based on the pending status of pods, and scale-in is triggered when node resource utilization falls below the configured threshold (50% in this example).

                  +

                4. Click Node Scaling on the left of the cluster console and click Create Node Scaling Policy in the upper right corner. Node scaling policies added here trigger scale-out based on the CPU/memory allocation rate or periodically.

                  As shown in the following figure, when the cluster CPU allocation rate is greater than 70%, one node will be added. A node scaling policy must be associated with a node pool, and multiple node pools can be associated. When nodes need to be scaled, nodes with appropriate specifications will be added to or removed from the associated node pools based on the minimum waste principle. For details, see Creating a Node Scaling Policy.

                  +

                  +

                  +

                +
                +

                Creating a Workload

                Use the hpa-example image to create a Deployment with one replica. The image path depends on the organization to which the image was uploaded in SWR. Replace it with the actual value.

                +
                kind: Deployment
                +apiVersion: apps/v1
                +metadata:
                +  name: hpa-example
                +spec:
                +  replicas: 1
                +  selector:
                +    matchLabels:
                +      app: hpa-example
                +  template:
                +    metadata:
                +      labels:
                +        app: hpa-example
                +    spec:
                +      containers:
                +      - name: container-1
                +        image: 'hpa-example:latest'  # Replace it with the address of the image you uploaded to SWR.
                +        resources:
                +          limits:                  # The value of limits must be the same as that of requests to prevent flapping during scaling.
                +            cpu: 500m
                +            memory: 200Mi
                +          requests:
                +            cpu: 500m
                +            memory: 200Mi
                +      imagePullSecrets:
                +      - name: default-secret
                +

                Then, create a NodePort Service for the workload so that the workload can be accessed from external networks.

                +
                kind: Service
                +apiVersion: v1
                +metadata:
                +  name: hpa-example
                +spec:
                +  ports:
                +    - name: cce-service-0
                +      protocol: TCP
                +      port: 80
                +      targetPort: 80
                +      nodePort: 31144
                +  selector:
                +    app: hpa-example
                +  type: NodePort
                +
                +
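                Assuming the two manifests above are saved as hpa-example-deployment.yaml and hpa-example-svc.yaml (file names are illustrative), you can create and check the resources with kubectl:

                # kubectl create -f hpa-example-deployment.yaml -f hpa-example-svc.yaml
                # kubectl get deploy,svc hpa-example

                The Deployment should show 1/1 replicas ready, and the Service should expose node port 31144.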

                Creating an HPA Policy

                Create an HPA policy. As shown below, the policy is associated with the hpa-example workload, and the target CPU usage is 50%.

                +

                There are two other annotations. One defines the CPU thresholds: scaling is not performed while the CPU usage is between 30% and 70%, which prevents flapping caused by minor fluctuations. The other defines the scaling time windows: after the policy is executed, no further scaling is triggered within the cooldown interval, which prevents reactions to short-term fluctuations.

                +
                apiVersion: autoscaling/v2
                +kind: HorizontalPodAutoscaler
                +metadata:
                +  name: hpa-policy
                +  annotations:
                +    extendedhpa.metrics: '[{"type":"Resource","name":"cpu","targetType":"Utilization","targetRange":{"low":"30","high":"70"}}]'
                +    extendedhpa.option: '{"downscaleWindow":"5m","upscaleWindow":"3m"}'
                +spec:
                +  scaleTargetRef:
                +    kind: Deployment
                +    name: hpa-example
                +    apiVersion: apps/v1
                +  minReplicas: 1
                +  maxReplicas: 100
                +  metrics:
                +    - type: Resource
                +      resource:
                +        name: cpu
                +        targetAverageUtilization: 50
                +
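                Assuming the policy above is saved as hpa-policy.yaml (the file name is illustrative), create it and confirm that it is tracking the Deployment:

                # kubectl create -f hpa-policy.yaml
                # kubectl get hpa hpa-policy

                The TARGETS column shows the current CPU usage against the 50% target, as illustrated by the outputs later in this section.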

                Set the parameters as follows if you are using the console.

                +

                +

                +
                +

                Observing the Auto Scaling Process

                1. Check the cluster node status. In the following example, there are two nodes.

                  # kubectl get node
                  +NAME            STATUS   ROLES    AGE     VERSION
                  +192.168.0.183   Ready    <none>   2m20s   v1.17.9-r0-CCE21.1.1.3.B001-17.36.8
                  +192.168.0.26    Ready    <none>   55m     v1.17.9-r0-CCE21.1.1.3.B001-17.36.8
                  +

                  Check the HPA policy. The CPU usage of the target workload is 0%.

                  +
                  # kubectl get hpa hpa-policy
                  +NAME         REFERENCE                TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
                  +hpa-policy   Deployment/hpa-example   0%/50%    1         100       1          4m
                  +

                2. Run the following command to access the workload. In the following command, {ip:port} indicates the access address of the workload, which can be queried on the workload details page.

                  while true;do wget -q -O- http://{ip:port}; done

                  +

                  If no EIP is displayed, the cluster node has not been assigned any EIP. You need to create one, bind it to the node, and synchronize node data.

                  +
                  +

                  Observe the scaling process of the workload.

                  +
                  # kubectl get hpa hpa-policy --watch
                  +NAME         REFERENCE                TARGETS    MINPODS   MAXPODS   REPLICAS   AGE
                  +hpa-policy   Deployment/hpa-example   0%/50%     1         100       1          4m
                  +hpa-policy   Deployment/hpa-example   190%/50%   1         100       1          4m23s
                  +hpa-policy   Deployment/hpa-example   190%/50%   1         100       4          4m31s
                  +hpa-policy   Deployment/hpa-example   200%/50%   1         100       4          5m16s
                  +hpa-policy   Deployment/hpa-example   200%/50%   1         100       4          6m16s
                  +hpa-policy   Deployment/hpa-example   85%/50%    1         100       4          7m16s
                  +hpa-policy   Deployment/hpa-example   81%/50%    1         100       4          8m16s
                  +hpa-policy   Deployment/hpa-example   81%/50%    1         100       7          8m31s
                  +hpa-policy   Deployment/hpa-example   57%/50%    1         100       7          9m16s
                  +hpa-policy   Deployment/hpa-example   51%/50%    1         100       7          10m
                  +hpa-policy   Deployment/hpa-example   58%/50%    1         100       7          11m
                  +

                  You can see that the CPU usage of the workload is 190% at 4m23s, which exceeds the target value. In this case, scaling is triggered to expand the workload to four replicas/pods. In the subsequent several minutes, the CPU usage does not decrease until 7m16s. This is because the new pods have not yet started serving traffic: cluster resources are insufficient, so the new pods stay in Pending state while nodes are being added.

                  +

                  At 7m16s, the CPU usage decreases, indicating that the pods have been created and start to bear traffic. The CPU usage drops to 81% at 8m16s, which is still greater than the target value (50%) and the high threshold (70%), so the workload is expanded to 7 pods at 8m31s. The CPU usage then decreases to 57% at 9m16s and 51% at 10m, which is within the range of 30% to 70%. From then on, the number of pods remains 7.

                  +

                  In the following output, you can see the workload scaling process and the time when the HPA policy takes effect.

                  +
                  # kubectl describe deploy hpa-example
                  +...
                  +Events:
                  +  Type    Reason             Age    From                   Message
                  +  ----    ------             ----   ----                   -------
                  +  Normal  ScalingReplicaSet  25m    deployment-controller  Scaled up replica set hpa-example-79dd795485 to 1
                  +  Normal  ScalingReplicaSet  20m    deployment-controller  Scaled up replica set hpa-example-79dd795485 to 4
                  +  Normal  ScalingReplicaSet  16m    deployment-controller  Scaled up replica set hpa-example-79dd795485 to 7
                  +# kubectl describe hpa hpa-policy
                  +...
                  +Events:
                  +  Type    Reason             Age    From                       Message
                  +  ----    ------             ----   ----                       -------
                  +  Normal  SuccessfulRescale  20m    horizontal-pod-autoscaler  New size: 4; reason: cpu resource utilization (percentage of request) above target
                  +  Normal  SuccessfulRescale  16m    horizontal-pod-autoscaler  New size: 7; reason: cpu resource utilization (percentage of request) above target
                  +

                  Check the number of nodes. The following output shows that two nodes are added.

                  +
                  # kubectl get node
                  +NAME            STATUS   ROLES    AGE     VERSION
                  +192.168.0.120   Ready    <none>   3m5s    v1.17.9-r0-CCE21.1.1.3.B001-17.36.8
                  +192.168.0.136   Ready    <none>   6m58s   v1.17.9-r0-CCE21.1.1.3.B001-17.36.8
                  +192.168.0.183   Ready    <none>   18m     v1.17.9-r0-CCE21.1.1.3.B001-17.36.8
                  +192.168.0.26    Ready    <none>   71m     v1.17.9-r0-CCE21.1.1.3.B001-17.36.8
                  +

                  You can also view the scaling history on the console. For example, the CA policy is executed once when the CPU allocation rate in the cluster exceeds 70%, and the number of nodes in the node pool is increased from 2 to 3. The other node is automatically added by autoscaler based on the pending state of pods in the initial phase of HPA.

                  +

                  The node scaling process is as follows:

                  +
                  1. After the number of pods changes to 4, the pods are in Pending state due to insufficient resources. As a result, the default scale-out policy of the autoscaler add-on is triggered, and the number of nodes is increased by one.
                  2. The second node scale-out is triggered because the CPU allocation rate in the cluster is greater than 70%. As a result, the number of nodes is increased by one, which is recorded in the scaling history on the console. Scaling based on the allocation rate ensures that the cluster has sufficient resources.
                  +

                3. Stop accessing the workload and check the number of pods.

                  # kubectl get hpa hpa-policy --watch
                  +NAME         REFERENCE                TARGETS    MINPODS   MAXPODS   REPLICAS   AGE
                  +hpa-policy   Deployment/hpa-example   50%/50%    1         100       7          12m
                  +hpa-policy   Deployment/hpa-example   21%/50%    1         100       7          13m
                  +hpa-policy   Deployment/hpa-example   0%/50%     1         100       7          14m
                  +hpa-policy   Deployment/hpa-example   0%/50%     1         100       7          18m
                  +hpa-policy   Deployment/hpa-example   0%/50%     1         100       3          18m
                  +hpa-policy   Deployment/hpa-example   0%/50%     1         100       3          19m
                  +hpa-policy   Deployment/hpa-example   0%/50%     1         100       3          19m
                  +hpa-policy   Deployment/hpa-example   0%/50%     1         100       3          19m
                  +hpa-policy   Deployment/hpa-example   0%/50%     1         100       3          19m
                  +hpa-policy   Deployment/hpa-example   0%/50%     1         100       3          23m
                  +hpa-policy   Deployment/hpa-example   0%/50%     1         100       3          23m
                  +hpa-policy   Deployment/hpa-example   0%/50%     1         100       1          23m
                  +

                  You can see that the CPU usage is 21% at 13m. The number of pods is reduced to 3 at 18m, and then reduced to 1 at 23m.

                  +

                  In the following output, you can see the workload scaling process and the time when the HPA policy takes effect.

                  +
                  # kubectl describe deploy hpa-example
                  +...
                  +Events:
                  +  Type    Reason             Age    From                   Message
                  +  ----    ------             ----   ----                   -------
                  +  Normal  ScalingReplicaSet  25m    deployment-controller  Scaled up replica set hpa-example-79dd795485 to 1
                  +  Normal  ScalingReplicaSet  20m    deployment-controller  Scaled up replica set hpa-example-79dd795485 to 4
                  +  Normal  ScalingReplicaSet  16m    deployment-controller  Scaled up replica set hpa-example-79dd795485 to 7
                  +  Normal  ScalingReplicaSet  6m28s  deployment-controller  Scaled down replica set hpa-example-79dd795485 to 3
                  +  Normal  ScalingReplicaSet  72s    deployment-controller  Scaled down replica set hpa-example-79dd795485 to 1
                  +# kubectl describe hpa hpa-policy
                  +...
                  +Events:
                  +  Type    Reason             Age    From                       Message
                  +  ----    ------             ----   ----                       -------
                  +  Normal  SuccessfulRescale  20m    horizontal-pod-autoscaler  New size: 4; reason: cpu resource utilization (percentage of request) above target
                  +  Normal  SuccessfulRescale  16m    horizontal-pod-autoscaler  New size: 7; reason: cpu resource utilization (percentage of request) above target
                  +  Normal  SuccessfulRescale  6m45s  horizontal-pod-autoscaler  New size: 3; reason: All metrics below target
                  +  Normal  SuccessfulRescale  90s    horizontal-pod-autoscaler  New size: 1; reason: All metrics below target
                  +

                  You can also view the HPA policy execution history on the console. Wait until one node is removed from the cluster.

                  +

                  The other two nodes in the node pool are not removed because they both run pods in the kube-system namespace that are not created by DaemonSets. You can check this as shown below. For details, see Node Scaling Mechanisms.

                  +
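                  To check which pods keep a node from being scaled in, list the pods running on that node. The command below is a sketch; replace the node name with one from your own kubectl get node output:

                  # kubectl get pods --all-namespaces -o wide --field-selector spec.nodeName=192.168.0.183

                  Pods in the kube-system namespace that are not managed by a DaemonSet prevent the autoscaler from removing the node.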

                +
                +

                Summary

                Using HPA and CA together makes it easy to implement auto scaling in most scenarios, and the scaling process of nodes and pods can be easily observed.

                +
                +
                +
                + +
                + diff --git a/docs/cce/umn/cce_bestpractice_00284.html b/docs/cce/umn/cce_bestpractice_00284.html new file mode 100644 index 00000000..272c654d --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_00284.html @@ -0,0 +1,247 @@ + + +

                Realizing Automatic Topology for EVS Disks When Nodes Are Deployed Across AZs (csi-disk-topology)

                +

                Challenges

                EVS disks cannot be attached across AZs. For example, EVS disks in AZ 1 cannot be attached to nodes in AZ 2.

                +

                If the storage class csi-disk is used for StatefulSets, when a StatefulSet is scheduled, a PVC and a PV are created immediately (an EVS disk is created along with the PV), and then the PVC is bound to the PV.

                +

                However, when the cluster nodes are located in multiple AZs, the EVS disk created by the PVC and the node to which the pods are scheduled may be in different AZs. As a result, the pods fail to be scheduled.

                +

                +
                +

                Solution

                CCE provides a storage class named csi-disk-topology. When you use this storage class to create a PVC, no PV is created immediately along with the PVC. Instead, the PV is created in the AZ of the node where the pod will be scheduled, and an EVS disk is then created in the same AZ. This ensures that the EVS disk can be attached and the pod can be successfully scheduled.

                +

                In other words, csi-disk-topology postpones the binding between the PVC and the PV until a pod that uses the PVC is scheduled. This is known as delayed binding.

                +
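                For reference, a StorageClass with delayed binding looks roughly like the sketch below. It mirrors the parameters of the built-in csi-disk-topology class shown by kubectl describe sc later in this section; you do not need to create it yourself because CCE already provides csi-disk-topology.

                apiVersion: storage.k8s.io/v1
                kind: StorageClass
                metadata:
                  name: csi-disk-topology-example       # Illustrative name; CCE ships csi-disk-topology by default.
                provisioner: everest-csi-provisioner
                parameters:
                  csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io
                  csi.storage.k8s.io/fstype: ext4
                  everest.io/disk-volume-type: SAS
                  everest.io/passthrough: "true"
                reclaimPolicy: Delete
                allowVolumeExpansion: true
                volumeBindingMode: WaitForFirstConsumer  # Delay PV creation and binding until a pod using the PVC is scheduled.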

                +
                +

                Failed Pod Scheduling Due to csi-disk Used in Cross-AZ Node Deployment

                Create a cluster with three nodes in different AZs.

                +

                Use the csi-disk storage class to create a StatefulSet and check whether the workload is successfully created.

                +
                apiVersion: apps/v1
                +kind: StatefulSet
                +metadata:
                +  name: nginx
                +spec:
                +  serviceName: nginx                             # Name of the headless Service
                +  replicas: 4
                +  selector:
                +    matchLabels:
                +      app: nginx
                +  template:
                +    metadata:
                +      labels:
                +        app: nginx
                +    spec:
                +      containers:
                +        - name: container-0
                +          image: nginx:alpine
                +          resources:
                +            limits:
                +              cpu: 600m
                +              memory: 200Mi
                +            requests:
                +              cpu: 600m
                +              memory: 200Mi
                +          volumeMounts:                           # Storage mounted to the pod
                +          - name:  data
                +            mountPath: /usr/share/nginx/html      # Mount the storage to /usr/share/nginx/html.
                +      imagePullSecrets:
                +        - name: default-secret
                +  volumeClaimTemplates:
                +  - metadata:
                +      name: data
                +      annotations:
                +        everest.io/disk-volume-type: SAS
                +    spec:
                +      accessModes:
                +      - ReadWriteOnce
                +      resources:
                +        requests:
                +          storage: 1Gi
                +      storageClassName: csi-disk
                +

                The StatefulSet uses the following headless Service.

                +
                apiVersion: v1
                +kind: Service       # Object type (Service)
                +metadata:
                +  name: nginx
                +  labels:
                +    app: nginx
                +spec:
                +  ports:
                +    - name: nginx     # Name of the port for communication between pods
                +      port: 80        # Port number for communication between pods
                +  selector:
                +    app: nginx        # Select the pod whose label is app:nginx.
                +  clusterIP: None     # Set this parameter to None, indicating the headless Service.
                +
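                Assuming the two manifests above are saved as nginx-statefulset.yaml and nginx-headless-svc.yaml (file names are illustrative), create the headless Service first so that the name referenced by serviceName exists, and then create the StatefulSet:

                # kubectl create -f nginx-headless-svc.yaml
                # kubectl create -f nginx-statefulset.yaml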

                After the creation, check the PVC and pod status. In the following output, the PVCs have been created and bound successfully, but one pod is in the Pending state.

                +
                # kubectl get pvc -owide
                +NAME           STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE   VOLUMEMODE
                +data-nginx-0   Bound    pvc-04e25985-fc93-4254-92a1-1085ce19d31e   1Gi        RWO            csi-disk       64s   Filesystem
                +data-nginx-1   Bound    pvc-0ae6336b-a2ea-4ddc-8f63-cfc5f9efe189   1Gi        RWO            csi-disk       47s   Filesystem
                +data-nginx-2   Bound    pvc-aa46f452-cc5b-4dbd-825a-da68c858720d   1Gi        RWO            csi-disk       30s   Filesystem
                +data-nginx-3   Bound    pvc-3d60e532-ff31-42df-9e78-015cacb18a0b   1Gi        RWO            csi-disk       14s   Filesystem
                +
                +# kubectl get pod -owide
                +NAME      READY   STATUS    RESTARTS   AGE     IP             NODE            NOMINATED NODE   READINESS GATES
                +nginx-0   1/1     Running   0          2m25s   172.16.0.12    192.168.0.121   <none>           <none>
                +nginx-1   1/1     Running   0          2m8s    172.16.0.136   192.168.0.211   <none>           <none>
                +nginx-2   1/1     Running   0          111s    172.16.1.7     192.168.0.240   <none>           <none>
                +nginx-3   0/1     Pending   0          95s     <none>         <none>          <none>           <none>
                +

                The event information of the pod shows that scheduling fails because no node is available: two nodes (in AZ 1 and AZ 2) do not have sufficient CPU, and the EVS disk that was created is not in the AZ of the third node (in AZ 3), so the pod cannot use that disk.

                +
                # kubectl describe pod nginx-3
                +Name:           nginx-3
                +...
                +Events:
                +  Type     Reason            Age   From               Message
                +  ----     ------            ----  ----               -------
                +  Warning  FailedScheduling  111s  default-scheduler  0/3 nodes are available: 3 pod has unbound immediate PersistentVolumeClaims.
                +  Warning  FailedScheduling  111s  default-scheduler  0/3 nodes are available: 3 pod has unbound immediate PersistentVolumeClaims.
                +  Warning  FailedScheduling  28s   default-scheduler  0/3 nodes are available: 1 node(s) had volume node affinity conflict, 2 Insufficient cpu.
                +

                Check the AZ where the EVS disk created from the PVC is located. The disk for data-nginx-3 is in AZ 1, but the node in AZ 1 has no spare resources and only the node in AZ 3 has CPU resources left. As a result, scheduling fails. This is why PV creation and binding should be delayed until the pod's node is known.

                +
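                One way to check the AZ of the EVS disk behind a PV is to inspect the PV object (a sketch; the PV name comes from the kubectl get pvc output above, and the exact fields displayed depend on the CSI driver):

                # kubectl describe pv pvc-3d60e532-ff31-42df-9e78-015cacb18a0b

                The Node Affinity section (or the PV labels), if populated by the driver, shows the AZ of the underlying disk, which you can compare with the AZ of the node that still has free CPU.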
                +

                Storage Class for Delayed Binding

                If you check the cluster storage class, you can see that the binding mode of csi-disk-topology is WaitForFirstConsumer, indicating that a PV is created and bound when a pod uses the PVC. That is, the PV and the underlying storage resources are created based on the pod information.

                +
                # kubectl get storageclass
                +NAME                PROVISIONER                     RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
                +csi-disk            everest-csi-provisioner         Delete          Immediate              true                   156m
                +csi-disk-topology   everest-csi-provisioner         Delete          WaitForFirstConsumer   true                   156m
                +csi-nas             everest-csi-provisioner         Delete          Immediate              true                   156m
                +csi-obs             everest-csi-provisioner         Delete          Immediate              false                  156m
                +

                VOLUMEBINDINGMODE is displayed if your cluster is v1.19. It is not displayed in clusters of v1.17 or v1.15.

                +

                You can also view the binding mode in the csi-disk-topology details.

                +
                # kubectl describe sc csi-disk-topology
                +Name:                  csi-disk-topology
                +IsDefaultClass:        No
                +Annotations:           <none>
                +Provisioner:           everest-csi-provisioner
                +Parameters:            csi.storage.k8s.io/csi-driver-name=disk.csi.everest.io,csi.storage.k8s.io/fstype=ext4,everest.io/disk-volume-type=SAS,everest.io/passthrough=true
                +AllowVolumeExpansion:  True
                +MountOptions:          <none>
                +ReclaimPolicy:         Delete
                +VolumeBindingMode:     WaitForFirstConsumer
                +Events:                <none>
                +

                Create PVCs of the csi-disk and csi-disk-topology classes. Observe the differences between these two types of PVCs.

                +
                • csi-disk
                  apiVersion: v1
                  +kind: PersistentVolumeClaim
                  +metadata:
                  +  name:  disk
                  +  annotations:
                  +    everest.io/disk-volume-type: SAS
                  +spec:
                  +  accessModes:
                  +  - ReadWriteOnce
                  +  resources:
                  +    requests:
                  +      storage: 10Gi
                  +  storageClassName: csi-disk        # StorageClass
                  +
                • csi-disk-topology
                  apiVersion: v1
                  +kind: PersistentVolumeClaim
                  +metadata:
                  +  name:  topology
                  +  annotations:
                  +    everest.io/disk-volume-type: SAS
                  +spec:
                  +  accessModes:
                  +  - ReadWriteOnce
                  +  resources:
                  +    requests:
                  +      storage: 10Gi
                  +  storageClassName: csi-disk-topology        # StorageClass
                  +
                +

                View the PVC details. As shown below, the csi-disk PVC is in Bound state and the csi-disk-topology PVC is in Pending state.

                +
                # kubectl create -f pvc1.yaml
                +persistentvolumeclaim/disk created
                +# kubectl create -f pvc2.yaml
                +persistentvolumeclaim/topology created
                +# kubectl get pvc
                +NAME           STATUS    VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS        AGE
                +disk           Bound     pvc-88d96508-d246-422e-91f0-8caf414001fc   10Gi       RWO            csi-disk            18s
                +topology       Pending                                                                        csi-disk-topology   2s
                +

                View details about the csi-disk-topology PVC. You can see that "waiting for first consumer to be created before binding" is displayed in the event, indicating that the PVC is bound after the consumer (pod) is created.

                +
                # kubectl describe pvc topology
                +Name:          topology
                +Namespace:     default
                +StorageClass:  csi-disk-topology
                +Status:        Pending
                +Volume:
                +Labels:        <none>
                +Annotations:   everest.io/disk-volume-type: SAS
                +Finalizers:    [kubernetes.io/pvc-protection]
                +Capacity:
                +Access Modes:
                +VolumeMode:    Filesystem
                +Used By:       <none>
                +Events:
                +  Type    Reason                Age               From                         Message
                +  ----    ------                ----              ----                         -------
                +  Normal  WaitForFirstConsumer  5s (x3 over 30s)  persistentvolume-controller  waiting for first consumer to be created before binding
                +

                Create a workload that uses the PVC. Set the PVC name to topology.

                +
                apiVersion: apps/v1
                +kind: Deployment
                +metadata:
                +  name: nginx-deployment
                +spec:
                +  selector:
                +    matchLabels:
                +      app: nginx
                +  replicas: 1
                +  template:
                +    metadata:
                +      labels:
                +        app: nginx
                +    spec:
                +      containers: 
                +      - image: nginx:alpine
                +        name: container-0 
                +        volumeMounts: 
                +        - mountPath: /tmp                                # Mount path 
                +          name: topology-example 
                +      restartPolicy: Always 
                +      volumes: 
                +      - name: topology-example 
                +        persistentVolumeClaim: 
                +          claimName:  topology                       # PVC name
                +

                After the workload is created, check the PVC details again. You can see that the PVC is now bound.

                +
                # kubectl describe pvc topology
                +Name:          topology
                +Namespace:     default
                +StorageClass:  csi-disk-topology
                +Status:        Bound
                +....
                +Used By:       nginx-deployment-fcd9fd98b-x6tbs
                +Events:
                +  Type    Reason                 Age                   From                                                                                                  Message
                +  ----    ------                 ----                  ----                                                                                                  -------
                +  Normal  WaitForFirstConsumer   84s (x26 over 7m34s)  persistentvolume-controller                                                                           waiting for first consumer to be created before binding
                +  Normal  Provisioning           54s                   everest-csi-provisioner_everest-csi-controller-7965dc48c4-5k799_2a6b513e-f01f-4e77-af21-6d7f8d4dbc98  External provisioner is provisioning volume for claim "default/topology"
                +  Normal  ProvisioningSucceeded  52s                   everest-csi-provisioner_everest-csi-controller-7965dc48c4-5k799_2a6b513e-f01f-4e77-af21-6d7f8d4dbc98  Successfully provisioned volume pvc-9a89ea12-4708-4c71-8ec5-97981da032c9
                +
                +

                Using csi-disk-topology in Cross-AZ Node Deployment

                The following uses csi-disk-topology to create a StatefulSet with the same configurations used in the preceding example.

                +
                  volumeClaimTemplates:
                +  - metadata:
                +      name: data
                +      annotations:
                +        everest.io/disk-volume-type: SAS
                +    spec:
                +      accessModes:
                +      - ReadWriteOnce
                +      resources:
                +        requests:
                +          storage: 1Gi
                +      storageClassName: csi-disk-topology
                +

                After the creation, check the PVC and pod status. As shown in the following output, the PVC and pod can be created successfully. The nginx-3 pod is created on the node in AZ 3.

                +
                # kubectl get pvc -owide
                +NAME           STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS        AGE   VOLUMEMODE
                +data-nginx-0   Bound    pvc-43802cec-cf78-4876-bcca-e041618f2470   1Gi        RWO            csi-disk-topology   55s   Filesystem
                +data-nginx-1   Bound    pvc-fc942a73-45d3-476b-95d4-1eb94bf19f1f   1Gi        RWO            csi-disk-topology   39s   Filesystem
                +data-nginx-2   Bound    pvc-d219f4b7-e7cb-4832-a3ae-01ad689e364e   1Gi        RWO            csi-disk-topology   22s   Filesystem
                +data-nginx-3   Bound    pvc-b54a61e1-1c0f-42b1-9951-410ebd326a4d   1Gi        RWO            csi-disk-topology   9s    Filesystem
                +
                +# kubectl get pod -owide
                +NAME      READY   STATUS    RESTARTS   AGE   IP             NODE            NOMINATED NODE   READINESS GATES
                +nginx-0   1/1     Running   0          65s   172.16.1.8     192.168.0.240   <none>           <none>
                +nginx-1   1/1     Running   0          49s   172.16.0.13    192.168.0.121   <none>           <none>
                +nginx-2   1/1     Running   0          32s   172.16.0.137   192.168.0.211   <none>           <none>
                +nginx-3   1/1     Running   0          19s   172.16.1.9     192.168.0.240   <none>           <none>
                +
                +
                +
                + +
                + diff --git a/docs/cce/umn/cce_bestpractice_0050.html b/docs/cce/umn/cce_bestpractice_0050.html new file mode 100644 index 00000000..60c3536d --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0050.html @@ -0,0 +1,18 @@ + + +

                Cluster

                +

                +
                + + diff --git a/docs/cce/umn/cce_bestpractice_0051.html b/docs/cce/umn/cce_bestpractice_0051.html new file mode 100644 index 00000000..dcc7beff --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0051.html @@ -0,0 +1,27 @@ + + + +

                Container

                + +

                +
                + + + diff --git a/docs/cce/umn/cce_bestpractice_0052.html b/docs/cce/umn/cce_bestpractice_0052.html new file mode 100644 index 00000000..d689f65d --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0052.html @@ -0,0 +1,22 @@ + + +

                Networking

                +

                +
                + + diff --git a/docs/cce/umn/cce_bestpractice_0053.html b/docs/cce/umn/cce_bestpractice_0053.html new file mode 100644 index 00000000..aed39b6d --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0053.html @@ -0,0 +1,25 @@ + + +

                Storage

                +
                + + diff --git a/docs/cce/umn/cce_bestpractice_0090.html b/docs/cce/umn/cce_bestpractice_0090.html new file mode 100644 index 00000000..a5795253 --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0090.html @@ -0,0 +1,16 @@ + + +

                Auto Scaling

                +

                +
                + + diff --git a/docs/cce/umn/cce_bestpractice_0107.html b/docs/cce/umn/cce_bestpractice_0107.html index 9d9fe793..e3bbc32a 100644 --- a/docs/cce/umn/cce_bestpractice_0107.html +++ b/docs/cce/umn/cce_bestpractice_0107.html @@ -1,89 +1,90 @@

                How Do I Change the Storage Class Used by a Cluster of v1.15 from FlexVolume to CSI Everest?

                -

                For clusters of v1.15.11-r1 and later, the CSI everest add-on has taken over all functions of the fuxi FlexVolume driver (the storage-driver add-on) for container storage management. In versions later than 1.17.9-r0, the fuxi FlexVolume driver (storage-driver) is no longer supported.

                +

                In clusters later than v1.15.11-r1, CSI (the everest add-on) has taken over all functions of fuxi FlexVolume (the storage-driver add-on) for managing container storage. You are advised to use CSI Everest.

                To migrate your storage volumes, create a static PV to associate with the original underlying storage, and then create a PVC to associate with this static PV. When you upgrade your application, mount the new PVC to the original mounting path to migrate the storage volumes.

                Services will be interrupted during the migration. Therefore, properly plan the migration and back up data.

                -

                Procedure

                1. (Optional) Back up data to prevent data loss in case of exceptions.
                2. Run kubectl commands.
                3. Configure a YAML file of the PV in the CSI format according to the PV in the FlexVolume format and associate the PV with the existing storage.

                  To be specific, run the following commands to configure the pv-example.yaml file, which is used to create a PV.

                  +

                  Procedure

                  1. (Optional) Back up data to prevent data loss in case of exceptions.
                  2. Configure a YAML file of the PV in the CSI format according to the PV in the FlexVolume format and associate the PV with the existing storage.

                    To be specific, run the following commands to configure the pv-example.yaml file, which is used to create a PV.

                    touch pv-example.yaml

                    vi pv-example.yaml

                    Configuration example of a PV for an EVS volume:
                    apiVersion: v1
                     kind: PersistentVolume
                     metadata:
                       labels:
                    -    failure-domain.beta.kubernetes.io/region: eu-de
                    -    failure-domain.beta.kubernetes.io/zone: eu-de-01
                    +    failure-domain.beta.kubernetes.io/region: eu-de
                    +    failure-domain.beta.kubernetes.io/zone: <zone name>
                       annotations:
                         pv.kubernetes.io/provisioned-by: everest-csi-provisioner
                    -  name: pv-evs-example
                    +  name: pv-evs-example
                     spec:
                       accessModes:
                       - ReadWriteOnce
                       capacity:
                    -    storage: 10Gi
                    +    storage: 10Gi
                       csi:
                         driver: disk.csi.everest.io
                         fsType: ext4
                         volumeAttributes:
                    -      everest.io/disk-mode: SCSI
                    -      everest.io/disk-volume-type: SATA
                    +      everest.io/disk-mode: SCSI
                    +      everest.io/disk-volume-type: SAS
                           storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
                    -    volumeHandle: 0992dbda-6340-470e-a74e-4f0db288ed82
                    +    volumeHandle: 0992dbda-6340-470e-a74e-4f0db288ed82
                       persistentVolumeReclaimPolicy: Delete
                       storageClassName: csi-disk
                    +

                    Pay attention to the fields in bold and red. The parameters are described as follows:

                    Table 1 EVS volume configuration parameters

                    • failure-domain.beta.kubernetes.io/region: Region where the EVS disk is located. Use the same value as that of the FlexVolume PV.
                    • failure-domain.beta.kubernetes.io/zone: AZ where the EVS disk is located. Use the same value as that of the FlexVolume PV.
                    • name: Name of the PV, which must be unique in the cluster.
                    • storage: EVS volume capacity in the unit of Gi. Use the value of spec.capacity.storage of the FlexVolume PV.
                    • driver: Storage driver used to attach the volume. Set the driver to disk.csi.everest.io for the EVS volume.
                    • volumeHandle: Volume ID of the EVS disk. Use the value of spec.flexVolume.options.volumeID of the FlexVolume PV.
                    • everest.io/disk-mode: EVS disk mode. Use the value of spec.flexVolume.options.disk-mode of the FlexVolume PV.
                    • everest.io/disk-volume-type: EVS disk type. Use the value of kubernetes.io/volumetype in the storage class corresponding to spec.storageClassName of the FlexVolume PV.
                    • storageClassName: Name of the Kubernetes storage class associated with the storage volume. Set this field to csi-disk for EVS disks.

                    Configuration example of a PV for an SFS volume:
                    apiVersion: v1
                     kind: PersistentVolume
                     metadata:
                    -  name: pv-sfs-example
                    +  name: pv-sfs-example
                       annotations:
                         pv.kubernetes.io/provisioned-by: everest-csi-provisioner
                     spec:
                       accessModes:
                       - ReadWriteMany
                       capacity:
                    -    storage: 10Gi
                    +    storage: 10Gi
                       csi:
                         driver: nas.csi.everest.io
                         fsType: nfs
                         volumeAttributes:
                    -      everest.io/share-export-location: sfs-nas01.Endpoint:/share-436304e8
                    +      everest.io/share-export-location:  # Shared path of the file storage
                           storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
                    -    volumeHandle: 682f00bb-ace0-41d8-9b3e-913c9aa6b695
                    +    volumeHandle: 682f00bb-ace0-41d8-9b3e-913c9aa6b695
                       persistentVolumeReclaimPolicy: Delete
                       storageClassName: csi-nas
                    +

                    Pay attention to the fields in bold and red. The parameters are described as follows:

                    Table 2 SFS volume configuration parameters

                    • name: Name of the PV, which must be unique in the cluster.
                    • storage: File storage size in the unit of Gi. Use the value of spec.capacity.storage of the FlexVolume PV.
                    • driver: Storage driver used to attach the volume. Set the driver to nas.csi.everest.io for the file system.
                    • everest.io/share-export-location: Shared path of the file system. Use the value of spec.flexVolume.options.deviceMountPath of the FlexVolume PV.
                    • volumeHandle: File system ID. Use the value of spec.flexVolume.options.volumeID of the FlexVolume PV.
                    • storageClassName: Name of the Kubernetes storage class. Set this field to csi-nas.

                    Configuration example of a PV for an OBS volume:
                    apiVersion: v1
                     kind: PersistentVolume
                     metadata:
                    -  name: pv-obs-example
                    +  name: pv-obs-example
                       annotations:
                         pv.kubernetes.io/provisioned-by: everest-csi-provisioner
                     spec:
                      accessModes:
                      - ReadWriteMany
                      capacity:
                         storage: 1Gi
                       csi:
                         driver: obs.csi.everest.io
                    -    fsType: s3fs
                    +    fsType: s3fs
                         volumeAttributes:
                    -      everest.io/obs-volume-type: STANDARD
                    -      everest.io/region: eu-de
                    +      everest.io/obs-volume-type: STANDARD
                    +      everest.io/region: eu-de
                           storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
                    -    volumeHandle: obs-normal-static-pv
                    +    volumeHandle: obs-normal-static-pv
                       persistentVolumeReclaimPolicy: Delete
                       storageClassName: csi-obs

                    Pay attention to the fields in bold and red. The parameters are described as follows:

                    Table 3 OBS volume configuration parameters

                    • name: Name of the PV, which must be unique in the cluster.
                    • storage: Storage capacity, in the unit of Gi. Set this parameter to the fixed value 1Gi.
                    • driver: Storage driver used to attach the volume. Set the driver to obs.csi.everest.io for the OBS volume.
                    • fsType: File type. Value options are obsfs or s3fs. If the value is s3fs, an OBS bucket is created and mounted using s3fs. If the value is obsfs, an OBS parallel file system is created and mounted using obsfs. Set this parameter according to the value of spec.flexVolume.options.posix of the FlexVolume PV: if the value is true, set this parameter to obsfs; if the value is false, set it to s3fs.
                    • everest.io/obs-volume-type: Storage class, including STANDARD (standard bucket) and WARM (infrequent access bucket). Set this parameter according to the value of spec.flexVolume.options.storage_class of the FlexVolume PV: if the value is standard, set this parameter to STANDARD; if the value is standard_ia, set it to WARM.
                    • everest.io/region: Region where the OBS bucket is located. Use the value of spec.flexVolume.options.region of the FlexVolume PV.
                    • volumeHandle: OBS bucket name. Use the value of spec.flexVolume.options.volumeID of the FlexVolume PV.
                    • storageClassName: Name of the Kubernetes storage class. Set this field to csi-obs.

                    Configuration example of a PV for an SFS Turbo volume:
                    apiVersion: v1
                     kind: PersistentVolume
                     metadata:
                    -  name: pv-efs-example
                    +  name: pv-efs-example
                       annotations:
                         pv.kubernetes.io/provisioned-by: everest-csi-provisioner
                     spec:
                       accessModes:
                       - ReadWriteMany
                       capacity:
                    -    storage: 10Gi
                    +    storage: 10Gi
                       csi:
                         driver: sfsturbo.csi.everest.io
                         fsType: nfs
                         volumeAttributes:
                    -      everest.io/share-export-location: 192.168.0.169:/
                    +      everest.io/share-export-location: 192.168.0.169:/
                           storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
                    -    volumeHandle: 8962a2a2-a583-4b7f-bb74-fe76712d8414
                    +    volumeHandle: 8962a2a2-a583-4b7f-bb74-fe76712d8414
                       persistentVolumeReclaimPolicy: Delete
                       storageClassName: csi-sfsturbo
                    +

                    Pay attention to the fields in bold and red. The parameters are described as follows:

                    -
                    Table 3 OBS volume configuration parameters

                    Parameter

                    Description

                    +

                    Description

                    name

                    +

                    name

                    Name of the PV, which must be unique in the cluster.

                    +

                    Name of the PV, which must be unique in the cluster.

                    storage

                    +

                    storage

                    Storage capacity in the unit of Gi. Set this parameter to the fixed value 1Gi.

                    +

                    Storage capacity, in the unit of Gi. Set this parameter to the fixed value 1Gi.

                    driver

                    +

                    driver

                    Storage driver used to attach the volume. Set the driver to obs.csi.everest.io for the OBS volume.

                    +

                    Storage driver used to attach the volume. Set the driver to obs.csi.everest.io for the OBS volume.

                    fsType

                    +

                    fsType

                    File type. Value options are obsfs or s3fs. If the value is s3fs, an OBS bucket is created and mounted using s3fs. If the value is obsfs, an OBS parallel file system is created and mounted using obsfs. Set this parameter according to the value of spec.flexVolume.options.posix of the FlexVolume PV. If the value of spec.flexVolume.options.posix is true, set this parameter to obsfs. If the value is false, set this parameter to s3fs.

                    +

                    File type. Value options are obsfs or s3fs. If the value is s3fs, an OBS bucket is created and mounted using s3fs. If the value is obsfs, an OBS parallel file system is created and mounted using obsfs. Set this parameter according to the value of spec.flexVolume.options.posix of the FlexVolume PV. If the value of spec.flexVolume.options.posix is true, set this parameter to obsfs. If the value is false, set this parameter to s3fs.

                    everest.io/obs-volume-type

                    +

                    everest.io/obs-volume-type

                    Storage class, including STANDARD (standard bucket) and WARM (infrequent access bucket). Set this parameter according to the value of spec.flexVolume.options.storage_class of the FlexVolume PV. If the value of spec.flexVolume.options.storage_class is standard, set this parameter to STANDARD. If the value is standard_ia, set this parameter to WARM.

                    +

                    Storage class, including STANDARD (standard bucket) and WARM (infrequent access bucket). Set this parameter according to the value of spec.flexVolume.options.storage_class of the FlexVolume PV. If the value of spec.flexVolume.options.storage_class is standard, set this parameter to STANDARD. If the value is standard_ia, set this parameter to WARM.

                    everest.io/region

                    +

                    everest.io/region

                    Region where the OBS bucket is located. Use the value of spec.flexVolume.options.region of the FlexVolume PV.

                    +

                    Region where the OBS bucket is located. Use the value of spec.flexVolume.options.region of the FlexVolume PV.

                    volumeHandle

                    +

                    volumeHandle

                    OBS bucket name. Use the value of spec.flexVolume.options.volumeID of the FlexVolume PV.

                    +

                    OBS bucket name. Use the value of spec.flexVolume.options.volumeID of the FlexVolume PV.

                    storageClassName

                    +

                    storageClassName

                    Name of the Kubernetes storage class. Set this field to csi-obs.

                    +

                    Name of the Kubernetes storage class. Set this field to csi-obs.

Table 4 SFS Turbo volume configuration parameters

Parameter

Description

name

Name of the PV, which must be unique in the cluster.

storage

File system size. Use the value of spec.capacity.storage of the FlexVolume PV.

driver

Storage driver used to attach the volume. Set it to sfsturbo.csi.everest.io.

everest.io/share-export-location

Shared path of the SFS Turbo volume. Use the value of spec.flexVolume.options.deviceMountPath of the FlexVolume PV.

volumeHandle

SFS Turbo volume ID. Use the value of spec.flexVolume.options.volumeID of the FlexVolume PV.

storageClassName

Name of the Kubernetes storage class. Set this field to csi-sfsturbo for SFS Turbo volumes.
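If you prefer working from the CLI, the FlexVolume PV fields that the preceding tables ask you to copy can be read directly with kubectl. This is only a convenience sketch; pv-example is a placeholder for the name of your FlexVolume PV.

  # Print the capacity and the flexVolume options (volumeID, region, storage_class, posix, deviceMountPath, and so on).
  kubectl get pv pv-example -o jsonpath='{.spec.capacity.storage}{"\n"}'
  kubectl get pv pv-example -o jsonpath='{.spec.flexVolume.options}{"\n"}'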

3. Configure a YAML file of the PVC in the CSI format according to the PVC in the FlexVolume format and associate the PVC with the PV created in 2.

  To be specific, run the following commands to configure the pvc-example.yaml file, which is used to create a PVC.

                    touch pvc-example.yaml

                    vi pvc-example.yaml

                    Configuration example of a PVC for an EVS volume:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  labels:
    failure-domain.beta.kubernetes.io/region: eu-de
    failure-domain.beta.kubernetes.io/zone: <zone name>
  annotations:
    everest.io/disk-volume-type: SAS
    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
  name: pvc-evs-example
  namespace: default
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  volumeName: pv-evs-example
  storageClassName: csi-disk

                    Pay attention to the fields in bold and red. The parameters are described as follows:

Table 5 PVC configuration parameters for an EVS volume

Parameter

Description

failure-domain.beta.kubernetes.io/region

Region where the cluster is located. Use the same value as that of the FlexVolume PVC.

failure-domain.beta.kubernetes.io/zone

AZ where the EVS disk is deployed. Use the same value as that of the FlexVolume PVC.

everest.io/disk-volume-type

Storage class of the EVS disk. The value can be SAS or SSD. Set this parameter to the same value as that of the PV created in 2.

name

PVC name, which must be unique in the namespace. (If the PVC is dynamically created by a stateful application, the value of this parameter must be the same as the name of the FlexVolume PVC.)

namespace

Namespace to which the PVC belongs. Use the same value as that of the FlexVolume PVC.

storage

Requested capacity of the PVC, which must be the same as the storage size of the existing PV.

volumeName

Name of the PV. Set this parameter to the name of the static PV in 2.

storageClassName

Name of the Kubernetes storage class. Set this field to csi-disk for EVS disks.

Configuration example of a PVC for an SFS volume:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  annotations:
    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
  name: pvc-sfs-example
  namespace: default
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
  storageClassName: csi-nas
  volumeName: pv-sfs-example

Pay attention to the fields in bold and red. The parameters are described as follows:

Table 6 PVC configuration parameters for an SFS volume

Parameter

Description

name

PVC name, which must be unique in the namespace. (If the PVC is dynamically created by a stateful application, the value of this parameter must be the same as the name of the FlexVolume PVC.)

namespace

Namespace to which the PVC belongs. Use the same value as that of the FlexVolume PVC.

storage

Storage capacity, in the unit of Gi. The value must be the same as the storage size of the existing PV.

storageClassName

Set this field to csi-nas.

volumeName

Name of the PV. Set this parameter to the name of the static PV in 2.

Configuration example of a PVC for an OBS volume:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  annotations:
    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
    everest.io/obs-volume-type: STANDARD
    csi.storage.k8s.io/fstype: s3fs
  name: pvc-obs-example
  namespace: default
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-obs
  volumeName: pv-obs-example

Pay attention to the fields in bold and red. The parameters are described as follows:

Table 7 PVC configuration parameters for an OBS volume

Parameter

Description

everest.io/obs-volume-type

OBS volume type, which can be STANDARD (standard bucket) or WARM (infrequent access bucket). Set this parameter to the same value as that of the PV created in 2.

csi.storage.k8s.io/fstype

File type, which can be obsfs or s3fs. The value must be the same as that of fsType of the static OBS volume PV.

name

PVC name, which must be unique in the namespace. (If the PVC is dynamically created by a stateful application, the value of this parameter must be the same as the name of the FlexVolume PVC.)

namespace

Namespace to which the PVC belongs. Use the same value as that of the FlexVolume PVC.

storage

Storage capacity, in the unit of Gi. Set this parameter to the fixed value 1Gi.

storageClassName

Name of the Kubernetes storage class. Set this field to csi-obs.

volumeName

Name of the PV. Set this parameter to the name of the static PV created in 2.

Configuration example of a PVC for an SFS Turbo volume:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  annotations:
    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
  name: pvc-efs-example
  namespace: default
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
  storageClassName: csi-sfsturbo
  volumeName: pv-efs-example

Pay attention to the fields in bold and red. The parameters are described as follows:

Table 8 PVC configuration parameters for an SFS Turbo volume

Parameter

Description

name

PVC name, which must be unique in the namespace. (If the PVC is dynamically created by a stateful application, the value of this parameter must be the same as the name of the FlexVolume PVC.)

namespace

Namespace to which the PVC belongs. Use the same value as that of the FlexVolume PVC.

storageClassName

Name of the Kubernetes storage class. Set this field to csi-sfsturbo.

storage

Storage capacity, in the unit of Gi. The value must be the same as the storage size of the existing PV.

volumeName

Name of the PV. Set this parameter to the name of the static PV created in 2.

4. Upgrade the workload to use a new PVC.

  For Deployments

  1. Run the kubectl create -f commands to create a PV and PVC.

    kubectl create -f pv-example.yaml

    kubectl create -f pvc-example.yaml

    Replace the example file name pvc-example.yaml in the preceding commands with the names of the YAML files configured in 2 and 3. You can verify that the new PVC is bound to the PV as shown after this list.

  2. Go to the CCE console. On the workload upgrade page, click Upgrade > Advanced Settings > Data Storage > Cloud Storage.

  3. Uninstall the old storage and add the PVC in the CSI format. Retain the original mounting path in the container.

  4. Click Submit.

  5. Wait until the pods are running.
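A quick way to confirm that the static PV and the new PVC are correctly associated before upgrading the workload (the names are the EVS examples used above; adjust them to your own resources):

  # The PVC should report STATUS Bound and reference the static PV created earlier.
  kubectl get pvc pvc-evs-example -n default
  kubectl get pv pv-evs-example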

For StatefulSets that use existing storage

1. Run the kubectl create -f commands to create a PV and PVC.

  kubectl create -f pv-example.yaml

  kubectl create -f pvc-example.yaml

  Replace the example file name pvc-example.yaml in the preceding commands with the names of the YAML files configured in 2 and 3.

2. Run the kubectl edit command to edit the StatefulSet and use the newly created PVC (see the sketch after this list).

  kubectl edit sts sts-example -n xxx

  Replace sts-example in the preceding command with the actual name of the StatefulSet to upgrade. xxx indicates the namespace to which the StatefulSet belongs.

3. Wait until the pods are running.
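A minimal sketch of the part you change in the kubectl edit session, assuming the StatefulSet mounts its data through a volume named data (the volume and PVC names are placeholders):

  # In the StatefulSet spec.template.spec section, point the volume at the new CSI PVC.
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: pvc-evs-example    # the PVC in the CSI format created above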

For StatefulSets that use dynamically allocated storage

1. Back up the PV and PVC in the FlexVolume format used by the StatefulSet.

  kubectl get pvc xxx -n {namespaces} -oyaml > pvc-backup.yaml

  kubectl get pv xxx -n {namespaces} -oyaml > pv-backup.yaml

2. Change the number of pods to 0.

3. On the storage page, disassociate the FlexVolume PVC used by the StatefulSet.

4. Run the kubectl create -f commands to create a PV and PVC.

  kubectl create -f pv-example.yaml

  kubectl create -f pvc-example.yaml

  Replace the example file name pvc-example.yaml in the preceding commands with the names of the YAML files configured in 2 and 3.

5. Change the number of pods back to the original value and wait until the pods are running.

The dynamic allocation of storage for StatefulSets is achieved by using volumeClaimTemplates. This field cannot be modified by Kubernetes. Therefore, data cannot be migrated by using a new PVC.

Configuration example of volumeClaimTemplates for an EVS volume (fields omitted here keep the values of the FlexVolume PVC):

  volumeClaimTemplates:
    - metadata:
        namespace: default
        creationTimestamp: null
        annotations:
          everest.io/disk-volume-type: SAS
      spec:
        accessModes:
        - ReadWriteOnce
        resources:
          requests:
            storage: 10Gi
        storageClassName: csi-disk

The parameter value must be the same as the PVC of the EVS volume created in 3.

Configuration example of volumeClaimTemplates for an SFS volume (fields omitted here keep the values of the FlexVolume PVC):

  volumeClaimTemplates:
    - metadata:
        ...
      spec:
        resources:
          requests:
            storage: 10Gi
        storageClassName: csi-nas

The parameter value must be the same as the PVC of the SFS volume created in 3.

Configuration example of volumeClaimTemplates for an OBS volume (fields omitted here keep the values of the FlexVolume PVC):

  volumeClaimTemplates:
    - metadata:
        ...
      spec:
        resources:
          requests:
            storage: 1Gi
        storageClassName: csi-obs

The parameter value must be the same as the PVC of the OBS volume created in 3.

                      • Delete the StatefulSet.

                      kubectl delete sts xxx -n {namespaces}

                      • Create the StatefulSet.

                      kubectl create -f sts.yaml
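After the StatefulSet is recreated, a quick sanity check (the namespace placeholder follows the commands above):

  # The pods should reach Running and the recreated PVCs should be Bound.
  kubectl get sts,pod,pvc -n {namespaces}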

5. Check service functions.

  1. Check whether the application is running properly.

  2. Check whether the data storage is normal.

  If a rollback is required, perform 4. Select the PVC in FlexVolume format and upgrade the application.

6. Uninstall the PVC in the FlexVolume format.

  If the application functions normally, unbind the PVC in the FlexVolume format on the storage management page.

  You can also run the kubectl command to delete the PVC and PV of the FlexVolume format.

  Before deleting a PV, change the persistentVolumeReclaimPolicy of the PV to Retain. Otherwise, the underlying storage will be reclaimed after the PV is deleted.

  If the cluster has been upgraded before the storage migration, PVs may fail to be deleted. You can remove the PV protection field finalizers to delete PVs.

                      kubectl patch pv {pv_name} -p '{"metadata":{"finalizers":null}}'
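For reference, the reclaim policy can be switched to Retain with a similar patch before the FlexVolume PV is deleted (pv_name is a placeholder):

  kubectl patch pv {pv_name} -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'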

                    diff --git a/docs/cce/umn/cce_bestpractice_0306.html b/docs/cce/umn/cce_bestpractice_0306.html new file mode 100644 index 00000000..7e2330c7 --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0306.html @@ -0,0 +1,29 @@ + + +

                    Migrating On-premises Kubernetes Clusters to CCE

                    +
                    + + diff --git a/docs/cce/umn/cce_bestpractice_0307.html b/docs/cce/umn/cce_bestpractice_0307.html new file mode 100644 index 00000000..4e873d7d --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0307.html @@ -0,0 +1,78 @@ + + +

                    Solution Overview

                    +

Scenario

Containers are growing in popularity and Kubernetes simplifies containerized deployment. Many companies choose to build their own Kubernetes clusters. However, the O&M workload of an on-premises cluster is heavy, and O&M personnel need to configure the management systems and monitoring solutions by themselves. This increases labor costs while decreasing efficiency.

In terms of performance, an on-premises cluster has poor scalability due to its fixed specifications. Auto scaling cannot be implemented in case of traffic surges, which may easily result in the insufficiency or waste of cluster resources. In addition, an on-premises cluster is usually deployed on a single node without considering disaster recovery risks. Once a fault occurs, the entire cluster cannot be used, which may cause a serious production incident.

Now you can address the preceding challenges by using CCE, a service that allows easy cluster management and flexible scaling, integrated with application service mesh and Helm charts to simplify cluster O&M and reduce operations costs. CCE is easy to use and delivers high performance, security, reliability, openness, and compatibility. This section describes the solution and procedure for migrating on-premises clusters to CCE.

Migration Solution

This section describes a cluster migration solution, which applies to the following types of clusters:

• Kubernetes clusters built in local IDCs
• On-premises clusters built using multiple ECSs
• Cluster services provided by other cloud service providers

Before the migration, you need to analyze all resources in the source clusters and then determine the migration solution. Resources that can be migrated include resources inside and outside the clusters, as listed in the following table.
Table 1 Resources that can be migrated

Category

Migration Object

Remarks

Resources inside a cluster

All objects in a cluster, including pods, jobs, Services, Deployments, and ConfigMaps.

You are not advised to migrate the resources in the velero and kube-system namespaces.

• velero: Resources in this namespace are created by the migration tool and do not need to be migrated.
• kube-system: Resources in this namespace are system resources. If this namespace of the source cluster contains resources created by users, migrate the resources on demand.

CAUTION:

If you are migrating or backing up cluster resources in CCE, for example, from one namespace to another, do not back up the secret paas.elb, because it is periodically updated. After the backup is complete, the secret may become invalid when it is restored. As a result, network storage functions are affected.

Resources inside a cluster

PersistentVolumes (PVs) mounted to containers

Due to restrictions of the Restic tool, migration is not supported for hostPath storage volumes. For details about how to solve the problem, see Storage Volumes of the HostPath Type Cannot Be Backed Up.

Resources outside a cluster

On-premises image repository

Resources can be migrated to SoftWare Repository for Container (SWR).

Resources outside a cluster

Non-containerized database

Resources can be migrated to Relational Database Service (RDS).

Resources outside a cluster

Non-local storage, such as object storage

Resources can be migrated to Object Storage Service (OBS).

Figure 1 shows the migration process. You can migrate resources outside a cluster as required.

Figure 1 Migration solution diagram

Migration Process

The cluster migration process is as follows:

1. Plan resources for the target cluster.

  For details about the differences between CCE clusters and on-premises clusters, see Key Performance Parameter in Planning Resources for the Target Cluster. Plan resources as required and ensure that the performance configuration of the target cluster is the same as that of the source cluster.

2. Migrate resources outside a cluster.

  If you need to migrate resources outside the cluster, see Migrating Resources Outside a Cluster.

3. Install the migration tool.

  After resources outside a cluster are migrated, you can use a migration tool to back up and restore application configurations in the source and target clusters. For details about how to install the tool, see Installing the Migration Tool.

4. Migrate resources in the cluster.

  Use Velero to back up resources in the source cluster to OBS and restore the resources in the target cluster. For details, see Migrating Resources in a Cluster. The Velero commands involved are sketched after this list.

  • Backing Up Applications in the Source Cluster

    To back up resources, use the Velero tool to create a backup object in the original cluster, query and back up cluster data and resources, package the data, and upload the package to the object storage that is compatible with the S3 protocol. Cluster resources are stored in the JSON format.

  • Restoring Applications in the Target Cluster

    During restoration in the target cluster, Velero specifies the temporary object bucket that stores the backup data, downloads the backup data to the new cluster, and redeploys resources based on the JSON file.
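As an illustration only (the backup name, namespace, and object storage follow the examples in this document and must be adjusted to your environment), the backup and restore pair with Velero typically looks like this:

  # In the source cluster: back up a namespace, including PV data via Restic.
  velero backup create wordpress-backup --include-namespaces default --default-volumes-to-restic

  # In the target cluster (configured with the same object storage): restore from that backup.
  velero restore create --from-backup wordpress-backup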

5. Update resources accordingly.

  After the migration, cluster resources may fail to be deployed. You need to update the faulty resources and solve the adaptation problems that may occur.

6. Perform additional tasks.

  After cluster resources are properly deployed, verify application functions after the migration and switch service traffic to the target cluster. After confirming that all services are running properly, bring the source cluster offline.
                    +
                    + + diff --git a/docs/cce/umn/cce_bestpractice_0308.html b/docs/cce/umn/cce_bestpractice_0308.html new file mode 100644 index 00000000..d3b2be7f --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0308.html @@ -0,0 +1,80 @@ + + +

                    Planning Resources for the Target Cluster

                    +

CCE allows you to customize cluster resources to meet various service requirements. Table 1 lists the key performance parameters of a cluster and provides the planned values. You can set the parameters based on your service requirements. It is recommended that the performance configuration be the same as that of the source cluster.

After a cluster is created, the resource parameters marked with asterisks (*) in Table 1 cannot be modified.
Table 1 CCE cluster planning

Resource

Key Performance Parameter

Description

Example Value

Cluster

*Cluster Type

• CCE cluster: supports VM nodes. You can run your containers in a secure and stable container runtime environment based on a high-performance network model.
• CCE Turbo cluster: runs on a cloud native infrastructure that features software-hardware synergy to support passthrough networking, high security and reliability, intelligent scheduling, and BMS nodes.

CCE cluster

Cluster

*Network Model

• VPC network: The container network uses VPC routing to integrate with the underlying network. This network model is applicable to performance-intensive scenarios. The maximum number of nodes allowed in a cluster depends on the route quota in a VPC network.
• Tunnel network: The container network is an overlay tunnel network on top of a VPC network and uses the VXLAN technology. This network model is applicable when there are no high requirements on performance.
• Cloud Native Network 2.0: The container network deeply integrates the elastic network interface (ENI) capability of VPC, uses the VPC CIDR block to allocate container addresses, and supports passthrough networking to containers through a load balancer.

VPC network

Cluster

*Number of master nodes

• 3: Three master nodes will be created to deliver better DR performance. If one master node is faulty, the cluster can still be available without affecting service functions.
• 1: A single master node will be created. This mode is not recommended in commercial scenarios.

3

Node

OS

• EulerOS
• CentOS

EulerOS

Node

Node Specifications (vary depending on the actual region)

• General-purpose: provides a balance of computing, memory, and network resources. It is a good choice for many applications. General-purpose nodes can be used for web servers, workload development, workload testing, and small-scale databases.
• Memory-optimized: provides higher memory capacity than general-purpose nodes and is suitable for relational databases, NoSQL, and other workloads that are both memory-intensive and data-intensive.
• GPU-accelerated: provides powerful floating-point computing and is suitable for real-time, highly concurrent massive computing. Graphics processing units (GPUs) of P series are suitable for deep learning, scientific computing, and CAE. GPUs of G series are suitable for 3D animation rendering and CAD. GPU-accelerated nodes can be added only to clusters of v1.11 or later.
• General computing-plus: provides stable performance and exclusive resources to enterprise-class workloads with high and stable computing performance.
• Disk-intensive: supports local disk storage and provides high networking performance. It is designed for workloads requiring high throughput and data switching, such as big data workloads.

General-purpose (node specifications: 4 vCPUs and 8 GiB memory)

Node

System Disk

• Common I/O: The backend storage media is SATA disks.
• High I/O: The backend storage media is SAS disks.
• Ultra-high I/O: The backend storage media is SSD disks.

High I/O

Node

Storage Type

• EVS volumes: Mount an EVS volume to a container path. When containers are migrated, the attached EVS volumes are migrated accordingly. This storage mode is suitable for data that needs to be permanently stored.
• SFS volumes: Create SFS volumes and mount them to a container path. The file system volumes created by the underlying SFS service can also be used. SFS volumes are applicable to persistent storage for frequent read/write in multiple workload scenarios, including media processing, content management, big data analysis, and workload analysis.
• OBS volumes: Create OBS volumes and mount them to a container path. OBS volumes are applicable to scenarios such as cloud workload, data analysis, content analysis, and hotspot objects.
• SFS Turbo volumes: Create SFS Turbo volumes and mount them to a container path. SFS Turbo volumes are fast, on-demand, and scalable, which makes them suitable for DevOps, containerized microservices, and enterprise office applications.

EVS volumes
                    + + diff --git a/docs/cce/umn/cce_bestpractice_0309.html b/docs/cce/umn/cce_bestpractice_0309.html new file mode 100644 index 00000000..794acd71 --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0309.html @@ -0,0 +1,25 @@ + + +

                    Migrating Resources Outside a Cluster

                    +

                    If your migration does not involve resources outside a cluster listed in Table 1 or you do not need to use other services to update resources after the migration, skip this section.

                    +

Migrating Container Images

To ensure that container images can be properly pulled after cluster migration and to improve container deployment efficiency, you are advised to migrate private images to SoftWare Repository for Container (SWR). CCE works with SWR to provide a pipeline for automated container delivery. Images are pulled in parallel, which greatly improves container delivery efficiency.

You need to manually migrate container images.

1. Remotely log in to any node in the source cluster and run the docker pull command to pull all images to the local host.

2. Log in to the SWR console, click Login Command in the upper right corner of the page, and copy the command.

3. Run the copied login command on the node.

  The message "Login Succeeded" will be displayed upon a successful login.

4. Add tags to all local images.

  docker tag [Image name 1:tag 1] [Image repository address]/[Organization name]/[Image name 2:tag 2]

  • [Image name 1:tag 1]: name and tag of the local image pulled in 1.
  • [Image repository address]: You can query the image repository address on the SWR console.
  • [Organization name]: Enter the name of the organization you created on the SWR console.
  • [Image name 2:tag 2]: image name and tag displayed on the SWR console.

  Example:

  docker tag nginx:v1 swr.eu-de.otc.t-systems.com/cloud-develop/mynginx:v1

5. Run the docker push command to upload all local container image files to SWR. A scripted variant for retagging and pushing multiple images is sketched after this list.

  docker push [Image repository address]/[Organization name]/[Image name 2:tag 2]

  Example:

  docker push swr.eu-de.otc.t-systems.com/cloud-develop/mynginx:v1
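A minimal shell sketch for retagging and pushing several images in one go; the repository address, organization name, and image list below are placeholders that you must replace with your own values:

  #!/bin/bash
  # Retag local images and push them to SWR (values below are examples only).
  REPO="swr.eu-de.otc.t-systems.com"
  ORG="cloud-develop"
  IMAGES="nginx:v1 mysql:5.7"

  for IMG in $IMAGES; do
    docker tag "$IMG" "$REPO/$ORG/$IMG"
    docker push "$REPO/$ORG/$IMG"
  done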

                    Migrating Databases and Storage (On-Demand)

                    You can determine whether to use Relational Database Service (RDS) and Object Storage Service (OBS) based on your production requirements. After the migration is complete, you need to reconfigure the database and storage for applications in the target CCE cluster.

                    +
                    +
                    + + diff --git a/docs/cce/umn/cce_bestpractice_0310.html b/docs/cce/umn/cce_bestpractice_0310.html new file mode 100644 index 00000000..6ed4b549 --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0310.html @@ -0,0 +1,133 @@ + + +

                    Installing the Migration Tool

                    +

Velero is an open-source backup and migration tool for Kubernetes clusters. It integrates the persistent volume (PV) data backup capability of the Restic tool and can be used to back up Kubernetes resource objects (such as Deployments, jobs, Services, and ConfigMaps) in the source cluster. Data in the PVs mounted to pods is backed up and uploaded to the object storage. When a disaster occurs or migration is required, the target cluster can use Velero to obtain the corresponding backup data from OBS and restore cluster resources as required.

According to Migration Solution, you need to prepare temporary object storage to store backup files before the migration. Velero supports OBS or MinIO as the object storage. OBS requires sufficient storage space for storing backup files. You can estimate the storage space based on your cluster scale and data volume. You are advised to use OBS for backup. For details about how to deploy Velero, see Installing Velero.

                    Prerequisites

                    • The Kubernetes version of the source on-premises cluster must be 1.10 or later, and the cluster can use DNS and Internet services properly.
                    • If you use OBS to store backup files, you need to obtain the AK/SK of a user who has the right to operate OBS. For details about how to obtain the AK/SK, see Access Keys.
                    • If you use MinIO to store backup files, bind an EIP to the server where MinIO is installed and enable the API and console port of MinIO in the security group.
                    • The target CCE cluster has been created.
                    • The source cluster and target cluster must each have at least one idle node. It is recommended that the node specifications be 4 vCPUs and 8 GB memory or higher.
                    +
                    +

Installing MinIO

MinIO is an open-source, high-performance object storage tool compatible with the S3 API protocol. If MinIO is used to store backup files for cluster migration, you need a temporary server to deploy MinIO and provide services for external systems. If you use OBS to store backup files, skip this section and go to Installing Velero.

MinIO can be installed in any of the following locations:

• Temporary ECS outside the cluster

  If the MinIO server is installed outside the cluster, backup files will not be affected when a catastrophic fault occurs in the cluster.

• Idle nodes in the cluster

  You can remotely log in to a node to install the MinIO server or install MinIO in a container. For details, see the official Velero documentation at https://velero.io/docs/v1.7/contributions/minio/#set-up-server.

  For example, you can install MinIO in a container by applying the example MinIO deployment YAML shipped with Velero. When you do so, note the following:

  • The storage type in the YAML file provided by Velero is emptyDir. You are advised to change the storage type to HostPath or Local (see the sketch after this list). Otherwise, backup files will be permanently lost after the container is restarted.
  • Ensure that the MinIO Service is accessible externally. Otherwise, backup files cannot be downloaded outside the cluster. You can change the Service type to NodePort or use another type of public network access Service.
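A minimal sketch of the two changes suggested above, assuming the example MinIO Deployment uses a volume named storage (this name is an assumption; check the YAML you actually apply):

  # In the MinIO Deployment, replace the emptyDir volume with a hostPath volume
  # so that backup files survive container restarts:
    volumes:
    - name: storage              # assumed volume name from the example YAML
      hostPath:
        path: /opt/miniodata     # node-local directory that stores the backup files
        type: DirectoryOrCreate

  # In the MinIO Service, expose the API port outside the cluster:
    type: NodePort
    ports:
    - port: 9000
      targetPort: 9000
      nodePort: 30900            # example node port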

                    Regardless of which deployment method is used, the server where MinIO is installed must have sufficient storage space, an EIP must be bound to the server, and the MinIO service port must be enabled in the security group. Otherwise, backup files cannot be uploaded or downloaded.

                    +

                    In this example, MinIO is installed on a temporary ECS outside the cluster.

                    +
1. Download MinIO.

  mkdir /opt/minio
  mkdir /opt/miniodata
  cd /opt/minio
  wget https://dl.minio.io/server/minio/release/linux-amd64/minio
  chmod +x minio

2. Set the username and password of MinIO.

  The username and password set using this method are temporary environment variables and must be reset after the service is restarted. Otherwise, the default root credential minioadmin:minioadmin will be used to create the service.

  export MINIO_ROOT_USER=minio
  export MINIO_ROOT_PASSWORD=minio123

3. Create a service. In the command, /opt/miniodata/ indicates the local disk path for MinIO to store data.

  The default API port of MinIO is 9000, and the console port is randomly generated. You can use the --console-address parameter to specify a console port.

  ./minio server /opt/miniodata/ --console-address ":30840" &

  Enable the API and console ports in the firewall and security group on the server where MinIO is to be installed. Otherwise, access to the object bucket will fail.

4. Use a browser to access http://{EIP of the node where MinIO resides}:30840. The MinIO console page is displayed.
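Instead of using the console, the bucket described in Installing Velero can also be created from the command line with the MinIO client (mc). This is only a sketch and assumes mc is installed and that the credentials set above are used:

  # Register the MinIO endpoint and create the bucket used by Velero (names are examples).
  mc alias set velero-minio http://{EIP of the node where MinIO resides}:9000 minio minio123
  mc mb velero-minio/velero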

Installing Velero

Go to the OBS console or MinIO console and create a bucket named velero to store backup files. You can customize the bucket name, but it must then be used when installing Velero. Otherwise, the bucket cannot be accessed and the backup fails. For details, see 4.

• Velero instances need to be installed and deployed in both the source and target clusters. The installation procedures are the same, and the instances are used for backup and restoration, respectively.
• The master node of a CCE cluster does not provide a port for remote login. You can install Velero using kubectl.
• If there are a large number of resources to back up, you are advised to adjust the CPU and memory resources of Velero and Restic to 1 vCPU and 1 GB memory or higher. For details, see Backup Tool Resources Are Insufficient.
• The object storage bucket for storing backup files must be empty.

Download the latest stable binary file from https://github.com/vmware-tanzu/velero/releases. This section uses Velero 1.7.0 as an example. The installation process in the source cluster is the same as that in the target cluster.
1. Download the binary file of Velero 1.7.0.

  wget https://github.com/vmware-tanzu/velero/releases/download/v1.7.0/velero-v1.7.0-linux-amd64.tar.gz

2. Install the Velero client.

  tar -xvf velero-v1.7.0-linux-amd64.tar.gz
  cp ./velero-v1.7.0-linux-amd64/velero /usr/local/bin

3. Create the access key file credentials-velero for the backup object storage.

  vim credentials-velero

  Replace the AK/SK in the file based on the site requirements. If OBS is used, obtain the AK/SK by referring to Access Keys. If MinIO is used, the AK and SK are the username and password created in 2 of Installing MinIO.

  [default]
  aws_access_key_id = {AK}
  aws_secret_access_key = {SK}

4. Deploy the Velero server. Change the value of --bucket to the name of the created object storage bucket. In this example, the bucket name is velero. For more information about custom installation parameters, see Customize Velero Install.

  velero install \
    --provider aws \
    --plugins velero/velero-plugin-for-aws:v1.2.1 \
    --bucket velero \
    --secret-file ./credentials-velero \
    --use-restic \
    --use-volume-snapshots=false \
    --backup-location-config region=eu-de,s3ForcePathStyle="true",s3Url=http://obs.eu-de.otc.t-systems.com
Table 1 Installation parameters of Velero

Parameter

Description

--provider

Vendor who provides the plug-in.

--plugins

API component compatible with AWS S3. Both OBS and MinIO support the S3 protocol.

--bucket

Name of the object storage bucket for storing backup files. The bucket must be created in advance.

--secret-file

Secret file for accessing the object storage, that is, the credentials-velero file created in 3.

--use-restic

Whether to use Restic to support PV data backup. You are advised to enable this function. Otherwise, storage volume resources cannot be backed up.

--use-volume-snapshots

Whether to create the VolumeSnapshotLocation object for PV snapshots, which requires support from the snapshot program. Set this parameter to false.

--backup-location-config

OBS bucket configurations, including region, s3ForcePathStyle, and s3Url.

region

Region to which the object storage bucket belongs.

• If OBS is used, set this parameter according to your region, for example, eu-de.
• If MinIO is used, set this parameter to minio.

s3ForcePathStyle

The value true indicates that the S3 file path format is used.

s3Url

API access address of the object storage bucket.

• If OBS is used, set this parameter to http://obs.{region}.otc.t-systems.com (region indicates the region where the object storage bucket is located). For example, if the region is eu-de, the parameter value is http://obs.eu-de.otc.t-systems.com.
• If MinIO is used, set this parameter to http://{EIP of the node where MinIO is located}:9000. The value of this parameter is determined based on the IP address and port of the node where MinIO is installed.

  NOTE:
  • The access port in s3Url must be set to the API port of MinIO instead of the console port. The default API port of MinIO is 9000.
  • To access MinIO installed outside the cluster, you need to enter the public IP address of MinIO.

                    5. By default, a namespace named velero is created for the Velero instance. Run the following command to view the pod status:

                      $ kubectl get pod -n velero
                      +NAME                   READY   STATUS    RESTARTS   AGE
                      +restic-rn29c           1/1     Running   0          16s
                      +velero-c9ddd56-tkzpk   1/1     Running   0          16s
                      +

                      To prevent memory insufficiency during backup in the actual production environment, you are advised to change the CPU and memory allocated to Restic and Velero by referring to Backup Tool Resources Are Insufficient.

                      +
                      +

                    6. Check the interconnection between Velero and the object storage and ensure that the status is Available.

                      $ velero backup-location get
                      +NAME      PROVIDER   BUCKET/PREFIX   PHASE       LAST VALIDATED                  ACCESS MODE   DEFAULT
                      +default   aws        velero          Available   2021-10-22 15:21:12 +0800 CST   ReadWrite     true
                      +

                    +
                    +
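                    The installation command above targets OBS. As a reference only, the following is a minimal sketch of the same installation when MinIO is used as the backup storage, assembled from the MinIO values in Table 1; the EIP and port are placeholders that depend on where your MinIO is deployed, and the bucket is assumed to still be named velero.

                      velero install \
                        --provider aws \
                        --plugins velero/velero-plugin-for-aws:v1.2.1 \
                        --bucket velero \
                        --secret-file ./credentials-velero \
                        --use-restic \
                        --use-volume-snapshots=false \
                        --backup-location-config region=minio,s3ForcePathStyle="true",s3Url=http://{EIP of the node where minio is located}:9000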
                    + + diff --git a/docs/cce/umn/cce_bestpractice_0311.html b/docs/cce/umn/cce_bestpractice_0311.html new file mode 100644 index 00000000..83ef9bb7 --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0311.html @@ -0,0 +1,67 @@ + + +

                    Migrating Resources in a Cluster

                    +

                    Scenario

                    WordPress is used as an example to describe how to migrate an application from an on-premises Kubernetes cluster to a CCE cluster. The WordPress application consists of the WordPress and MySQL components, which are containerized. The two components are bound to two local storage volumes of the Local type respectively and provide external access through the NodePort Service.

                    +

                    Before the migration, use a browser to access the WordPress site, create a site named Migrate to CCE, and publish an article to verify the integrity of PV data after the migration. The article published in WordPress will be stored in the wp_posts table of the MySQL database. If the migration is successful, all contents in the database will be migrated to the new cluster. You can verify the PV data migration based on the migration result.

                    +
                    +

                    Prerequisites

                    • Before the migration, clear the abnormal pod resources in the source cluster. If the pod is in the abnormal state and has a PVC mounted, the PVC is in the pending state after the cluster is migrated.
                    • Ensure that the cluster on the CCE side does not have the same resources as the cluster to be migrated because Velero does not restore the same resources by default.
                    • To ensure that container images can be properly pulled after cluster migration, migrate the images to SWR.
                    • CCE does not support EVS disks of the ReadWriteMany type. If resources of this type exist in the source cluster, change the storage type to ReadWriteOnce.
                    • Velero integrates the Restic tool to back up and restore storage volumes. Currently, the storage volumes of the HostPath type are not supported. For details, see Restic Restrictions. If you need to back up storage volumes of this type, replace the hostPath volumes with local volumes by referring to Storage Volumes of the HostPath Type Cannot Be Backed Up. If a backup task involves storage of the HostPath type, the storage volumes of this type will be automatically skipped and a warning message will be generated. This will not cause a backup failure.
                    +
                    +

                    Backing Up Applications in the Source Cluster

                    1. (Optional) If you need to back up the data of a specified storage volume in the pod, add an annotation to the pod. The annotation template is as follows:

                      kubectl -n <namespace> annotate <pod/pod_name> backup.velero.io/backup-volumes=<volume_name_1>,<volume_name_2>,...
                      +
                      • <namespace>: namespace where the pod is located.
                      • <pod_name>: pod name.
                      • <volume_name>: name of the persistent volume mounted to the pod. You can run kubectl describe to query the pod information. The Volume field lists the names of all persistent volumes attached to the pod.
                      +

                      Add annotations to the pods of WordPress and MySQL. The pod names are wordpress-758fbf6fc7-s7fsr and mysql-5ffdfbc498-c45lh. As the pods are in the default namespace default, the -n <NAMESPACE> parameter can be omitted.

                      +
                      kubectl annotate pod/wordpress-758fbf6fc7-s7fsr backup.velero.io/backup-volumes=wp-storage
                      +kubectl annotate pod/mysql-5ffdfbc498-c45lh backup.velero.io/backup-volumes=mysql-storage
                      +

                    2. Back up the application. During the backup, you can specify resources based on parameters. If no parameter is added, the entire cluster resources are backed up by default. For details about the parameters, see Resource filtering.

                      • --default-volumes-to-restic: indicates that the Restic tool is used to back up all storage volumes mounted to the pod. Storage volumes of the HostPath type are not supported. If this parameter is not specified, the storage volume specified by annotation in 1 is backed up by default. This parameter is available only when --use-restic is specified during Velero installation.
                        velero backup create <backup-name> --default-volumes-to-restic
                        +
                      • --include-namespaces: backs up resources in a specified namespace.
                        velero backup create <backup-name> --include-namespaces <namespace>
                        +
                      • --include-resources: backs up the specified resources.
                        velero backup create <backup-name> --include-resources deployments
                        +
                      • --selector: backs up resources that match the selector.
                        velero backup create <backup-name> --selector <key>=<value>
                        +
                      +

                      In this section, resources in the namespace default are backed up. wordpress-backup is the backup name. You need to specify the same backup name when restoring applications. Example:

                      +
                      velero backup create wordpress-backup --include-namespaces default --default-volumes-to-restic
                      +

                      If the following information is displayed, the backup task is successfully created:

                      +
                      Backup request "wordpress-backup" submitted successfully. Run `velero backup describe wordpress-backup` or `velero backup logs wordpress-backup` for more details.
                      +

                    3. Check the backup status.

                      velero backup get
                      +
                      Information similar to the following is displayed:
                      NAME               STATUS      ERRORS   WARNINGS   CREATED                         EXPIRES   STORAGE LOCATION   SELECTOR
                      +wordpress-backup   Completed   0        0          2021-10-14 15:32:07 +0800 CST   29d       default            <none>
                      +
                      +

                      In addition, you can go to the object bucket to view the backup files. The backups path is the application resource backup path, and the restic path is the PV data backup path.

                      +

                      +

                    +
                    +

                    Restoring Applications in the Target Cluster

                    The storage infrastructure of an on-premises cluster is different from that of a cloud cluster, so after the cluster is migrated, the original PVs cannot be mounted to pods. Therefore, during the migration, update the storage class of the target cluster to mask the differences in underlying storage interfaces between the two clusters, so that workloads request storage resources of the corresponding type when they are created. For details, see Updating the Storage Class.

                    +
                    1. Use kubectl to connect to the CCE cluster. Create a storage class with the same name as that of the source cluster.

                      In this example, the storage class of the source cluster is named local and its storage type is local disk. The availability of local disks depends entirely on the node, and their disaster recovery capability is poor: when the node is unavailable, the stored data is affected. Therefore, EVS volumes are used as storage resources in the CCE cluster, with SAS disks as the backend storage media.

                      +
                      • When an application containing PV data is restored in a CCE cluster, the defined storage class dynamically creates and mounts storage resources (such as EVS volumes) based on the PVC.
                      • The storage resources of the cluster can be changed as required, not limited to EVS volumes. To mount other types of storage, such as file storage and object storage, see Updating the Storage Class.
                      +
                      +

                      Storage class YAML file used in the source cluster (the cluster to be migrated):

                      +
                      apiVersion: storage.k8s.io/v1
                      +kind: StorageClass
                      +metadata:
                      +  name: local
                      +provisioner: kubernetes.io/no-provisioner
                      +volumeBindingMode: WaitForFirstConsumer
                      +
                      The following is an example of the storage class YAML file with the same name used in the target CCE cluster:
                      allowVolumeExpansion: true
                      +apiVersion: storage.k8s.io/v1
                      +kind: StorageClass
                      +metadata:
                      +  name: local
                      +  selfLink: /apis/storage.k8s.io/v1/storageclasses/csi-disk
                      +parameters:
                      +  csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io
                      +  csi.storage.k8s.io/fstype: ext4
                      +  everest.io/disk-volume-type: SAS
                      +  everest.io/passthrough: "true"
                      +provisioner: everest-csi-provisioner
                      +reclaimPolicy: Delete
                      +volumeBindingMode: Immediate  
                      +
                      +

                    2. Use the Velero tool to create a restore and specify a backup named wordpress-backup to restore the WordPress application to the CCE cluster.

                      velero restore create --from-backup wordpress-backup
                      +

                      You can run velero restore get to view the application restoration status, as sketched after this procedure.

                      +

                    3. After the restoration is complete, check whether the application is running properly. If other adaptation problems may occur, rectify the fault by following the procedure described in Updating Resources Accordingly.
                    +
                    +
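                    For reference, the restore status query in 2 returns output similar to the following. The restore name, timestamps, and exact columns are illustrative and may vary with the Velero version; a restore created with --from-backup is, by default, named after the backup plus a timestamp.

                      $ velero restore get
                      NAME                              BACKUP             STATUS      STARTED                         COMPLETED                       ERRORS   WARNINGS   CREATED                         SELECTOR
                      wordpress-backup-20211022153012   wordpress-backup   Completed   2021-10-22 15:30:12 +0800 CST   2021-10-22 15:31:05 +0800 CST   0        0          2021-10-22 15:30:12 +0800 CST   <none>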
                    + + diff --git a/docs/cce/umn/cce_bestpractice_0312.html b/docs/cce/umn/cce_bestpractice_0312.html new file mode 100644 index 00000000..c5971c38 --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0312.html @@ -0,0 +1,124 @@ + + +

                    Updating Resources Accordingly

                    +

                    Updating Images

                    The WordPress and MySQL images used in this example can be pulled from SWR. Therefore, the image pull failure (ErrImagePull) will not occur. If the application to be migrated is created from a private image, perform the following steps to update the image:

                    +
                    1. Migrate the image resources to SWR. For details, see Uploading an Image Through a Container Engine Client.
                    2. Log in to the SWR console and obtain the image path used after the migration.

                      The image path is in the following format:

                      +
                      'swr.{Region}.otc.t-systems.com/{Organization name}/{Image name}:{Tag}'
                      +

                    3. Run the following command to modify the workload and replace the image field in the YAML file with the new image path (a sketch of the edited field follows this procedure):

                      kubectl edit deploy wordpress
                      +

                    4. Check the running status of the workload.
                    +
                    +
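                    The following excerpt is only a sketch of what the edited Deployment might contain after 3. The container name, image name, and tag are assumptions; replace them with the actual path obtained from SWR in 2.

                      spec:
                        template:
                          spec:
                            containers:
                            - name: wordpress
                              # Image path copied from SWR, in the format swr.{Region}.otc.t-systems.com/{Organization name}/{Image name}:{Tag}
                              image: swr.{Region}.otc.t-systems.com/{Organization name}/wordpress:latest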

                    Updating Services

                    After the cluster is migrated, the Service of the source cluster may fail to take effect. You can perform the following steps to update the Service. If ingresses are configured in the source cluster, you need to connect the new cluster to ELB again after the migration. For details, see Using kubectl to Create an ELB Ingress.

                    +
                    1. Connect to the cluster using kubectl.
                    2. Edit the YAML file of the corresponding Service to change the Service type and port number.

                      kubectl edit svc wordpress
                      +
                      To update load balancer resources, you need to connect to ELB again. Add the annotations by following the procedure described in LoadBalancer; a full Service sketch follows this procedure.
                      annotations: 
                      +  kubernetes.io/elb.class: union # Shared load balancer
                      +  kubernetes.io/elb.id: 9d06a39d-xxxx-xxxx-xxxx-c204397498a3    # Load balancer ID, which can be queried on the ELB console.
                      +  kubernetes.io/elb.subnet-id: f86ba71c-xxxx-xxxx-xxxx-39c8a7d4bb36    # ID of the subnet where the cluster resides
                      +  kubernetes.io/session-affinity-mode: SOURCE_IP    # Enable the sticky session based on the source IP address.
                      +
                      +

                    3. Use a browser to check whether the Service is available.
                    +
                    +
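                    For illustration only, a LoadBalancer Service for WordPress might look like the following after 2. The load balancer ID, subnet ID, selector, and port values are assumptions and must match your environment; the annotations follow the ones listed above, and LoadBalancer remains the authoritative reference.

                      apiVersion: v1
                      kind: Service
                      metadata:
                        name: wordpress
                        annotations:
                          kubernetes.io/elb.class: union                                      # Shared load balancer
                          kubernetes.io/elb.id: 9d06a39d-xxxx-xxxx-xxxx-c204397498a3          # Load balancer ID, which can be queried on the ELB console
                          kubernetes.io/elb.subnet-id: f86ba71c-xxxx-xxxx-xxxx-39c8a7d4bb36   # ID of the subnet where the cluster resides
                          kubernetes.io/session-affinity-mode: SOURCE_IP                      # Sticky session based on the source IP address
                      spec:
                        type: LoadBalancer
                        selector:
                          app: wordpress
                        ports:
                        - name: http
                          port: 80
                          targetPort: 80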

                    Updating the Storage Class

                    As the storage infrastructure of the source cluster may differ from that of the target cluster, the original storage volumes cannot be mounted in the target cluster. You can use either of the following methods to update the volumes:

                    +

                    Both update methods can be performed only before the application is restored in the target cluster. Otherwise, PV data resources may fail to be restored. In this case, use Velero to restore applications after the storage class update is complete. For details, see Restoring Applications in the Target Cluster.

                    +
                    +

                    Method 1: Creating a ConfigMap mapping

                    +
                    1. Create a ConfigMap in the CCE cluster and map the storage class used by the source cluster to the default storage class of the CCE cluster. A concrete example for this document's WordPress scenario follows this method.

                      apiVersion: v1
                      +kind: ConfigMap
                      +metadata:
                      +  name: change-storageclass-plugin-config
                      +  namespace: velero
                      +  labels:
                      +    app.kubernetes.io/name: velero
                      +    velero.io/plugin-config: "true"
                      +    velero.io/change-storage-class: RestoreItemAction
                      +data:
                      +  {Storage class name01 in the source cluster}: {Storage class name01 in the target cluster}
                      +  {Storage class name02 in the source cluster}: {Storage class name02 in the target cluster}
                      +

                    2. Run the following command to apply the ConfigMap configuration:

                      $ kubectl create -f change-storage-class.yaml
                      +configmap/change-storageclass-plugin-config created
                      +

                    +

                    +
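                    For the WordPress scenario in this document, the mapping template in Method 1 could be filled in as follows. This is only a sketch that maps the source cluster's local storage class to the CCE csi-disk storage class; adjust the data section to your own storage class names.

                      apiVersion: v1
                      kind: ConfigMap
                      metadata:
                        name: change-storageclass-plugin-config
                        namespace: velero
                        labels:
                          app.kubernetes.io/name: velero
                          velero.io/plugin-config: "true"
                          velero.io/change-storage-class: RestoreItemAction
                      data:
                        local: csi-disk        # {Storage class name in the source cluster}: {Storage class name in the target cluster}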

                    Method 2: Creating a storage class with the same name

                    +
                    1. Run the following command to query the default storage class supported by CCE:

                      kubectl get sc
                      +
                      Information similar to the following is displayed:
                      NAME                PROVISIONER                     RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
                      +csi-disk            everest-csi-provisioner         Delete          Immediate              true                   3d23h
                      +csi-disk-topology   everest-csi-provisioner         Delete          WaitForFirstConsumer   true                   3d23h
                      +csi-nas             everest-csi-provisioner         Delete          Immediate              true                   3d23h
                      +csi-obs             everest-csi-provisioner         Delete          Immediate              false                  3d23h
                      +csi-sfsturbo        everest-csi-provisioner         Delete          Immediate              true                   3d23h
                      +
                      +

                      Table 1 Storage classes

                        Storage Class        Storage Resource
                        csi-disk             EVS
                        csi-disk-topology    EVS with delayed binding
                        csi-nas              SFS
                        csi-obs              OBS
                        csi-sfsturbo         SFS Turbo

                    2. Run the following command to export the required storage class details in YAML format:

                      kubectl get sc <storageclass-name> -o=yaml
                      +

                    3. Copy the YAML file and create a new storage class.

                      Change the storage class name to the name used in the source cluster so that workloads can request the cloud's underlying storage resources through the original name.

                      +
                      The YAML file of csi-obs is used as an example. Delete the unnecessary auto-generated fields under metadata (creationTimestamp, resourceVersion, selfLink, and uid) and change the name field to the storage class name used in the source cluster. You are advised not to modify other parameters.
                      apiVersion: storage.k8s.io/v1
                      +kind: StorageClass
                      +metadata:
                      +  creationTimestamp: "2021-10-18T06:41:36Z"
                      +  name: <your_storageclass_name>     # Use the name of the storage class used in the source cluster.
                      +  resourceVersion: "747"
                      +  selfLink: /apis/storage.k8s.io/v1/storageclasses/csi-obs
                      +  uid: 4dbbe557-ddd1-4ce8-bb7b-7fa15459aac7
                      +parameters:
                      +  csi.storage.k8s.io/csi-driver-name: obs.csi.everest.io
                      +  csi.storage.k8s.io/fstype: obsfs
                      +  everest.io/obs-volume-type: STANDARD
                      +provisioner: everest-csi-provisioner
                      +reclaimPolicy: Delete
                      +volumeBindingMode: Immediate
                      +
                      +
                      • SFS Turbo file systems cannot be directly created using StorageClass. You need to go to the SFS Turbo console to create SFS Turbo file systems that belong to the same VPC subnet and have inbound ports (111, 445, 2049, 2051, 2052, and 20048) enabled in the security group.
                      • CCE does not support EVS disks of the ReadWriteMany type. If resources of this type exist in the source cluster, change the storage type to ReadWriteOnce.
                      +
                      +

                    4. Restore the cluster application by referring to Restoring Applications in the Target Cluster and check whether the PVC is successfully created.

                      kubectl get pvc
                      +
                      In the command output, the VOLUME column indicates the name of the PV automatically created using the storage class.
                      NAME   STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
                      +pvc    Bound    pvc-4c8e655a-1dbc-4897-ae6c-446b502f5e77   5Gi        RWX            local          13s
                      +
                      +

                    +
                    +

                    Updating Databases

                    In this example, the database is a local MySQL database and does not need to be reconfigured after the migration.

                    +
                    • If the RDS instance is in the same VPC as the CCE cluster, it can be accessed using the private IP address. Otherwise, it can be accessed only through a public network by binding an EIP. You are advised to use the private network access mode for higher security and better RDS performance.
                    • Ensure that the inbound rule of the security group to which RDS belongs has been enabled for the cluster. Otherwise, the connection will fail.
                    +
                    +
                    1. Log in to the RDS console and obtain the private IP address and port number of the DB instance on the Basic Information page.
                    2. Run the following command to modify the WordPress workload:

                      kubectl edit deploy wordpress
                      +

                      Set the environment variables in the env field, as sketched after this procedure.

                      +
                      • WORDPRESS_DB_HOST: address and port number used for accessing the database, that is, the internal network address and port number obtained in the previous step.
                      • WORDPRESS_DB_USER: username for accessing the database.
                      • WORDPRESS_DB_PASSWORD: password for accessing the database.
                      • WORDPRESS_DB_NAME: name of the database to be connected.
                      +

                    3. Check whether the RDS database is properly connected.
                    +
                    +
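                    A minimal sketch of the env section after 2 is shown below. The container name, image, and all values are placeholders; in practice, store the password in a secret rather than in plain text.

                      spec:
                        template:
                          spec:
                            containers:
                            - name: wordpress
                              image: wordpress:latest
                              env:
                              - name: WORDPRESS_DB_HOST
                                value: "192.168.0.10:3306"   # Private IP address and port obtained in 1
                              - name: WORDPRESS_DB_USER
                                value: "wordpress"
                              - name: WORDPRESS_DB_PASSWORD
                                value: "********"
                              - name: WORDPRESS_DB_NAME
                                value: "wordpress"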
                    + + diff --git a/docs/cce/umn/cce_bestpractice_0313.html b/docs/cce/umn/cce_bestpractice_0313.html new file mode 100644 index 00000000..d87a1eef --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0313.html @@ -0,0 +1,20 @@ + + +

                    Performing Additional Tasks

                    +

                    Verifying Application Functions

                    Cluster migration involves full migration of application data, which may cause intra-application adaptation problems. In this example, after the cluster is migrated, the redirection link of the article published in WordPress still points to the original domain name. If you click the article title, you are redirected to the application in the source cluster. Therefore, you need to search for the original domain name in WordPress, replace it with the new domain name, and change the values of site_url and the home URL in the database. For details, see Changing The Site URL.

                    +
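                    If you prefer to update the URLs directly in the database, the following is a hedged sketch that assumes the MySQL pod name, credentials, database name, and the standard WordPress wp_options table; the procedure in Changing The Site URL remains the authoritative reference.

                      kubectl exec -it <mysql_pod_name> -- \
                        mysql -uroot -p<password> wordpress \
                        -e "UPDATE wp_options SET option_value='https://<new_domain>' WHERE option_name IN ('siteurl','home');"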

                    Access the new address of the WordPress application. If the article published before the migration is displayed, the data of the persistent volume is successfully restored.

                    +

                    +
                    +

                    Switching Live Traffic to the Target Cluster

                    O&M personnel switch DNS to direct live traffic to the target cluster.

                    +
                    • DNS traffic switching: Adjust the DNS configuration to switch traffic.
                    • Client traffic switching: Upgrade the client code or update the configuration to switch traffic.
                    +
                    +

                    Bringing the Source Cluster Offline

                    After confirming that the service on the target cluster is normal, bring the source cluster offline and delete the backup files.

                    +
                    • Verify that the service on the target cluster is running properly.
                    • Bring the source cluster offline.
                    • Delete backup files.
                    +
                    +
                    + + diff --git a/docs/cce/umn/cce_bestpractice_0314.html b/docs/cce/umn/cce_bestpractice_0314.html new file mode 100644 index 00000000..38593f8e --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0314.html @@ -0,0 +1,74 @@ + + +

                    Troubleshooting

                    +

                    Storage Volumes of the HostPath Type Cannot Be Backed Up

                    Both HostPath and Local volumes are local storage volumes. However, the Restic tool integrated in Velero cannot back up the PVs of the HostPath type and supports only the Local type. Therefore, you need to replace the storage volumes of the HostPath type with the Local type in the source cluster.

                    +

                    Local volumes are recommended for Kubernetes v1.10 or later and can only be statically created. For details, see local.

                    +
                    +
                    1. Create a storage class for the Local volume.

                      Example YAML:
                      apiVersion: storage.k8s.io/v1
                      +kind: StorageClass
                      +metadata:
                      +  name: local
                      +provisioner: kubernetes.io/no-provisioner
                      +volumeBindingMode: WaitForFirstConsumer
                      +
                      +

                    2. Change the hostPath field to the local field, specify the original local disk path of the host machine, and add the nodeAffinity field.

                      Example YAML:
                      apiVersion: v1
                      +kind: PersistentVolume
                      +metadata:
                      +  name: mysql-pv
                      +  labels: 
                      +    app: mysql
                      +spec:
                      +  accessModes:
                      +  - ReadWriteOnce
                      +  capacity:
                      +    storage: 5Gi
                      +  storageClassName: local     # Storage class created in the previous step
                      +  persistentVolumeReclaimPolicy: Delete
                      +  local:
                      +    path: "/mnt/data"     # Path of the attached local disk
                      +  nodeAffinity:
                      +    required:
                      +      nodeSelectorTerms:
                      +      - matchExpressions:
                      +        - key: kubernetes.io/hostname
                      +          operator: Exists
                      +
                      +

                    3. Run the following commands to verify the creation result:

                      kubectl get pv
                      +

                      Information similar to the following is displayed:

                      +
                      NAME       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM               STORAGECLASS   REASON   AGE
                      +mysql-pv   5Gi        RWO            Delete           Available                       local                   3s
                      +

                    +
                    +

                    Backup Tool Resources Are Insufficient

                    In the production environment, if there are many resources to back up and the default resource specifications of the backup tool are used, the backup tool may run out of resources. In this case, perform the following steps to adjust the CPU and memory allocated to Velero and Restic:

                    +

                    Before installing Velero:

                    +

                    You can specify the size of resources used by Velero and Restic when installing Velero.

                    +

                    The following is an example of installation parameters:

                    +
                    velero install \
                    +   --velero-pod-cpu-request 500m \
                    +   --velero-pod-mem-request 1Gi \
                    +   --velero-pod-cpu-limit 1000m \
                    +   --velero-pod-mem-limit 1Gi \
                    +   --use-restic \
                    +   --restic-pod-cpu-request 500m \
                    +   --restic-pod-mem-request 1Gi \
                    +   --restic-pod-cpu-limit 1000m \
                    +   --restic-pod-mem-limit 1Gi
                    +

                    After Velero is installed:

                    +
                    1. Edit the YAML files of the Velero and Restic workloads in the velero namespace.

                      kubectl edit deploy velero -n velero
                      +kubectl edit deploy restic -n velero
                      +

                    2. Modify the resource size under the resources field. The modification is the same for the Velero and Restic workloads, as shown in the following:

                      resources:
                      +  limits:
                      +    cpu: "1"
                      +    memory: 1Gi
                      +  requests:
                      +    cpu: 500m
                      +    memory: 1Gi
                      +

                    +
                    +
                    + + diff --git a/docs/cce/umn/cce_bestpractice_0315.html b/docs/cce/umn/cce_bestpractice_0315.html new file mode 100644 index 00000000..c4eef920 --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0315.html @@ -0,0 +1,25 @@ + + + +

                    Security

                    + +

                    +
                    + + + diff --git a/docs/cce/umn/cce_bestpractice_0317.html b/docs/cce/umn/cce_bestpractice_0317.html new file mode 100644 index 00000000..80c9b6ac --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0317.html @@ -0,0 +1,103 @@ + + +

                    Cluster Security

                    +

                    For security purposes, you are advised to configure a cluster as follows.

                    +

                    Using the CCE Cluster of the Latest Version

                    Kubernetes releases a major version in about four months. CCE follows the same frequency as Kubernetes to release major versions. To be specific, a new CCE version is released about three months after a new Kubernetes version is released in the community. For example, Kubernetes v1.19 was released in September 2020 and CCE v1.19 was released in March 2021.

                    +

                    The latest cluster version has known vulnerabilities fixed or provides a more comprehensive security protection mechanism. You are advised to select the latest cluster version when creating a cluster. Before a cluster version is deprecated and removed, upgrade your cluster to a supported version.

                    +
                    +

                    Disabling the Automatic Token Mounting Function of the Default Service Account

                    By default, Kubernetes associates the default service account with every pod, that is, the token is mounted to a container. The container can use this token to pass the authentication by the kube-apiserver and kubelet components. In a cluster with RBAC disabled, the service account that owns the token has control permissions over the entire cluster. In a cluster with RBAC enabled, the permissions of the service account that owns the token depend on the roles bound by the administrator. The service account's token is generally used by workloads that need to access kube-apiserver, such as coredns, autoscaler, and prometheus. For workloads that do not need to access kube-apiserver, you are advised to disable the automatic association between the service account and token.

                    +

                    Two methods are available:

                    +
                    • Method 1: Set the automountServiceAccountToken field of the service account to false. After the configuration is complete, newly created workloads will not be associated with the default service account by default. Set this field for each namespace as required.
                      apiVersion: v1 
                      +kind: ServiceAccount 
                      +metadata: 
                      +  name: default 
                      +automountServiceAccountToken: false 
                      +...
                      +

                      When a workload needs to be associated with a service account, explicitly set the automountServiceAccountToken field to true in the YAML file of the workload.

                      +
                      ... 
                      + spec: 
                      +   template: 
                      +     spec: 
                      +       serviceAccountName: default 
                      +       automountServiceAccountToken: true 
                      +       ...
                      +

                      +
                    • Method 2: Explicitly disable the function of automatically associating with service accounts for workloads.
                      ... 
                      + spec: 
                      +   template: 
                      +     spec: 
                      +       automountServiceAccountToken: false 
                      +       ...
                      +
                    +
                    +

                    Configuring Proper Cluster Access Permissions for Users

                    CCE allows you to create multiple IAM users. Your account can create different user groups, assign different access permissions to each group, and add users to the groups with the corresponding permissions when creating IAM users. In this way, you can control user permissions by region and assign read-only permissions where appropriate. Your account can also assign namespace-level permissions to users or user groups. To ensure security, assign users the minimum access permissions they require.

                    +

                    If you need to create multiple IAM users, configure the permissions of the IAM users and namespaces properly.

                    +
                    +

                    Configuring Resource Quotas for Cluster Namespaces

                    CCE provides resource quota management, which allows users to limit the total amount of resources that can be allocated to each namespace. These resources include CPU, memory, storage volumes, pods, Services, Deployments, and StatefulSets. Proper configuration can prevent excessive resources created in a namespace from affecting the stability of the entire cluster.

                    +
                    +
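                    A minimal sketch of such a resource quota, assuming a namespace named example and illustrative limits:

                      apiVersion: v1
                      kind: ResourceQuota
                      metadata:
                        name: compute-quota
                        namespace: example
                      spec:
                        hard:
                          requests.cpu: "4"              # Total CPU requests allowed in the namespace
                          requests.memory: 8Gi           # Total memory requests allowed in the namespace
                          limits.cpu: "8"
                          limits.memory: 16Gi
                          pods: "50"                     # Maximum number of pods
                          persistentvolumeclaims: "20"   # Maximum number of PVCs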

                    Configuring LimitRange for Containers in a Namespace

                    With resource quotas, cluster administrators can restrict the use and creation of resources by namespace. In a namespace, a pod or container can use the maximum CPU and memory resources defined by the resource quota of the namespace. In this case, a pod or container may monopolize all available resources in the namespace. You are advised to configure LimitRange to restrict resource allocation within the namespace. The LimitRange parameter has the following restrictions:

                    +
                    • Limits the minimum and maximum resource usage of each pod or container in a namespace.

                      For example, create the maximum and minimum CPU usage limits for a pod in a namespace as follows:

                      +

                      cpu-constraints.yaml

                      +
                      apiVersion: v1
                      +kind: LimitRange
                      +metadata:
                      +  name: cpu-min-max-demo-lr
                      +spec:
                      +  limits:
                      +  - max:
                      +      cpu: "800m"
                      +    min:
                      +      cpu: "200m"
                      +    type: Container
                      +

                      Then, run kubectl -n <namespace> create -f cpu-constraints.yaml to complete the creation. If the default CPU usage is not specified for containers, the platform configures it automatically. That is, the default configuration is automatically added to the LimitRange after it is created.

                      +
                      ...
                      +spec:
                      +  limits:
                      +  - default:
                      +      cpu: 800m
                      +    defaultRequest:
                      +      cpu: 800m
                      +    max:
                      +      cpu: 800m
                      +    min:
                      +      cpu: 200m
                      +    type: Container
                      +
                    • Limits the maximum and minimum storage space that each PersistentVolumeClaim can apply for in a namespace.

                      storagelimit.yaml

                      +
                      apiVersion: v1
                      +kind: LimitRange
                      +metadata:
                      +  name: storagelimit
                      +spec:
                      +  limits:
                      +  - type: PersistentVolumeClaim
                      +    max:
                      +      storage: 2Gi
                      +    min:
                      +      storage: 1Gi
                      +

                      Then, run kubectl -n <namespace> create -f storagelimit.yaml to complete the creation.

                      +
                    +
                    +

                    Configuring Network Isolation in a Cluster

                    • Container tunnel network

                      If networks need to be isolated between namespaces in a cluster or between workloads in the same namespace, you can configure network policies to isolate the networks (see the sketch after this list).

                      +
                    • Cloud Native Network 2.0

                      In the Cloud Native Network 2.0 model, you can configure security groups to isolate networks between pods. For details, see SecurityGroups.

                      +
                    • VPC network

                      Network isolation is not supported.

                      +
                    +
                    +
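                    For the container tunnel network model above, a minimal network policy sketch that denies all ingress traffic to pods in a namespace (the namespace name example is an assumption) looks like this:

                      apiVersion: networking.k8s.io/v1
                      kind: NetworkPolicy
                      metadata:
                        name: deny-all-ingress
                        namespace: example
                      spec:
                        podSelector: {}      # Applies to all pods in the namespace
                        policyTypes:
                        - Ingress            # No ingress rules are defined, so all inbound traffic is denied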

                    Enabling the Webhook Authentication Mode with kubelet

                    This configuration applies only to CCE clusters of v1.15.6-r1 or earlier. Clusters of versions later than v1.15.6-r1 do not require it.

                    +

                    Upgrade the CCE cluster version to 1.13 or 1.15 and enable the RBAC capability for the cluster. If the version is 1.13 or later, no upgrade is required.

                    +
                    +

                    When creating a node, you can enable the kubelet webhook authentication mode by injecting the postinstall file (by setting the kubelet startup parameter --authorization-mode=Webhook).

                    +
                    1. Run the following command to create clusterrolebinding:

                      kubectl create clusterrolebinding kube-apiserver-kubelet-admin --clusterrole=system:kubelet-api-admin --user=system:kube-apiserver

                      +

                    2. For an existing node, log in to the node, change authorization mode in /var/paas/kubernetes/kubelet/kubelet_config.yaml on the node to Webhook, and restart kubelet.

                      sed -i s/AlwaysAllow/Webhook/g /var/paas/kubernetes/kubelet/kubelet_config.yaml; systemctl restart kubelet

                      +

                    3. For a new node, add the following command to the post-installation script to change the kubelet permission mode:

                      sed -i s/AlwaysAllow/Webhook/g /var/paas/kubernetes/kubelet/kubelet_config.yaml; systemctl restart kubelet

                      +

                      +

                    +
                    +

                    Uninstalling web-terminal After Use

                    The web-terminal add-on can be used to manage CCE clusters. Keep the login password secure and uninstall the add-on when it is no longer needed.

                    +
                    +
                    +
                    + +
                    + diff --git a/docs/cce/umn/cce_bestpractice_0318.html b/docs/cce/umn/cce_bestpractice_0318.html new file mode 100644 index 00000000..e01bcf11 --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0318.html @@ -0,0 +1,41 @@ + + +

                    Node Security

                    +

                    Preventing Nodes from Being Exposed to Public Networks

                    • Do not bind an EIP to a node unless necessary to reduce the attack surface.
                    • If an EIP must be used, properly configure the firewall or security group rules to restrict access of unnecessary ports and IP addresses.
                    +

                    You may have configured the kubeconfig.json file on a node in your cluster. kubectl can use the certificate and private key in this file to control the entire cluster. You are advised to delete unnecessary files from the /root/.kube directory on the node to prevent malicious use.

                    +

                    rm -rf /root/.kube

                    +
                    +

                    Hardening VPC Security Group Rules

                    CCE is a universal container platform. Its default security group rules apply to common scenarios. Based on security requirements, you can harden the security group rules set for CCE clusters on the Security Groups page of Network Console.

                    +
                    +

                    Hardening Nodes on Demand

                    CCE cluster nodes use the default settings of open source OSs. After a node is created, you need to perform security hardening according to your service requirements.

                    +

                    In CCE, you can perform hardening as follows:

                    +
                    • Use the post-installation script after the node is created. For details, see the description about Post-installation Script in Advanced Settings when creating a node. This script is user-defined.
                    +
                    +

                    Forbidding Containers to Obtain Host Machine Metadata

                    If a single CCE cluster is shared by multiple users to deploy containers, prevent containers from accessing the management address (169.254.169.254) of OpenStack so that they cannot obtain the metadata of host machines.

                    +

                    For details about how to restore the metadata, see the "Notes" section in Obtaining Metadata.

                    +

                    This solution may affect password changes on the ECS console. Therefore, verify the solution before applying it.

                    +
                    +
                    1. Obtain the network model and container CIDR of the cluster.

                      On the Clusters page of the CCE console, view the network model and container CIDR of the cluster.

                      +

                      +

                    2. Prevent the container from obtaining host metadata.

                      • VPC network
                        1. Log in to each node in the CCE cluster as user root and run the following command:
                          iptables -I OUTPUT -s {container_cidr} -d 169.254.169.254 -j REJECT
                          +

                          {container_cidr} indicates the container CIDR of the cluster, for example, 10.0.0.0/16.

                          +

                          To ensure configuration persistence, you are advised to write the command to the /etc/rc.local script.

                          +
                        2. Run the following commands in the container to access the userdata and metadata interfaces of OpenStack and check whether the request is intercepted:
                          curl 169.254.169.254/openstack/latest/meta_data.json
                          +curl 169.254.169.254/openstack/latest/user_data
                          +
                        +
                      • Container tunnel network
                        1. Log in to each node in the CCE cluster as user root and run the following command:
                          iptables -I FORWARD -s {container_cidr} -d 169.254.169.254 -j REJECT
                          +

                          {container_cidr} indicates the container CIDR of the cluster, for example, 10.0.0.0/16.

                          +

                          To ensure configuration persistence, you are advised to write the command to the /etc/rc.local script.

                          +
                        2. Run the following commands in the container to access the userdata and metadata interfaces of OpenStack and check whether the request is intercepted:
                          curl 169.254.169.254/openstack/latest/meta_data.json
                          +curl 169.254.169.254/openstack/latest/user_data
                          +
                        +
                      +

                    +
                    +
                    +
                    + +
                    + diff --git a/docs/cce/umn/cce_bestpractice_0319.html b/docs/cce/umn/cce_bestpractice_0319.html new file mode 100644 index 00000000..19ae5aa3 --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0319.html @@ -0,0 +1,82 @@ + + +

                    Container Security

                    +

                    Controlling the Pod Scheduling Scope

                    Use nodeSelector or nodeAffinity to limit the range of nodes to which applications can be scheduled, preventing a single faulty or compromised application from threatening the entire cluster, as sketched below.

                    +
                    +
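                    A minimal sketch, assuming nodes are labeled with a hypothetical key such as workload-type=general:

                      apiVersion: apps/v1
                      kind: Deployment
                      metadata:
                        name: scheduling-scope-example
                      spec:
                        replicas: 1
                        selector:
                          matchLabels:
                            app: scheduling-scope-example
                        template:
                          metadata:
                            labels:
                              app: scheduling-scope-example
                          spec:
                            # Restrict scheduling to nodes that carry this label.
                            nodeSelector:
                              workload-type: general
                            containers:
                            - name: app
                              image: nginx:alpine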

                    Suggestions on Container Security Configuration

                    • Set the computing resource limits (request and limit) of a container (see the sketch after this list). This prevents the container from occupying too many resources and affecting the stability of the host and other containers on the same node.
                    • Unless necessary, do not mount sensitive host directories to containers, such as /, /boot, /dev, /etc, /lib, /proc, /sys, and /usr.
                    • Do not run the sshd process in containers unless necessary.
                    • Unless necessary, it is not recommended that containers and hosts share the network namespace.
                    • Unless necessary, it is not recommended that containers and hosts share the process namespace.
                    • Unless necessary, it is not recommended that containers and hosts share the IPC namespace.
                    • Unless necessary, it is not recommended that containers and hosts share the UTS namespace.
                    • Unless necessary, do not mount the sock file of Docker to any container.
                    +
                    +
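                    A minimal sketch of container resource requests and limits; the values are illustrative only:

                      apiVersion: v1
                      kind: Pod
                      metadata:
                        name: resource-limits-example
                      spec:
                        containers:
                        - name: app
                          image: nginx:alpine
                          resources:
                            requests:
                              cpu: 250m
                              memory: 256Mi
                            limits:
                              cpu: 500m
                              memory: 512Mi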

                    Container Permission Access Control

                    When using a containerized application, comply with the minimum privilege principle and properly set securityContext of Deployments or StatefulSets.

                    +
                    • Configure runAsUser to specify a non-root user to run a container.
                    • Do not enable privileged for containers unless privileged mode is actually required.
                    • Configure capabilities to accurately control the privileged access permission of containers.
                    • Configure allowPrivilegeEscalation to disable privilege escape in scenarios where privilege escalation is not required for container processes.
                    • Configure seccomp to restrict the container syscalls. For details, see Restrict a Container's Syscalls with seccomp in the official Kubernetes documentation.
                    • Configure ReadOnlyRootFilesystem to protect the root file system of a container.

                      Example YAML for a Deployment:

                      +
                      apiVersion: apps/v1
                      +kind: Deployment
                      +metadata:
                      +  name: security-context-example
                      +  namespace: security-example
                      +spec:
                      +  replicas: 1
                      +  selector:
                      +    matchLabels:
                      +      app: security-context-example
                      +      label: security-context-example
                      +  strategy:
                      +    rollingUpdate:
                      +      maxSurge: 25%
                      +      maxUnavailable: 25%
                      +    type: RollingUpdate
                      +  template:
                      +    metadata:
                      +      annotations:
                      +        seccomp.security.alpha.kubernetes.io/pod: runtime/default
                      +      labels:
                      +        app: security-context-example
                      +        label: security-context-example
                      +    spec:
                      +      containers:
                      +        - image: ...
                      +          imagePullPolicy: Always
                      +          name: security-context-example
                      +          securityContext:
                      +            allowPrivilegeEscalation: false
                      +            readOnlyRootFilesystem: true
                      +            runAsUser: 1000
                      +            capabilities:
                      +              add:
                      +              - NET_BIND_SERVICE
                      +              drop:
                      +              - all		
                      +          volumeMounts:
                      +            - mountPath: /etc/localtime
                      +              name: localtime
                      +              readOnly: true
                      +            - mountPath: /opt/write-file-dir
                      +              name: tmpfs-example-001
                      +      securityContext:
                      +        seccompProfile:
                      +          type: RuntimeDefault
                      +      volumes:
                      +        - hostPath:
                      +            path: /etc/localtime
                      +            type: ""
                      +          name: localtime
                      +        - emptyDir: {}            
                      +          name: tmpfs-example-001 
                      +
                    +
                    +

                    Restricting the Access of Containers to the Management Plane

                    If application containers on a node do not need to access Kubernetes, you can perform the following operations to disable containers from accessing kube-apiserver:

                    +
                    1. Query the container CIDR block and private API server address.

                      On the Clusters page of the CCE console, click the name of the cluster to find the information on the details page.

                      +

                      +

                    2. Log in to each node in the CCE cluster as user root and run the following command:

                      • VPC network:
                        iptables -I OUTPUT -s {container_cidr} -d {Private API server IP} -j REJECT
                        +
                      • Container tunnel network:
                        iptables -I FORWARD -s {container_cidr} -d {Private API server IP} -j REJECT
                        +
                      +

                      {container_cidr} indicates the container CIDR of the cluster, for example, 10.0.0.0/16, and {Private API server IP} indicates the private IP address of the cluster API server obtained in 1.

                      +

                      To ensure configuration persistence, you are advised to write the command to the /etc/rc.local script.

                      +

                    3. Run the following command in the container to access kube-apiserver and check whether the request is intercepted:

                      curl -k https://{Private API server IP}:5443
                      +

                    +
                    +
                    +
                    + +
                    + diff --git a/docs/cce/umn/cce_bestpractice_0320.html b/docs/cce/umn/cce_bestpractice_0320.html new file mode 100644 index 00000000..2734b477 --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0320.html @@ -0,0 +1,103 @@ + + +

                    Secret Security

                    +

                    Currently, CCE has configured static encryption for secret resources. The secrets created by users are encrypted and stored in etcd of the CCE cluster. Secrets can be used in two modes: environment variable and file mounting. No matter which mode is used, the configured data is still delivered in readable form to the containers that consume it. Therefore, it is recommended that you:

                    +
                    1. Do not record sensitive information in logs.
                    2. For the secret that uses the file mounting mode, the default file permission mapped in the container is 0644. Configure stricter permissions for the file. For example:
                      apiVersion: v1
                      +kind: Pod
                      +metadata:
                      +  name: mypod
                      +spec:
                      +  containers:
                      +  - name: mypod
                      +    image: redis
                      +    volumeMounts:
                      +    - name: foo
                      +      mountPath: "/etc/foo"
                      +  volumes:
                      +  - name: foo
                      +    secret:
                      +      secretName: mysecret
                      +      defaultMode: 256
                      +

                      In defaultMode: 256, 256 is a decimal number, which corresponds to the octal number 0400.

                      +
3. When the file mounting mode is used, start the secret file name with a dot (.) to hide the file in the container.
                      apiVersion: v1
                      +kind: Secret
                      +metadata:
                      +  name: dotfile-secret
                      +data:
                      +  .secret-file: dmFsdWUtMg0KDQo=
                      +---
                      +apiVersion: v1
                      +kind: Pod
+metadata:
                      +  name: secret-dotfiles-pod
                      +spec:
                      +  volumes:
                      +  - name: secret-volume
                      +    secret:
                      +      secretName: dotfile-secret
                      +  containers:
                      +  - name: dotfile-test-container
                      +    image: k8s.gcr.io/busybox
                      +    command:
                      +    - ls
                      +    - "-1"
                      +    - "/etc/secret-volume"
                      +    volumeMounts:
                      +    - name: secret-volume
                      +      readOnly: true
                      +      mountPath: "/etc/secret-volume"
                      +

                      In this way, .secret-file cannot be viewed by running the ls -l command in the /etc/secret-volume/ directory, but can be viewed by running the ls -al command.

                      +
4. Encrypt sensitive information before creating a secret and decrypt it when it is used, as sketched below.
                    +
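The following is a minimal sketch of recommendation 4, assuming OpenSSL is available and the encryption passphrase is kept outside the cluster (for example, in a KMS); the file names and the secret name db-secret are hypothetical:

+
# Encrypt the plaintext before storing it in a secret (passphrase.txt is a hypothetical passphrase source kept outside the cluster).
+echo -n 'db-password-value' | openssl enc -aes-256-cbc -pbkdf2 -a -pass file:passphrase.txt > password.enc
+kubectl create secret generic db-secret --from-file=password=password.enc
+# The application decrypts the mounted file only when the value is needed, for example:
+# openssl enc -d -aes-256-cbc -pbkdf2 -a -pass file:passphrase.txt -in /etc/foo/password
+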

                    Using a Bound ServiceAccount Token to Access a Cluster

The secret-based ServiceAccount token does not support an expiration time or automatic rotation. In addition, after the pod that mounts the token is deleted, the token is still stored in the secret, so a leaked token may incur security risks. A bound ServiceAccount token is recommended for CCE clusters of v1.23 or later. In this mode, you can set an expiration time, and the token's lifecycle is the same as the pod's, reducing the risk of token leakage. Example:

                    +
                    apiVersion: apps/v1
                    +kind: Deployment
                    +metadata:
                    +  name: security-token-example
                    +  namespace: security-example
                    +spec:
                    +  replicas: 1
                    +  selector:
                    +    matchLabels:
                    +      app: security-token-example
                    +      label: security-token-example
                    +  template:
                    +    metadata:
                    +      annotations:
                    +        seccomp.security.alpha.kubernetes.io/pod: runtime/default
                    +      labels:
                    +        app: security-token-example
                    +        label: security-token-example
                    +    spec:
                    +      serviceAccountName: test-sa
                    +      containers:
                    +        - image: ...
                    +          imagePullPolicy: Always
                    +          name: security-token-example
                    +      volumes:
                    +        - name: test-projected
                    +          projected:
                    +            defaultMode: 420
                    +            sources:
                    +              - serviceAccountToken:
                    +                  expirationSeconds: 1800
                    +                  path: token
                    +              - configMap:
                    +                  items:
                    +                    - key: ca.crt
                    +                      path: ca.crt
                    +                  name: kube-root-ca.crt
                    +              - downwardAPI:
                    +                  items:
                    +                    - fieldRef:
                    +                        apiVersion: v1
                    +                        fieldPath: metadata.namespace
                    +                      path: namespace
                    +
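The following is a minimal sketch of how an application could use the projected token to call the API server, assuming the test-projected volume is mounted into the container at /var/run/secrets/tokens (the mount path is an assumption, not part of the example above):

+
# Read the short-lived token and call kube-apiserver through the in-cluster Service address.
+TOKEN=$(cat /var/run/secrets/tokens/token)
+curl --cacert /var/run/secrets/tokens/ca.crt -H "Authorization: Bearer ${TOKEN}" https://kubernetes.default.svc/api
+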

                    For details, visit https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/.

                    +
                    +
                    +
                    + +
                    + diff --git a/docs/cce/umn/cce_bestpractice_0322.html b/docs/cce/umn/cce_bestpractice_0322.html new file mode 100644 index 00000000..3fac4ff9 --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0322.html @@ -0,0 +1,19 @@ + + + +

                    DevOps

                    + +

                    +
                    + + + diff --git a/docs/cce/umn/cce_bestpractice_0323.html b/docs/cce/umn/cce_bestpractice_0323.html new file mode 100644 index 00000000..ecb2381c --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0323.html @@ -0,0 +1,19 @@ + + + +

                    Disaster Recovery

                    + +

                    +
                    + + + diff --git a/docs/cce/umn/cce_bestpractice_0324.html b/docs/cce/umn/cce_bestpractice_0324.html new file mode 100644 index 00000000..2bbfa67a --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0324.html @@ -0,0 +1,109 @@ + + +

                    Interconnecting GitLab with SWR and CCE for CI/CD

                    +

                    +

                    Challenges

GitLab is an open-source version management system developed with Ruby on Rails for Git project repository management. It supports web-based access to public and private projects. Similar to GitHub, GitLab allows you to browse source code, manage bugs and comments, and control team members' access to repositories. It also makes it easy to view committed versions and the file history. Team members can communicate with each other using the built-in chat program (Wall).

                    +

                    GitLab provides powerful CI/CD functions and is widely used in software development.

                    +
                    Figure 1 GitLab CI/CD process
                    +

                    This section describes how to interconnect GitLab with SWR and CCE for CI/CD.

                    +
                    +

                    Preparations

1. Create a CCE cluster and a node, and bind an EIP to the node so that the image can be downloaded during GitLab Runner installation.
                    2. Download and configure kubectl to connect to the cluster.
                    3. Install Helm 3.
                    +
                    +

                    Installing GitLab Runner

Log in to GitLab, choose Settings > CI/CD in the project view, click Expand next to Runners, and obtain the GitLab Runner registration URL and token.

                    +

                    +

                    Create the values.yaml file and fill in the following information:

                    +
                    # Registration URL
                    +gitlabUrl: https://gitlab.com/
                    +# Registration token
                    +runnerRegistrationToken: "GR13489411dKVzmTyaywEDTF_1QXb"
                    +rbac:
                    +    create: true
                    +runners:
                    +    privileged: true
                    +

                    Create a GitLab namespace.

                    +
                    kubectl create namespace gitlab
                    +

                    Install GitLab Runner using Helm.

                    +
                    helm repo add gitlab https://charts.gitlab.io 
                    +helm install --namespace gitlab gitlab-runner -f values.yaml gitlab/gitlab-runner
                    +

After the installation, you can view the gitlab-runner workload on the CCE console and later check the runner connection information in GitLab.

                    +

                    +
                    +

                    Creating an Application

Place the application to be created in the GitLab project repository. This section uses a modified Nginx application as an example. For details, visit https://gitlab.com/c8147/cidemo/-/tree/main.

                    +

                    The following files are included:

                    +
• .gitlab-ci.yml: GitLab CI file, which will be described in detail in Creating a Pipeline.
                    • Dockerfile: used to build Docker images.
                    • index.html: used to replace the index page of Nginx.
• k8s.yaml: used to deploy the Nginx app. A Deployment named nginx-test and a Service named nginx-test will be created (a minimal sketch follows this list).
                    +
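For reference, the following is a minimal sketch of what k8s.yaml might contain, assuming a NodePort Service listening on node port 31111 (matching the curl example in Verifying Deployment) and the <IMAGE_NAME> placeholder that the pipeline replaces; the actual file in the repository may differ:

+
# Hypothetical k8s.yaml: a Deployment and a NodePort Service, both named nginx-test.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nginx-test
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: nginx-test
+  template:
+    metadata:
+      labels:
+        app: nginx-test
+    spec:
+      containers:
+      - name: nginx-test
+        image: <IMAGE_NAME>    # replaced by the pipeline with the image pushed to SWR
+        ports:
+        - containerPort: 80
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: nginx-test
+spec:
+  type: NodePort
+  selector:
+    app: nginx-test
+  ports:
+  - port: 80
+    targetPort: 80
+    nodePort: 31111
+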

                    The preceding files are only examples. You can replace or modify them accordingly.

                    +
                    +

                    Configuring Global Variables

                    When using pipelines, you need to build an image, upload it to SWR, and run kubectl commands to deploy the image in the cluster. Before performing these operations, you must log in to SWR and obtain the credential for connecting to the cluster. You can define the information as variables in GitLab.

                    +

                    Log in to GitLab, choose Settings > CI/CD in the project view, and click Expand next to Variables to add variables.

                    +

                    +
                    • kube_config

                      kubeconfig.json file used for kubectl command authentication. Run the following command on the host where kubectl is configured to convert the file to the Base64 format:

                      +

                      echo $(cat ~/.kube/config | base64) | tr -d " "

                      +

The command output is the Base64-encoded kubeconfig content. Use it as the value of kube_config.

                      +
                    • project: project name.

                      Log in to the management console, click your username in the upper right corner, and click My Credentials. In the Projects area on the API Credentials page, check the name of the project in your current region.

                      +
                    • swr_ak: access key.

                      Log in to the management console, click your username in the upper right corner, and click My Credentials. In the navigation pane on the left, choose Access Keys. Click Create Access Key, enter the description, and click OK. In the displayed Information dialog box, click Download. After the certificate is downloaded, obtain the AK and SK information from the credentials file.

                      +
                    • swr_sk: secret key for logging in to SWR.

                      Run the following command to obtain the key pair. Replace $AK and $SK with the AK and SK obtained in the preceding steps.

                      +

                      printf "$AK" | openssl dgst -binary -sha256 -hmac "$SK" | od -An -vtx1 | sed 's/[ \n]//g' | sed 'N;s/\n//'

                      +

The command output is the login key, which is used as the value of swr_sk.

                      +
                    +
                    +

                    Creating a Pipeline

Log in to GitLab and add the .gitlab-ci.yml file to Repository.

                    +

                    +

                    The content is as follows:

                    +
                    # Define pipeline stages, including package, build, and deploy.
                    +stages:
                    +  - package  
                    +  - build
                    +  - deploy
                    +# If no image is specified in each stage, the default image docker:latest is used.
                    +image: docker:latest
                    +# In the package stage, only printing is performed.
                    +package:
                    +  stage: package
                    +  script:
                    +    - echo "package"
                    +# In the build stage, the Docker-in-Docker mode is used.
                    +build:
                    +  stage: build
                    +  # Define environment variables for the build stage.
                    +  variables:
                    +    DOCKER_HOST: tcp://docker:2375
                    +  # Define the image for running Docker-in-Docker.
                    +  services:
                    +    - docker:18.09-dind
                    +  script:
                    +    - echo "build"
                    +    # Log in to SWR.
                    +    - docker login -u $project@$swr_ak -p $swr_sk swr.eu-de.otc.t-systems.com
+    # Build an image. k8s-dev is the organization name in SWR. Replace it with the actual name.
                    +    - docker build -t swr.eu-de.otc.t-systems.com/k8s-dev/nginx:$CI_PIPELINE_ID .
                    +    # Push the image to SWR.
                    +    - docker push swr.eu-de.otc.t-systems.com/k8s-dev/nginx:$CI_PIPELINE_ID
                    +deploy:
                    +  # Use the kubectl image.
                    +  image: 
                    +    name: bitnami/kubectl:latest
                    +    entrypoint: [""]
                    +  stage: deploy
                    +  script:
                    +    # Configure the kubeconfig file.
                    +    - echo $kube_config |base64 -d > $KUBECONFIG
                    +    # Replace the image in the k8s.yaml file.
                    +    - sed -i "s/<IMAGE_NAME>/swr.eu-de.otc.t-systems.com\/k8s-dev\/nginx:$CI_PIPELINE_ID/g" k8s.yaml
                    +    - cat k8s.yaml
                    +    # Deploy an application.
                    +    - kubectl apply -f k8s.yaml
                    +

                    After the .gitlab-ci.yml file is saved, the pipeline is started immediately. You can view the pipeline execution status in GitLab.

                    +

                    +
                    +

                    Verifying Deployment

                    After the pipeline is deployed, locate the nginx-test Service on the CCE console, query its access address, and run the curl command to access the Service.

                    +
                    # curl xxx.xxx.xxx.xxx:31111
                    +Hello Gitlab!
                    +

                    If the preceding information is displayed, the deployment is correct.

                    +
                    +
                    +
                    + +
                    + diff --git a/docs/cce/umn/cce_bestpractice_0325.html b/docs/cce/umn/cce_bestpractice_0325.html new file mode 100644 index 00000000..506ca265 --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0325.html @@ -0,0 +1,50 @@ + + +

                    Configuring Core Dumps

                    +

                    Challenges

                    Linux allows you to create a core dump file if an application crashes, which contains the data the application had in memory at the time of the crash. You can analyze the file to locate the fault.

                    +

                    Generally, when a service application crashes, its container exits and is reclaimed and destroyed. Therefore, container core files need to be permanently stored on the host or cloud storage. This topic describes how to configure container core dumps.

                    +
                    +

                    Enabling Core Dump on a Node

                    Log in to the node, run the following command to enable core dump, and set the path and format for storing core files:

                    +

                    echo "/tmp/cores/core.%h.%e.%p.%t" > /proc/sys/kernel/core_pattern

                    +

                    Parameters:

                    +
                    • %h: host name (or pod name). You are advised to configure this parameter.
                    • %e: program file name. You are advised to configure this parameter.
                    • %p: (optional) process ID.
                    • %t: (optional) time of the core dump.
                    +

                    You can also configure a pre-installation or post-installation script to automatically run this command when creating a node.
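For example, a minimal sketch of such a script, reusing the core_pattern value configured above (treat the storage path as an assumption about your node configuration):

+
#!/bin/bash
+# Hypothetical post-installation script: apply the core file pattern automatically on new nodes.
+echo "/tmp/cores/core.%h.%e.%p.%t" > /proc/sys/kernel/core_pattern
+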

                    +
                    +

                    Permanently Storing Core Dumps

A core file can be stored on the host (using a hostPath volume) or in cloud storage (using a PVC). The following is an example YAML file for using a hostPath volume.
                    apiVersion: v1
                    +kind: Pod
                    +metadata:
                    +  name: coredump
                    +spec:
                    +  volumes:
                    +  - name: coredump-path
                    +    hostPath:
                    +      path: /home/coredump
                    +  containers:
                    +  - name: ubuntu
                    +    image: ubuntu:12.04
                    +    command: ["/bin/sleep","3600"]
                    +    volumeMounts:
                    +    - mountPath: /tmp/cores
                    +      name: coredump-path
                    +
                    +

                    Create a pod using kubectl.

                    +

                    kubectl create -f pod.yaml

                    +
                    +

                    Verification

                    After the pod is created, access the container and trigger a segmentation fault of the current shell terminal.

                    +
                    $ kubectl get pod
                    +NAME                          READY   STATUS    RESTARTS   AGE
                    +coredump                      1/1     Running   0          56s
                    +$ kubectl exec -it coredump -- /bin/bash
                    +root@coredump:/# kill -s SIGSEGV $$
                    +command terminated with exit code 139
                    +

                    Log in to the node and check whether a core file is generated in the /home/coredump directory. The following example indicates that a core file is generated.

                    +
                    # ls /home/coredump
                    +core.coredump.bash.18.1650438992
                    +
                    +
                    +
                    + +
                    + diff --git a/docs/cce/umn/cce_01_0236.html b/docs/cce/umn/cce_bulletin_0000.html similarity index 79% rename from docs/cce/umn/cce_01_0236.html rename to docs/cce/umn/cce_bulletin_0000.html index 68750f5e..003398f0 100644 --- a/docs/cce/umn/cce_01_0236.html +++ b/docs/cce/umn/cce_bulletin_0000.html @@ -1,4 +1,4 @@ - +

                    Product Bulletin

                    @@ -14,6 +14,8 @@
                  6. + diff --git a/docs/cce/umn/cce_bulletin_0003.html b/docs/cce/umn/cce_bulletin_0003.html index 504707ee..7711c70e 100644 --- a/docs/cce/umn/cce_bulletin_0003.html +++ b/docs/cce/umn/cce_bulletin_0003.html @@ -1,29 +1,29 @@

                    Kubernetes Version Support Mechanism

                    -

                    This section describes the Kubernetes version support mechanism of CCE.

                    -

                    Cluster Version Description

                    Version number: The format is x.y.z-r{n}, where x.y is the major version and z is the minor version. If the version number is followed by -r{n}, the version is a patch version, for example, v1.15.11-r1.

                    -

                    -

                    Starting from Kubernetes 1.21, CCE only displays the major version number, for example, v1.21.

                    -
                    -

                    Offline: After a version is brought offline, a cluster of this version cannot be created on the CCE console and no new features will be released for the clusters of this version.

                    -

                    Disuse: After a version is disused, CCE will no longer provide technical support for the version, including supporting new features, backporting Kubernetes bug fixes, fixing vulnerabilities, and upgrading to new versions.

                    +

This section explains versioning in CCE and the policies for Kubernetes version support.

                    +

                    Version Description

                    Version number: The format is x.y.z, where x.y is the major version and z is the minor version. If the version number is followed by -r, the version is a patch version, for example, v1.15.6-r1.

                    +

                    +
                    +

                    Version Requirements

                    Offline: After a version is brought offline, a cluster of this version cannot be created on the CCE console and no new features will be released for the clusters of this version.

                    +

Obsolete: CCE no longer provides support for this version, including new feature releases, backporting of community bug fixes, vulnerability fixes, and upgrades.

                    +
                    +

                    CCE releases only odd major Kubernetes versions, such as v1.25, v1.23, and v1.21. The specific version support policies in different scenarios are as follows:

                    +
                    • Cluster creation

CCE allows you to create clusters of the two latest major Kubernetes versions, for example, v1.25 and v1.23. After v1.25 becomes commercially available, cluster creation for earlier versions (such as v1.21) is removed, and you will no longer be able to create clusters of v1.21 on the CCE console.

                      +
                    • Cluster maintenance

                      CCE maintains clusters of four major Kubernetes versions at most, such as v1.25, v1.23, v1.21, and v1.19. For example, after v1.25 is commercially available, support for v1.17 will be removed.

                      +

                      +
                    • Cluster upgrade
CCE allows clusters of the three latest major versions to be upgraded. Clusters of v1.19 or later can skip at most one major version during an upgrade (for example, from v1.19 directly to v1.23). Each version is maintained for one year. For example, after v1.25 is available, support for earlier versions (such as v1.17) will be removed. You are advised to upgrade your Kubernetes clusters before their maintenance period ends.
                      • Cluster version upgrade: After the latest major version (for example, v1.25) is available, CCE allows you to upgrade clusters to the last stable version of the second-latest major version, for example, v1.23. For details, see Upgrade Overview.
• Cluster patch upgrade: For existing clusters running on the live network, if there are major Kubernetes issues or vulnerabilities, CCE performs the patch upgrade on these clusters in the background. The patch upgrade is imperceptible to users. If it may adversely affect user services, CCE will release a notice one week in advance.
                      -

                      Lifecycle

                      CCE releases only odd major Kubernetes versions, such as v1.23, v1.21, v1.19, and v1.17. The specific version support policies in different scenarios are as follows:

                      -
                      • Cluster creation

                        CCE provides you with two major Kubernetes versions of clusters, for example, v1.23 and v1.21. For example, after v1.23 is brought online for commercial use, v1.19 is brought offline synchronously. In this case, the cluster of this version cannot be created on the console.

                        -
                      • Cluster maintenance

                        CCE maintains four major Kubernetes versions, such as v1.17, v1.19, v1.21 and v1.23. For example, when v1.23 is put into commercial use, earlier versions such as v1.15 will be disused.

                        -

                        -
                      • Cluster upgrade

                        CCE supports the upgrade of three major Kubernetes versions, such as v1.21, v1.19, and v1.17. For example, after v1.23 is put into commercial use, clusters of versions earlier than v1.17, such as v1.15, cannot be upgraded any more.

                      -

                      Version Release Cycle

                      CCE follows the Kubernetes community to release major versions. To be specific, a new CCE version is released about six months after a new Kubernetes version is released in the community.

                      +

                      Version Release Cycle

The Kubernetes community releases a major version about every four months. CCE provides support for a new Kubernetes version about seven months after its community release.

                      -

                      Version Constraints

                      After a cluster is upgraded, it cannot be rolled back to the source version.

                      +

                      Version Constraints

                      After a cluster is upgraded, it cannot be rolled back to the source version.

                      diff --git a/docs/cce/umn/cce_bulletin_0011.html b/docs/cce/umn/cce_bulletin_0011.html new file mode 100644 index 00000000..cc58bb59 --- /dev/null +++ b/docs/cce/umn/cce_bulletin_0011.html @@ -0,0 +1,18 @@ + + +

                      Vulnerability Fixing Policies

                      +

                      Cluster Vulnerability Fixing SLA

                      • High-risk vulnerabilities:
                        • CCE fixes vulnerabilities as soon as possible after the Kubernetes community detects them and releases fixing solutions. The fixing policies are the same as those of the community.
• Emergency operating system vulnerability fixes are released according to the OS vendor's fixing policies and procedures. Generally, after a fix is available, you need to apply it yourself.
                        +
                      • Other vulnerabilities:

                        Other vulnerabilities can be fixed through a normal upgrade.

                        +
                      +
                      +

                      Fixing Statement

To prevent customers from being exposed to unexpected risks, CCE discloses only the vulnerability background, details, technical analysis, affected functions/versions/scenarios, solutions, and reference information.

                      +

In addition, CCE provides the same information to all customers so that every customer is protected equally. CCE does not notify individual customers in advance.

                      +

CCE does not develop or release exploit code (or proof-of-concept code) for vulnerabilities found in the product.

                      +
                      +
                      +
                      + +
                      + diff --git a/docs/cce/umn/cce_bulletin_0054.html b/docs/cce/umn/cce_bulletin_0054.html index 2c7cd78a..bff99bbd 100644 --- a/docs/cce/umn/cce_bulletin_0054.html +++ b/docs/cce/umn/cce_bulletin_0054.html @@ -72,7 +72,7 @@
                      diff --git a/docs/cce/umn/cce_bulletin_0068.html b/docs/cce/umn/cce_bulletin_0068.html index 195990bb..1be6f35e 100644 --- a/docs/cce/umn/cce_bulletin_0068.html +++ b/docs/cce/umn/cce_bulletin_0068.html @@ -2,16 +2,33 @@

                      CCE Cluster Version Release Notes

To ensure that stable and reliable Kubernetes versions are available during your use of CCE, CCE provides the Kubernetes version support mechanism. A new supported version is released every six months, with a support period of one year. You must upgrade your Kubernetes clusters before the support period ends.

                      -

                      V1.23

                      -
                      Table 1 Feature description of clusters of v1.23

                      Kubernetes Version

                      +

                      V1.25

                      +
                      - - - + + +
                      Table 1 Feature description of clusters of v1.25

                      Kubernetes Version

                      Description

                      +

                      Description

                      v1.23

                      +

                      v1.25

                      Main features:

                      +

                      Main features:

                      +
                      • Incorporates features of Kubernetes v1.25.
                      • PodSecurityPolicy is replaced by Pod Security Admission.
• The LegacyServiceAccountTokenNoAutoGeneration feature is in the beta state. By default, this feature is enabled, and secret-based tokens are no longer automatically generated for service accounts. If you want to use a token that never expires, you need to create a secret and mount it. For details, see Service account token Secrets.
                      +
                      +
                      +
                      +

                      V1.23

                      +
                      + + + + + @@ -20,15 +37,15 @@

                      V1.21

                      -
                      Table 2 Feature description of clusters of v1.23

                      Kubernetes Version

                      +

                      Description

                      +

                      v1.23

                      +

                      Main features:

• Incorporates features of Kubernetes v1.23. The corresponding Docker engine version is 18.09, and the operating system version is EulerOS 2.5, EulerOS 2.9, or CentOS 7.7.
                      • Egress rules can be configured when adding a Network Policy.
                      Table 2 Feature description of clusters of v1.21

                      Kubernetes Version

                      +
                      - - - @@ -37,21 +54,21 @@

                      V1.19

                      -
                      Table 3 Feature description of clusters of v1.21

                      Kubernetes Version

                      Description

                      +

                      Description

                      v1.21

                      +

                      v1.21

                      Main features:

                      +

                      Main features:

                      • Incorporates features of Kubernetes v1.21.
                      • Two-way authentication is supported for domain name access.
                      • The Docker storage mode of nodes running CentOS 7 in CCE clusters is changed from Device Mapper to OverlayFS.
                      Table 3 Feature description of clusters of v1.19

                      Kubernetes Version

                      +
                      - - - - - @@ -61,15 +78,15 @@

                      V1.17

                      -
                      Table 4 Feature description of clusters of v1.19

                      Kubernetes Version

                      Description

                      +

                      Description

                      v1.19.10

                      +

                      v1.19.10

                      Main features:

                      +

                      Main features:

                      • EulerOS 2.5 and CentOS 7.7 are supported.
                      • Incorporates features of Kubernetes v1.19.10.

                      v1.19.8

                      +

                      v1.19.8

                      Main features:

                      +

                      Main features:

                      • EulerOS 2.5 and CentOS 7.7 are supported.
                      • Features of Kubernetes v1.19.8 are incorporated.
                      • Pod security policies (PSPs) can be configured for a cluster.
                      Table 4 Feature description of clusters of v1.17

                      Kubernetes

                      +
                      - - - @@ -78,22 +95,22 @@

                      V1.15

                      -
                      Table 5 Feature description of clusters of v1.17

                      Kubernetes

                      Description

                      +

                      Description

                      v1.17.9-r0

                      +

                      v1.17.9-r0

                      Main features:

                      +

                      Main features:

                      • Support for EulerOS 2.5 and CentOS 7.7.
                      • Incorporates features of Kubernetes v1.17.9.
                      Table 5 Feature description of clusters of v1.15

                      Kubernetes

                      +
                      - - - - - @@ -101,15 +118,15 @@

                      V1.13

                      -
                      Table 6 Feature description of clusters of v1.15

                      Kubernetes

                      Description

                      +

                      Description

                      v1.15.11-r1

                      +

                      v1.15.11-r1

                      Main features:

                      +

                      Main features:

                      • EulerOS 2.5 is supported.
                      • Incorporates features of Kubernetes v1.15.11.

                      v1.15.6-r1

                      +

                      v1.15.6-r1

                      Main features:

                      - +

                      Main features:

                      +
                      Table 6 Feature description of clusters of v1.13

                      Kubernetes (CCE Enhanced Version)

                      +
                      - - - @@ -118,21 +135,21 @@

                      V1.11 and Earlier Versions

                      -
                      Table 7 Feature description of clusters of v1.13

                      Kubernetes (CCE Enhanced Version)

                      Description

                      +

                      Description

                      v1.13.10-r0

                      +

                      v1.13.10-r0

                      Main features:

                      +

                      Main features:

                      • Support for EulerOS 2.5.
• Support for GPU-accelerated nodes.
                      • Incorporates features of Kubernetes v1.13.10.
                      Table 7 Feature description of clusters of v1.11 or earlier

                      Kubernetes (CCE Enhanced Version)

                      +
                      - - - - - @@ -143,7 +160,7 @@
                      diff --git a/docs/cce/umn/cce_bulletin_0169.html b/docs/cce/umn/cce_bulletin_0169.html index 4c6ad41e..a8403e12 100644 --- a/docs/cce/umn/cce_bulletin_0169.html +++ b/docs/cce/umn/cce_bulletin_0169.html @@ -4,6 +4,8 @@
                      diff --git a/docs/cce/umn/cce_bulletin_0301.html b/docs/cce/umn/cce_bulletin_0301.html index 97b45bf4..28021127 100644 --- a/docs/cce/umn/cce_bulletin_0301.html +++ b/docs/cce/umn/cce_bulletin_0301.html @@ -1,7 +1,7 @@

                      OS Patch Notes for Cluster Nodes

                      -

                      Nodes in Hybrid Clusters

                      CCE nodes in Hybrid clusters can run on EulerOS 2.2, EulerOS 2.5, EulerOS 2.9 and CentOS 7.7. The following table lists the supported patches for these OSs.

                      +

                      Nodes in Hybrid Clusters

                      CCE nodes in Hybrid clusters can run on EulerOS 2.5, EulerOS 2.9 and CentOS 7.7. The following table lists the supported patches for these OSs.

                      Table 8 Feature description of clusters of v1.11 or earlier

                      Kubernetes (CCE Enhanced Version)

                      Description

                      +

                      Description

                      v1.11.7-r2

                      +

                      v1.11.7-r2

                      Main features:

                      +

                      Main features:

• Support for GPU-accelerated nodes.
                      • Support for EulerOS 2.2.
                      • Incorporates features of Kubernetes v1.11.7.

                      v1.9.10-r2

                      +

                      v1.9.10-r2

                      Main features:

                      +

                      Main features:

                      Incorporates features of Kubernetes v1.9.10.

                      @@ -9,24 +9,19 @@ - - - - - - - @@ -37,7 +32,7 @@
                      diff --git a/docs/cce/umn/cce_faq_00006.html b/docs/cce/umn/cce_faq_00006.html deleted file mode 100644 index 2fda550b..00000000 --- a/docs/cce/umn/cce_faq_00006.html +++ /dev/null @@ -1,180 +0,0 @@ - - -

                      Checklist for Migrating Containerized Applications to the Cloud

                      -

                      Overview

                      Cloud Container Engine (CCE) provides highly scalable, high-performance, enterprise-class Kubernetes clusters and supports Docker containers. With CCE, you can easily deploy, manage, and scale out containerized applications.

                      -

                      This checklist describes the system availability, data reliability, and O&M reliability of migrating containerized applications to the cloud. It contains check items, impact, FAQs, and examples, helping you migrate services to CCE and avoid application exceptions or cluster reconstruction caused by improper use.

                      -
                      -

                      Check Items

                      -
                      Table 1 Node OS patches

                      OS

                      EulerOS release 2.0 (SP2)

                      +

                      EulerOS release 2.0 (SP5)

                      3.10.0-327.62.59.83.h128.x86_64

                      -

                      EulerOS release 2.0 (SP5)

                      -

                      3.10.0-862.14.1.5.h591.eulerosv2r7.x86_64

                      +

                      3.10.0-862.14.1.5.h687.eulerosv2r7.x86_64

                      EulerOS release 2.0 (SP9)

                      4.18.0-147.5.1.6.h541.eulerosv2r9.x86_64

                      +

                      4.18.0-147.5.1.6.h766.eulerosv2r9.x86_64

                      CentOS Linux release 7.7

                      3.10.0-1062.18.1.el7.x86_64

                      +

                      3.10.0-1160.76.1.el7.x86_64

                      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Table 1 System availability

                      Category

                      -

                      Check Item

                      -

                      Type

                      -

                      Impact

                      -

                      Cluster

                      -

                      When creating a cluster, set High Availability to Yes.

                      -

                      Reliability

                      -

                      A cluster with High Availability set to No is a non-HA cluster with only one master. If the master node is faulty, the entire cluster will be unavailable. Therefore, you are advised to create an HA cluster in the production environment.

                      -

                      Before creating a cluster, determine the container network model that is suitable to the service scenario.

                      -

                      Network planning

                      -

                      Different container network models apply to different scenarios.

                      -

                      Before creating a cluster, plan the subnet CIDR block and container network CIDR block properly.

                      -

                      Network planning

                      -

                      If the range of the subnet and container network CIDR blocks is not properly set, the number of available nodes in the cluster will be less than the number of nodes supported by the cluster. Network planning has different constraints on different container network models.

                      -

                      Before creating a cluster, properly plan CIDR blocks for the related Direct Connect, peering connection, container network, service network, and subnet to avoid IP address conflicts.

                      -

                      Network planning

                      -

                      If CIDR blocks are not properly set and IP address conflicts occur, service access will be affected.

                      -

                      Workload

                      -

                      When creating a workload, set the upper and lower limits of CPU and memory resources.

                      -

                      Deployment

                      -

                      If the upper and lower resource limits are not set for an application, a resource leak of this application will make resources unavailable for other applications deployed on the same node. In addition, applications that do not have upper and lower resource limits cannot be accurately monitored.

                      -

                      When creating an application, set the number of pods to more than two and set the scheduling policy based on service requirements.

                      -

                      Reliability

                      -

                      A single-pod application will be faulty if the node or pod is faulty.

                      -

                      Properly set affinity and anti-affinity.

                      -

                      Reliability

                      -

                      If affinity and anti-affinity are both configured for an application that provides Services externally, Services may fail to be accessed after the application is upgraded or restarted.

                      -

                      When creating a workload, set the health check policy, that is, set the workload liveness probe and the readiness probe.

                      -

                      Reliability

                      -

                      If the two probes are not set, pods cannot detect service exceptions or automatically restart the service to restore it. This results in a situation where the pod status is normal but the service in the pod is abnormal.

                      -

                      When creating a workload, set the pre-stop processing command (Lifecycle > Pre-Stop) to ensure that the services running in the pods can be completed in advance in the case of application upgrade or pod deletion.

                      -

                      Reliability

                      -

                      If the pre-stop processing command is not configured, the pod will be directly killed and services will be interrupted during application upgrade.

                      -

                      When creating a Service, select an access mode based on service requirements. Currently, the following types of access modes are supported: intra-cluster access, intra-VPC access, and external access.

                      -

                      Deployment

                      -

                      If the access mode is not properly set, internal and external access may be in disorder and resources may be wasted.

                      -
                      -
                      - -
                      - - - - - - - - - - - - - - - - -
                      Table 2 Data reliability

                      Category

                      -

                      Check Item

                      -

                      Type

                      -

                      Impact

                      -

                      Container data persistency

                      -

                      Store application data in the cloud, rather than on a local disk.

                      -

                      Reliability

                      -

                      If a node is faulty and cannot be restored, data on the local disk cannot be restored.

                      -

                      Backup

                      -

                      Back up application data.

                      -

                      Reliability

                      -

                      Data cannot be restored after being lost.

                      -
                      -
                      - -
                      - - - - - - - - - - - - - - - - - - - - - - - - -
                      Table 3 O&M reliability

                      Category

                      -

                      Check Item

                      -

                      Type

                      -

                      Impact

                      -

                      Project

                      -

                      The quotas of ECS, VPC, subnet, EIP, and EVS resources must meet customer requirements.

                      -

                      Deployment

                      -

                      If the quota is insufficient, resources will fail to be created. Specifically, users who have configured automatic capacity expansion must have sufficient resource quotas.

                      -

                      Do not install private software or modify OS configurations on a cluster node.

                      -

                      Deployment

                      -

                      If private software is installed on a cluster node or OS configurations are modified, exceptions may occur on Kubernetes components on the node, making it unavailable for application deployment.

                      -

                      Do not modify information about resources created by CCE, such as security groups and EVS disks. Resources created by CCE are labeled cce.

                      -

                      Deployment

                      -

                      CCE cluster functions may be abnormal.

                      -

                      Proactive O&M

                      -

                      Configure alarm monitoring on AOM for the applications you deployed in CCE clusters.

                      -

                      Monitoring

                      -

                      If alarm monitoring is not configured, you cannot receive alarms when applications are faulty and need to manually locate the faults.

                      -
                      -
                      - - -
                      - -
                      - diff --git a/docs/cce/umn/cce_faq_0083.html b/docs/cce/umn/cce_faq_0083.html index 04b96a4c..da3bc80c 100644 --- a/docs/cce/umn/cce_faq_0083.html +++ b/docs/cce/umn/cce_faq_0083.html @@ -4,8 +4,6 @@
                      diff --git a/docs/cce/umn/cce_qs_0001.html b/docs/cce/umn/cce_qs_0001.html index 0a6f992d..9cd4c9ee 100644 --- a/docs/cce/umn/cce_qs_0001.html +++ b/docs/cce/umn/cce_qs_0001.html @@ -4,21 +4,21 @@

                      This document provides instructions for getting started with the Cloud Container Engine (CCE).

                      Procedure

                      Complete the following tasks to get started with CCE.

                      Figure 1 Procedure for getting started with CCE
                      -
                      1. Authorize an IAM user to use CCE.

                        The accounts have the permission to use CCE. However, IAM users created by the accounts do not have the permission. You need to manually assign the permission to IAM users.

                        +
1. Authorize an IAM user to use CCE.

                          The accounts have the permission to use CCE. However, IAM users created by the accounts do not have the permission. You need to manually assign the permission to IAM users.

                        2. Create a cluster.

                          For details on how to create a regular Kubernetes cluster, see Creating a CCE Cluster.

                        3. Create a workload from images or a chart.

                          Select existing images/chart, or create new images/chart.

                          -
                          • For details on how to create a workload from images, see Workloads.
                          • For details on how to create a workload from a chart, see Charts (Helm).
                          -

                        4. View workload status and logs. Upgrade, scale, and monitor the workload.

                          For details, see Managing Workloads and Jobs.

                          +
                          • For details on how to create a workload from images, see Workloads.
                          • For details on how to create a workload from a chart, see Charts (Helm).
                          +

                        5. View workload status and logs. Upgrade, scale, and monitor the workload.

                          For details, see Managing Workloads and Jobs.

                      FAQs

                      1. Is CCE suitable for users who are not familiar with Kubernetes?

Yes. The CCE console is easy to use.

                        -
                      2. Is CCE suitable for users who have little experience in building images?

                        Yes. You can select images from Third-party Images, and Shared Images pages on the CCE console. The My Images page displays only the images created by you. For details, see Workloads.

                        +
                      3. Is CCE suitable for users who have little experience in building images?

Yes. You can select images from the Third-party Images and Shared Images pages on the CCE console. The My Images page displays only the images created by you. For details, see Workloads.

                      4. How do I create a workload using CCE?

                        Create a cluster and then create a workload in the cluster.

                        -
                      5. How do I create a workload accessible to public networks?

                        CCE provides different types of Services for workload access in diverse scenarios. Currently, CCE provides two access types to expose a workload to public networks: NodePort and LoadBalancer. For details, see Networking.

                        +
                      6. How do I create a workload accessible to public networks?

                        CCE provides different types of Services for workload access in diverse scenarios. Currently, CCE provides two access types to expose a workload to public networks: NodePort and LoadBalancer. For details, see Networking.

                      7. How can I allow multiple workloads in the same cluster to access each other?

                        Select the access type ClusterIP, which allows workloads in the same cluster to use their cluster-internal domain names to access each other.

                        Cluster-internal domain names are in the format of <self-defined service name>.<workload's namespace>.svc.cluster.local:<port number>. For example, nginx.default.svc.cluster.local:80.

                        Example:

                        -

                        Assume that workload A needs to access workload B in the same cluster. Then, you can create a ClusterIP Service for workload B. After the ClusterIP Service is created, workload B is reachable at <self-defined service name>.<workload B's namespace>.svc.cluster.local:<port number>.

                        +

                        Assume that workload A needs to access workload B in the same cluster. Then, you can create a ClusterIP Service for workload B. After the ClusterIP Service is created, workload B is reachable at <self-defined service name>.<workload B's namespace>.svc.cluster.local:<port number>.

diff --git a/docs/cce/umn/en-us_image_0000001223473845.png b/docs/cce/umn/en-us_image_0000001082048529.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001223473845.png
rename to docs/cce/umn/en-us_image_0000001082048529.png
diff --git a/docs/cce/umn/en-us_image_0000001088110417.png b/docs/cce/umn/en-us_image_0000001088110417.png
deleted file mode 100644
index b138cac6..00000000
Binary files a/docs/cce/umn/en-us_image_0000001088110417.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001113962636.png b/docs/cce/umn/en-us_image_0000001113962636.png
new file mode 100644
index 00000000..7c35ae08
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001113962636.png differ
diff --git a/docs/cce/umn/en-us_image_0000001126243447.png b/docs/cce/umn/en-us_image_0000001126243447.png
deleted file mode 100644
index 19d17bee..00000000
Binary files a/docs/cce/umn/en-us_image_0000001126243447.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001144208440.png b/docs/cce/umn/en-us_image_0000001144208440.png
deleted file mode 100644
index f6280ad0..00000000
Binary files a/docs/cce/umn/en-us_image_0000001144208440.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001144342236.png b/docs/cce/umn/en-us_image_0000001144342236.png
deleted file mode 100644
index 07d911c9..00000000
Binary files a/docs/cce/umn/en-us_image_0000001144342236.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001144342238.png b/docs/cce/umn/en-us_image_0000001144342238.png
deleted file mode 100644
index b8cd51de..00000000
Binary files a/docs/cce/umn/en-us_image_0000001144342238.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001144578756.png b/docs/cce/umn/en-us_image_0000001144578756.png
deleted file mode 100644
index 4eda4beb..00000000
Binary files a/docs/cce/umn/en-us_image_0000001144578756.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001144738550.png b/docs/cce/umn/en-us_image_0000001144738550.png
deleted file mode 100644
index 9a9b4449..00000000
Binary files a/docs/cce/umn/en-us_image_0000001144738550.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001144779784.png b/docs/cce/umn/en-us_image_0000001144779784.png
deleted file mode 100644
index a4bbbd03..00000000
Binary files a/docs/cce/umn/en-us_image_0000001144779784.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001145535931.png b/docs/cce/umn/en-us_image_0000001145545261.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001145535931.png
rename to docs/cce/umn/en-us_image_0000001145545261.png
diff --git a/docs/cce/umn/en-us_image_0000001148989534.png b/docs/cce/umn/en-us_image_0000001148989534.png
deleted file mode 100644
index f64927d6..00000000
Binary files a/docs/cce/umn/en-us_image_0000001148989534.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001150420952.png b/docs/cce/umn/en-us_image_0000001150420952.png
deleted file mode 100644
index 9bca8f60..00000000
Binary files a/docs/cce/umn/en-us_image_0000001150420952.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001152953258.png b/docs/cce/umn/en-us_image_0000001152953258.png
deleted file mode 100644
index b19a8248..00000000
Binary files a/docs/cce/umn/en-us_image_0000001152953258.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001160642447.png b/docs/cce/umn/en-us_image_0000001160642447.png
new file mode 100644
index 00000000..1be0aa0a
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001160642447.png differ
diff --git a/docs/cce/umn/en-us_image_0000001160731158.png b/docs/cce/umn/en-us_image_0000001160731158.png
deleted file mode 100644
index 3e03cd3d..00000000
Binary files a/docs/cce/umn/en-us_image_0000001160731158.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001172076961.png b/docs/cce/umn/en-us_image_0000001172076961.png
deleted file mode 100644
index 378328aa..00000000
Binary files a/docs/cce/umn/en-us_image_0000001172076961.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001172392670.png b/docs/cce/umn/en-us_image_0000001172392670.png
new file mode 100644
index 00000000..05cb59e8
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001172392670.png differ
diff --git a/docs/cce/umn/en-us_image_0000001176255102.png b/docs/cce/umn/en-us_image_0000001176255102.png
deleted file mode 100644
index c32d9be4..00000000
Binary files a/docs/cce/umn/en-us_image_0000001176255102.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001176818150.png b/docs/cce/umn/en-us_image_0000001176818150.png
new file mode 100644
index 00000000..efdf9ff7
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001176818150.png differ
diff --git a/docs/cce/umn/en-us_image_0000001178034114.png b/docs/cce/umn/en-us_image_0000001178034114.png
deleted file mode 100644
index d7b05530..00000000
Binary files a/docs/cce/umn/en-us_image_0000001178034114.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001178034116.png b/docs/cce/umn/en-us_image_0000001178034116.png
deleted file mode 100644
index e6a687b9..00000000
Binary files a/docs/cce/umn/en-us_image_0000001178034116.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001178192666.png b/docs/cce/umn/en-us_image_0000001178192666.png
deleted file mode 100644
index d000a3fd..00000000
Binary files a/docs/cce/umn/en-us_image_0000001178192666.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001178352604.png b/docs/cce/umn/en-us_image_0000001178352604.png
deleted file mode 100644
index 5bfbc6ca..00000000
Binary files a/docs/cce/umn/en-us_image_0000001178352604.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001190048341.png b/docs/cce/umn/en-us_image_0000001190048341.png
deleted file mode 100644
index 57a44dd1..00000000
Binary files a/docs/cce/umn/en-us_image_0000001190048341.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001190168507.png b/docs/cce/umn/en-us_image_0000001190168507.png
deleted file mode 100644
index 5b3a3e05..00000000
Binary files a/docs/cce/umn/en-us_image_0000001190168507.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001190302085.png b/docs/cce/umn/en-us_image_0000001190302085.png
deleted file mode 100644
index dce5dbc8..00000000
Binary files a/docs/cce/umn/en-us_image_0000001190302085.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001190302087.png b/docs/cce/umn/en-us_image_0000001190302087.png
deleted file mode 100644
index d70fb1ab..00000000
Binary files a/docs/cce/umn/en-us_image_0000001190302087.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001190302089.png b/docs/cce/umn/en-us_image_0000001190302089.png
deleted file mode 100644
index 42c594e5..00000000
Binary files a/docs/cce/umn/en-us_image_0000001190302089.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001190302091.png b/docs/cce/umn/en-us_image_0000001190302091.png
deleted file mode 100644
index cbdee420..00000000
Binary files a/docs/cce/umn/en-us_image_0000001190302091.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001190302095.png b/docs/cce/umn/en-us_image_0000001190302095.png
deleted file mode 100644
index 4be70fad..00000000
Binary files a/docs/cce/umn/en-us_image_0000001190302095.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001190302097.png b/docs/cce/umn/en-us_image_0000001190302097.png
deleted file mode 100644
index 689fd9c4..00000000
Binary files a/docs/cce/umn/en-us_image_0000001190302097.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001190538605.png b/docs/cce/umn/en-us_image_0000001190538605.png
deleted file mode 100644
index 4d6df5d3..00000000
Binary files a/docs/cce/umn/en-us_image_0000001190538605.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001190658439.png b/docs/cce/umn/en-us_image_0000001190658439.png
deleted file mode 100644
index 7df14a7b..00000000
Binary files a/docs/cce/umn/en-us_image_0000001190658439.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001192028618.png b/docs/cce/umn/en-us_image_0000001192028618.png
new file mode 100644
index 00000000..67590f40
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001192028618.png differ
diff --git a/docs/cce/umn/en-us_image_0000001195057213.png b/docs/cce/umn/en-us_image_0000001195057213.png
deleted file mode 100644
index c2958d95..00000000
Binary files a/docs/cce/umn/en-us_image_0000001195057213.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001198867835.png b/docs/cce/umn/en-us_image_0000001198867835.png
deleted file mode 100644
index 101fab41..00000000
Binary files a/docs/cce/umn/en-us_image_0000001198867835.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001198980979.png b/docs/cce/umn/en-us_image_0000001198980979.png
deleted file mode 100644
index 34322425..00000000
Binary files a/docs/cce/umn/en-us_image_0000001198980979.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001180446397.png b/docs/cce/umn/en-us_image_0000001199021278.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001180446397.png
rename to docs/cce/umn/en-us_image_0000001199021278.png
diff --git a/docs/cce/umn/en-us_image_0000001098645539.png b/docs/cce/umn/en-us_image_0000001199021298.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001098645539.png
rename to docs/cce/umn/en-us_image_0000001199021298.png
diff --git a/docs/cce/umn/en-us_image_0186273271.png b/docs/cce/umn/en-us_image_0000001199021308.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0186273271.png
rename to docs/cce/umn/en-us_image_0000001199021308.png
diff --git a/docs/cce/umn/en-us_image_0000001243407853.png b/docs/cce/umn/en-us_image_0000001199021320.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001243407853.png
rename to docs/cce/umn/en-us_image_0000001199021320.png
diff --git a/docs/cce/umn/en-us_image_0000001093275701.png b/docs/cce/umn/en-us_image_0000001199021334.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001093275701.png
rename to docs/cce/umn/en-us_image_0000001199021334.png
diff --git a/docs/cce/umn/en-us_image_0000001190859184.png b/docs/cce/umn/en-us_image_0000001199181228.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001190859184.png
rename to docs/cce/umn/en-us_image_0000001199181228.png
diff --git a/docs/cce/umn/en-us_image_0000001192723194.png b/docs/cce/umn/en-us_image_0000001199181230.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001192723194.png
rename to docs/cce/umn/en-us_image_0000001199181230.png
diff --git a/docs/cce/umn/en-us_image_0000001409700093.png b/docs/cce/umn/en-us_image_0000001199181232.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001409700093.png
rename to docs/cce/umn/en-us_image_0000001199181232.png
diff --git a/docs/cce/umn/en-us_image_0000001168537057.png b/docs/cce/umn/en-us_image_0000001199181266.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001168537057.png
rename to docs/cce/umn/en-us_image_0000001199181266.png
diff --git a/docs/cce/umn/en-us_image_0000001190538599.png b/docs/cce/umn/en-us_image_0000001199181298.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001190538599.png
rename to docs/cce/umn/en-us_image_0000001199181298.png
diff --git a/docs/cce/umn/en-us_image_0000001159292060.png b/docs/cce/umn/en-us_image_0000001199181334.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001159292060.png
rename to docs/cce/umn/en-us_image_0000001199181334.png
diff --git a/docs/cce/umn/en-us_image_0000001231949185.png b/docs/cce/umn/en-us_image_0000001199181336.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001231949185.png
rename to docs/cce/umn/en-us_image_0000001199181336.png
diff --git a/docs/cce/umn/en-us_image_0000001116237931.png b/docs/cce/umn/en-us_image_0000001199181338.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001116237931.png
rename to docs/cce/umn/en-us_image_0000001199181338.png
diff --git a/docs/cce/umn/en-us_image_0295359661.png b/docs/cce/umn/en-us_image_0000001199181340.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0295359661.png
rename to docs/cce/umn/en-us_image_0000001199181340.png
diff --git a/docs/cce/umn/en-us_image_0144049227.png b/docs/cce/umn/en-us_image_0000001199341250.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0144049227.png
rename to docs/cce/umn/en-us_image_0000001199341250.png
diff --git a/docs/cce/umn/en-us_image_0000001199341268.png b/docs/cce/umn/en-us_image_0000001199341268.png
new file mode 100644
index 00000000..672ccf8d
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001199341268.png differ
diff --git a/docs/cce/umn/en-us_image_0000001199341330.png b/docs/cce/umn/en-us_image_0000001199341330.png
new file mode 100644
index 00000000..5ddb8e3e
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001199341330.png differ
diff --git a/docs/cce/umn/en-us_image_0000001192723190.png b/docs/cce/umn/en-us_image_0000001199501200.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001192723190.png
rename to docs/cce/umn/en-us_image_0000001199501200.png
diff --git a/docs/cce/umn/en-us_image_0000001163847995.png b/docs/cce/umn/en-us_image_0000001199501230.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001163847995.png
rename to docs/cce/umn/en-us_image_0000001199501230.png
diff --git a/docs/cce/umn/en-us_image_0000001199501262.png b/docs/cce/umn/en-us_image_0000001199501262.png
new file mode 100644
index 00000000..e3a75afd
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001199501262.png differ
diff --git a/docs/cce/umn/en-us_image_0000001409860177.png b/docs/cce/umn/en-us_image_0000001199501276.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001409860177.png
rename to docs/cce/umn/en-us_image_0000001199501276.png
diff --git a/docs/cce/umn/en-us_image_0000001199848585.png b/docs/cce/umn/en-us_image_0000001199501290.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001199848585.png
rename to docs/cce/umn/en-us_image_0000001199501290.png
diff --git a/docs/cce/umn/en-us_image_0000001199757520.png b/docs/cce/umn/en-us_image_0000001199757520.png
new file mode 100644
index 00000000..0987110f
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001199757520.png differ
diff --git a/docs/cce/umn/en-us_image_0258503428.png b/docs/cce/umn/en-us_image_0000001201381906.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0258503428.png
rename to docs/cce/umn/en-us_image_0000001201381906.png
diff --git a/docs/cce/umn/en-us_image_0276664213.png b/docs/cce/umn/en-us_image_0000001201823500.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0276664213.png
rename to docs/cce/umn/en-us_image_0000001201823500.png
diff --git a/docs/cce/umn/en-us_image_0000001202101148.png b/docs/cce/umn/en-us_image_0000001202101148.png
new file mode 100644
index 00000000..42fccb7b
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001202101148.png differ
diff --git a/docs/cce/umn/en-us_image_0276664792.png b/docs/cce/umn/en-us_image_0000001202103502.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0276664792.png
rename to docs/cce/umn/en-us_image_0000001202103502.png
diff --git a/docs/cce/umn/en-us_image_0000001203031716.png b/docs/cce/umn/en-us_image_0000001203031716.png
new file mode 100644
index 00000000..7cdfc47d
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001203031716.png differ
diff --git a/docs/cce/umn/en-us_image_0000001359820608.png b/docs/cce/umn/en-us_image_0000001203385342.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001359820608.png
rename to docs/cce/umn/en-us_image_0000001203385342.png
diff --git a/docs/cce/umn/en-us_image_0000001204449561.png b/docs/cce/umn/en-us_image_0000001204449561.png
deleted file mode 100644
index bf526e22..00000000
Binary files a/docs/cce/umn/en-us_image_0000001204449561.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001205757902.png b/docs/cce/umn/en-us_image_0000001205757902.png
new file mode 100644
index 00000000..36a1a482
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001205757902.png differ
diff --git a/docs/cce/umn/en-us_image_0000001206876656.png b/docs/cce/umn/en-us_image_0000001206876656.png
new file mode 100644
index 00000000..303124ea
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001206876656.png differ
diff --git a/docs/cce/umn/en-us_image_0249778542.png b/docs/cce/umn/en-us_image_0000001206959574.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0249778542.png
rename to docs/cce/umn/en-us_image_0000001206959574.png
diff --git a/docs/cce/umn/en-us_image_0000001207511384.png b/docs/cce/umn/en-us_image_0000001207511384.png
new file mode 100644
index 00000000..5a0e7609
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001207511384.png differ
diff --git a/docs/cce/umn/en-us_image_0000001217183707.png b/docs/cce/umn/en-us_image_0000001217183707.png
new file mode 100644
index 00000000..aa7b279e
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001217183707.png differ
diff --git a/docs/cce/umn/en-us_image_0000001218074121.png b/docs/cce/umn/en-us_image_0000001218074121.png
new file mode 100644
index 00000000..97a22d83
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001218074121.png differ
diff --git a/docs/cce/umn/en-us_image_0000001221007635.png b/docs/cce/umn/en-us_image_0000001221007635.png
deleted file mode 100644
index 29071310..00000000
Binary files a/docs/cce/umn/en-us_image_0000001221007635.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001221376671.png b/docs/cce/umn/en-us_image_0000001221376671.png
deleted file mode 100644
index ba5937d9..00000000
Binary files a/docs/cce/umn/en-us_image_0000001221376671.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001221501677.png b/docs/cce/umn/en-us_image_0000001221501677.png
new file mode 100644
index 00000000..f1403c1d
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001221501677.png differ
diff --git a/docs/cce/umn/en-us_image_0000001221820189.png b/docs/cce/umn/en-us_image_0000001221820189.png
new file mode 100644
index 00000000..2af028bd
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001221820189.png differ
diff --git a/docs/cce/umn/en-us_image_0000001222591781.png b/docs/cce/umn/en-us_image_0000001222591781.png
deleted file mode 100644
index 866483a2..00000000
Binary files a/docs/cce/umn/en-us_image_0000001222591781.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001223152415.png b/docs/cce/umn/en-us_image_0000001223152415.png
deleted file mode 100644
index a0a3b169..00000000
Binary files a/docs/cce/umn/en-us_image_0000001223152415.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001223152417.png b/docs/cce/umn/en-us_image_0000001223152417.png
deleted file mode 100644
index 64a42b7c..00000000
Binary files a/docs/cce/umn/en-us_image_0000001223152417.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001223393893.png b/docs/cce/umn/en-us_image_0000001223393893.png
deleted file mode 100644
index 3f5e34ea..00000000
Binary files a/docs/cce/umn/en-us_image_0000001223393893.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001225747980.png b/docs/cce/umn/en-us_image_0000001225747980.png
new file mode 100644
index 00000000..e36aa545
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001225747980.png differ
diff --git a/docs/cce/umn/en-us_image_0000001226818003.png b/docs/cce/umn/en-us_image_0000001226818003.png
new file mode 100644
index 00000000..c586bbf4
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001226818003.png differ
diff --git a/docs/cce/umn/en-us_image_0000001227977765.png b/docs/cce/umn/en-us_image_0000001227977765.png
deleted file mode 100644
index 10acfee9..00000000
Binary files a/docs/cce/umn/en-us_image_0000001227977765.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001229793402.png b/docs/cce/umn/en-us_image_0000001229793402.png
deleted file mode 100644
index 794961c8..00000000
Binary files a/docs/cce/umn/en-us_image_0000001229793402.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001229794946.png b/docs/cce/umn/en-us_image_0000001229794946.png
deleted file mode 100644
index ca497cf7..00000000
Binary files a/docs/cce/umn/en-us_image_0000001229794946.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001236263298.png b/docs/cce/umn/en-us_image_0000001236263298.png
deleted file mode 100644
index 405ed945..00000000
Binary files a/docs/cce/umn/en-us_image_0000001236263298.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001236562704.png b/docs/cce/umn/en-us_image_0000001236562704.png
new file mode 100644
index 00000000..d8f82666
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001236562704.png differ
diff --git a/docs/cce/umn/en-us_image_0000001236582394.png b/docs/cce/umn/en-us_image_0000001236582394.png
deleted file mode 100644
index 0e0debd1..00000000
Binary files a/docs/cce/umn/en-us_image_0000001236582394.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001236723668.png b/docs/cce/umn/en-us_image_0000001236723668.png
new file mode 100644
index 00000000..4f4611d9
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001236723668.png differ
diff --git a/docs/cce/umn/en-us_image_0000001238163131.png b/docs/cce/umn/en-us_image_0000001238163131.png
deleted file mode 100644
index edccd543..00000000
Binary files a/docs/cce/umn/en-us_image_0000001238163131.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001238489436.png b/docs/cce/umn/en-us_image_0000001238489436.png
new file mode 100644
index 00000000..22b800ae
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001238489436.png differ
diff --git a/docs/cce/umn/en-us_image_0000001238830246.png b/docs/cce/umn/en-us_image_0000001238830246.png
new file mode 100644
index 00000000..9d1f0355
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001238830246.png differ
diff --git a/docs/cce/umn/en-us_image_0000001238903330.png b/docs/cce/umn/en-us_image_0000001238903330.png
new file mode 100644
index 00000000..82ffe7e7
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001238903330.png differ
diff --git a/docs/cce/umn/en-us_image_0000001238003081.png b/docs/cce/umn/en-us_image_0000001243981115.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001238003081.png
rename to docs/cce/umn/en-us_image_0000001243981115.png
diff --git a/docs/cce/umn/en-us_image_0000001117575950.png b/docs/cce/umn/en-us_image_0000001243981117.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001117575950.png
rename to docs/cce/umn/en-us_image_0000001243981117.png
diff --git a/docs/cce/umn/en-us_image_0000001144779790.png b/docs/cce/umn/en-us_image_0000001243981141.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001144779790.png
rename to docs/cce/umn/en-us_image_0000001243981141.png
diff --git a/docs/cce/umn/en-us_image_0000001098403383.png b/docs/cce/umn/en-us_image_0000001243981147.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001098403383.png
rename to docs/cce/umn/en-us_image_0000001243981147.png
diff --git a/docs/cce/umn/en-us_image_0000001243981177.png b/docs/cce/umn/en-us_image_0000001243981177.png
new file mode 100644
index 00000000..d1794e79
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001243981177.png differ
diff --git a/docs/cce/umn/en-us_image_0276664171.png b/docs/cce/umn/en-us_image_0000001243981181.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0276664171.png
rename to docs/cce/umn/en-us_image_0000001243981181.png
diff --git a/docs/cce/umn/en-us_image_0000001144502022.png b/docs/cce/umn/en-us_image_0000001243981203.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001144502022.png
rename to docs/cce/umn/en-us_image_0000001243981203.png
diff --git a/docs/cce/umn/en-us_image_0000001086743939.png b/docs/cce/umn/en-us_image_0000001244101107.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001086743939.png
rename to docs/cce/umn/en-us_image_0000001244101107.png
diff --git a/docs/cce/umn/en-us_image_0000001134406294.png b/docs/cce/umn/en-us_image_0000001244101121.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001134406294.png
rename to docs/cce/umn/en-us_image_0000001244101121.png
diff --git a/docs/cce/umn/en-us_image_0000001159118361.png b/docs/cce/umn/en-us_image_0000001244101223.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001159118361.png
rename to docs/cce/umn/en-us_image_0000001244101223.png
diff --git a/docs/cce/umn/en-us_image_0000001244141105.png b/docs/cce/umn/en-us_image_0000001244141105.png
new file mode 100644
index 00000000..8567ed8e
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001244141105.png differ
diff --git a/docs/cce/umn/en-us_image_0000001144620002.png b/docs/cce/umn/en-us_image_0000001244141139.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001144620002.png
rename to docs/cce/umn/en-us_image_0000001244141139.png
diff --git a/docs/cce/umn/en-us_image_0144054048.gif b/docs/cce/umn/en-us_image_0000001244141141.gif
similarity index 100%
rename from docs/cce/umn/en-us_image_0144054048.gif
rename to docs/cce/umn/en-us_image_0000001244141141.gif
diff --git a/docs/cce/umn/en-us_image_0000001163928763.png b/docs/cce/umn/en-us_image_0000001244141181.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001163928763.png
rename to docs/cce/umn/en-us_image_0000001244141181.png
diff --git a/docs/cce/umn/en-us_image_0000001409580465.png b/docs/cce/umn/en-us_image_0000001244141191.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001409580465.png
rename to docs/cce/umn/en-us_image_0000001244141191.png
diff --git a/docs/cce/umn/en-us_image_0000001198861255.png b/docs/cce/umn/en-us_image_0000001244141217.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001198861255.png
rename to docs/cce/umn/en-us_image_0000001244141217.png
diff --git a/docs/cce/umn/en-us_image_0000001244261055.png b/docs/cce/umn/en-us_image_0000001244261055.png
new file mode 100644
index 00000000..25122445
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001244261055.png differ
diff --git a/docs/cce/umn/en-us_image_0000001360140132.png b/docs/cce/umn/en-us_image_0000001244261069.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001360140132.png
rename to docs/cce/umn/en-us_image_0000001244261069.png
diff --git a/docs/cce/umn/en-us_image_0000001142984374.png b/docs/cce/umn/en-us_image_0000001244261071.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001142984374.png
rename to docs/cce/umn/en-us_image_0000001244261071.png
diff --git a/docs/cce/umn/en-us_image_0000001120226646.png b/docs/cce/umn/en-us_image_0000001244261073.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001120226646.png
rename to docs/cce/umn/en-us_image_0000001244261073.png
diff --git a/docs/cce/umn/en-us_image_0254985211.png b/docs/cce/umn/en-us_image_0000001244261103.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0254985211.png
rename to docs/cce/umn/en-us_image_0000001244261103.png
diff --git a/docs/cce/umn/en-us_image_0000001144342232.png b/docs/cce/umn/en-us_image_0000001244261119.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001144342232.png
rename to docs/cce/umn/en-us_image_0000001244261119.png
diff --git a/docs/cce/umn/en-us_image_0254986677.png b/docs/cce/umn/en-us_image_0000001244261161.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0254986677.png
rename to docs/cce/umn/en-us_image_0000001244261161.png
diff --git a/docs/cce/umn/en-us_image_0000001244261167.png b/docs/cce/umn/en-us_image_0000001244261167.png
new file mode 100644
index 00000000..f1ed744e
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001244261167.png differ
diff --git a/docs/cce/umn/en-us_image_0000001160748146.png b/docs/cce/umn/en-us_image_0000001244261169.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001160748146.png
rename to docs/cce/umn/en-us_image_0000001244261169.png
diff --git a/docs/cce/umn/en-us_image_0000001159831938.png b/docs/cce/umn/en-us_image_0000001244261171.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001159831938.png
rename to docs/cce/umn/en-us_image_0000001244261171.png
diff --git a/docs/cce/umn/en-us_image_0000001153101092.png b/docs/cce/umn/en-us_image_0000001244261173.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001153101092.png
rename to docs/cce/umn/en-us_image_0000001244261173.png
diff --git a/docs/cce/umn/en-us_image_0000001244997085.png b/docs/cce/umn/en-us_image_0000001244997085.png
new file mode 100644
index 00000000..ff8087ba
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001244997085.png differ
diff --git a/docs/cce/umn/en-us_image_0000001247802971.png b/docs/cce/umn/en-us_image_0000001247802971.png
new file mode 100644
index 00000000..fcdc7768
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001247802971.png differ
diff --git a/docs/cce/umn/en-us_image_0276664178.png b/docs/cce/umn/en-us_image_0000001248663503.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0276664178.png
rename to docs/cce/umn/en-us_image_0000001248663503.png
diff --git a/docs/cce/umn/en-us_image_0276664570.png b/docs/cce/umn/en-us_image_0000001249023453.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0276664570.png
rename to docs/cce/umn/en-us_image_0000001249023453.png
diff --git a/docs/cce/umn/en-us_image_0000001249073211.png b/docs/cce/umn/en-us_image_0000001249073211.png
new file mode 100644
index 00000000..87446d84
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001249073211.png differ
diff --git a/docs/cce/umn/en-us_image_0000001249958645.png b/docs/cce/umn/en-us_image_0000001249958645.png
new file mode 100644
index 00000000..36a1a482
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001249958645.png differ
diff --git a/docs/cce/umn/en-us_image_0000001251716033.png b/docs/cce/umn/en-us_image_0000001251716033.png
new file mode 100644
index 00000000..92f9830e
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001251716033.png differ
diff --git a/docs/cce/umn/en-us_image_0000001256348238.jpg b/docs/cce/umn/en-us_image_0000001256348238.jpg
new file mode 100644
index 00000000..7747408f
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001256348238.jpg differ
diff --git a/docs/cce/umn/en-us_image_0000001274316069.png b/docs/cce/umn/en-us_image_0000001274316069.png
deleted file mode 100644
index f86a5ecd..00000000
Binary files a/docs/cce/umn/en-us_image_0000001274316069.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001274882416.png b/docs/cce/umn/en-us_image_0000001274882416.png
new file mode 100644
index 00000000..935bdf78
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001274882416.png differ
diff --git a/docs/cce/umn/en-us_image_0000001276433425.png b/docs/cce/umn/en-us_image_0000001276433425.png
new file mode 100644
index 00000000..6a1cd906
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001276433425.png differ
diff --git a/docs/cce/umn/en-us_image_0000001280171657.png b/docs/cce/umn/en-us_image_0000001280171657.png
deleted file mode 100644
index c3426e36..00000000
Binary files a/docs/cce/umn/en-us_image_0000001280171657.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001280181541.png b/docs/cce/umn/en-us_image_0000001280181541.png
deleted file mode 100644
index 6e12ed43..00000000
Binary files a/docs/cce/umn/en-us_image_0000001280181541.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001280421317.png b/docs/cce/umn/en-us_image_0000001280421317.png
deleted file mode 100644
index 108dd89d..00000000
Binary files a/docs/cce/umn/en-us_image_0000001280421317.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001280466745.png b/docs/cce/umn/en-us_image_0000001280466745.png
deleted file mode 100644
index 0492b6b4..00000000
Binary files a/docs/cce/umn/en-us_image_0000001280466745.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001283301301.png b/docs/cce/umn/en-us_image_0000001283301301.png
new file mode 100644
index 00000000..770a9fc0
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001283301301.png differ
diff --git a/docs/cce/umn/en-us_image_0000001283343269.png b/docs/cce/umn/en-us_image_0000001283343269.png
new file mode 100644
index 00000000..4c17d7c1
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001283343269.png differ
diff --git a/docs/cce/umn/en-us_image_0000001290111529.png b/docs/cce/umn/en-us_image_0000001290111529.png
new file mode 100644
index 00000000..226fcf76
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001290111529.png differ
diff --git a/docs/cce/umn/en-us_image_0000001291567729.png b/docs/cce/umn/en-us_image_0000001291567729.png
new file mode 100644
index 00000000..b48bf295
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001291567729.png differ
diff --git a/docs/cce/umn/en-us_image_0000001325364477.png b/docs/cce/umn/en-us_image_0000001325364477.png
new file mode 100644
index 00000000..b181bead
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001325364477.png differ
diff --git a/docs/cce/umn/en-us_image_0000001325377749.png b/docs/cce/umn/en-us_image_0000001325377749.png
new file mode 100644
index 00000000..6cab297a
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001325377749.png differ
diff --git a/docs/cce/umn/en-us_image_0000001283755568.png b/docs/cce/umn/en-us_image_0000001336475537.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001283755568.png
rename to docs/cce/umn/en-us_image_0000001336475537.png
diff --git a/docs/cce/umn/en-us_image_0000001352539924.png b/docs/cce/umn/en-us_image_0000001352539924.png
new file mode 100644
index 00000000..5ae1c1de
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001352539924.png differ
diff --git a/docs/cce/umn/en-us_image_0000001360670117.png b/docs/cce/umn/en-us_image_0000001360670117.png
new file mode 100644
index 00000000..603c9468
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001360670117.png differ
diff --git a/docs/cce/umn/en-us_image_0000001378942548.png b/docs/cce/umn/en-us_image_0000001378942548.png
new file mode 100644
index 00000000..a004b89a
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001378942548.png differ
diff --git a/docs/cce/umn/en-us_image_0000001392259910.png b/docs/cce/umn/en-us_image_0000001392259910.png
new file mode 100644
index 00000000..1dbc8381
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001392259910.png differ
diff --git a/docs/cce/umn/en-us_image_0000001392280374.png b/docs/cce/umn/en-us_image_0000001392280374.png
new file mode 100644
index 00000000..c569d0fc
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001392280374.png differ
diff --git a/docs/cce/umn/en-us_image_0000001392318380.png b/docs/cce/umn/en-us_image_0000001392318380.png
new file mode 100644
index 00000000..8e6d0509
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001392318380.png differ
diff --git a/docs/cce/umn/en-us_image_0000001397733101.png b/docs/cce/umn/en-us_image_0000001397733101.png
new file mode 100644
index 00000000..ece3f195
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001397733101.png differ
diff --git a/docs/cce/umn/en-us_image_0000001402494682.png b/docs/cce/umn/en-us_image_0000001402494682.png
new file mode 100644
index 00000000..cf481886
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001402494682.png differ
diff --git a/docs/cce/umn/en-us_image_0165888686.png b/docs/cce/umn/en-us_image_0000001408895746.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0165888686.png
rename to docs/cce/umn/en-us_image_0000001408895746.png
diff --git a/docs/cce/umn/en-us_image_0000001414561076.png b/docs/cce/umn/en-us_image_0000001414561076.png
new file mode 100644
index 00000000..f57b6c81
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001414561076.png differ
diff --git a/docs/cce/umn/en-us_image_0000001460905374.png b/docs/cce/umn/en-us_image_0000001460905374.png
new file mode 100644
index 00000000..50d2b8fd
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001460905374.png differ
diff --git a/docs/cce/umn/en-us_image_0000001461224886.png b/docs/cce/umn/en-us_image_0000001461224886.png
new file mode 100644
index 00000000..fd35a0fd
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001461224886.png differ
diff --git a/docs/cce/umn/en-us_image_0000001359980148.png b/docs/cce/umn/en-us_image_0000001464878016.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001359980148.png
rename to docs/cce/umn/en-us_image_0000001464878016.png
diff --git a/docs/cce/umn/en-us_image_0000001360140128.png b/docs/cce/umn/en-us_image_0000001465197524.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001360140128.png
rename to docs/cce/umn/en-us_image_0000001465197524.png
diff --git a/docs/cce/umn/en-us_image_0000001480191270.png b/docs/cce/umn/en-us_image_0000001480191270.png
new file mode 100644
index 00000000..290b94f1
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001480191270.png differ
diff --git a/docs/cce/umn/en-us_image_0000001482541956.png b/docs/cce/umn/en-us_image_0000001482541956.png
new file mode 100644
index 00000000..ca9934b1
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001482541956.png differ
diff --git a/docs/cce/umn/en-us_image_0000001482546084.png b/docs/cce/umn/en-us_image_0000001482546084.png
new file mode 100644
index 00000000..b2f9f125
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001482546084.png differ
diff --git a/docs/cce/umn/en-us_image_0000001482701968.png b/docs/cce/umn/en-us_image_0000001482701968.png
new file mode 100644
index 00000000..9b7e01f2
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001482701968.png differ
diff --git a/docs/cce/umn/en-us_image_0000001482796460.png b/docs/cce/umn/en-us_image_0000001482796460.png
new file mode 100644
index 00000000..60b03efc
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001482796460.png differ
diff --git a/docs/cce/umn/en-us_image_0000001409700089.png b/docs/cce/umn/en-us_image_0000001515838557.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001409700089.png
rename to docs/cce/umn/en-us_image_0000001515838557.png
diff --git a/docs/cce/umn/en-us_image_0000001409740389.png b/docs/cce/umn/en-us_image_0000001515917789.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001409740389.png
rename to docs/cce/umn/en-us_image_0000001515917789.png
diff --git a/docs/cce/umn/en-us_image_0000001528627005.png b/docs/cce/umn/en-us_image_0000001528627005.png
new file mode 100644
index 00000000..d74a10fa
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001528627005.png differ
diff --git a/docs/cce/umn/en-us_image_0000001531373685.png b/docs/cce/umn/en-us_image_0000001531373685.png
new file mode 100644
index 00000000..23752228
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001531373685.png differ
diff --git a/docs/cce/umn/en-us_image_0000001531533045.png b/docs/cce/umn/en-us_image_0000001531533045.png
new file mode 100644
index 00000000..f963c973
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001531533045.png differ
diff --git a/docs/cce/umn/en-us_image_0000001531533921.png b/docs/cce/umn/en-us_image_0000001531533921.png
new file mode 100644
index 00000000..da5aed3a
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001531533921.png differ
diff --git a/docs/cce/umn/en-us_image_0000001533181077.png b/docs/cce/umn/en-us_image_0000001533181077.png
new file mode 100644
index 00000000..47324ea0
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001533181077.png differ
diff --git a/docs/cce/umn/en-us_image_0000001533585325.png b/docs/cce/umn/en-us_image_0000001533585325.png
new file mode 100644
index 00000000..c18b7997
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001533585325.png differ
diff --git a/docs/cce/umn/en-us_image_0000001533586881.png b/docs/cce/umn/en-us_image_0000001533586881.png
new file mode 100644
index 00000000..92f9830e
Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001533586881.png differ
diff --git a/docs/cce/umn/en-us_image_0121749065.png b/docs/cce/umn/en-us_image_0121749065.png
deleted file mode 100644
index 7fab2b4e..00000000
Binary files a/docs/cce/umn/en-us_image_0121749065.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0144042759.png b/docs/cce/umn/en-us_image_0144042759.png
deleted file mode 100644
index 8df3e3b2..00000000
Binary files a/docs/cce/umn/en-us_image_0144042759.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0144045351.png b/docs/cce/umn/en-us_image_0144045351.png
deleted file mode 100644
index 4f6eaf1e..00000000
Binary files a/docs/cce/umn/en-us_image_0144045351.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0165899095.png b/docs/cce/umn/en-us_image_0165899095.png
deleted file mode 100644
index 4be5db17..00000000
Binary files a/docs/cce/umn/en-us_image_0165899095.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0165899282.png b/docs/cce/umn/en-us_image_0165899282.png
deleted file mode 100644
index 8e539c68..00000000
Binary files a/docs/cce/umn/en-us_image_0165899282.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0181616313.png b/docs/cce/umn/en-us_image_0181616313.png
deleted file mode 100644
index bfcccfec..00000000
Binary files a/docs/cce/umn/en-us_image_0181616313.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0181616314.png b/docs/cce/umn/en-us_image_0181616314.png
deleted file mode 100644
index bd210297..00000000
Binary files a/docs/cce/umn/en-us_image_0181616314.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0183134473.png b/docs/cce/umn/en-us_image_0183134473.png
deleted file mode 100644
index c293cf90..00000000
Binary files a/docs/cce/umn/en-us_image_0183134473.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0183134479.png b/docs/cce/umn/en-us_image_0183134479.png
deleted file mode 100644
index c293cf90..00000000
Binary files a/docs/cce/umn/en-us_image_0183134479.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0183134608.png b/docs/cce/umn/en-us_image_0183134608.png
deleted file mode 100644
index c293cf90..00000000
Binary files a/docs/cce/umn/en-us_image_0183134608.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0183674977.png b/docs/cce/umn/en-us_image_0183674977.png
deleted file mode 100644
index 1fe5ee34..00000000
Binary files a/docs/cce/umn/en-us_image_0183674977.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0195434915.png b/docs/cce/umn/en-us_image_0195434915.png
deleted file mode 100644
index 4f6eaf1e..00000000
Binary files a/docs/cce/umn/en-us_image_0195434915.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0198873490.png b/docs/cce/umn/en-us_image_0198873490.png
deleted file mode 100644
index 577d034f..00000000
Binary files a/docs/cce/umn/en-us_image_0198873490.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0198876479.png b/docs/cce/umn/en-us_image_0198876479.png
deleted file mode 100644
index 577d034f..00000000
Binary files a/docs/cce/umn/en-us_image_0198876479.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0214003838.png b/docs/cce/umn/en-us_image_0214003838.png
deleted file mode 100644
index 15ee848b..00000000
Binary files a/docs/cce/umn/en-us_image_0214003838.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0220702939.png b/docs/cce/umn/en-us_image_0220702939.png
deleted file mode 100644
index 8ea2b58b..00000000
Binary files a/docs/cce/umn/en-us_image_0220702939.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0220765374.png b/docs/cce/umn/en-us_image_0220765374.png
deleted file mode 100644
index e6d9c02a..00000000
Binary files a/docs/cce/umn/en-us_image_0220765374.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0250508826.png b/docs/cce/umn/en-us_image_0250508826.png
deleted file mode 100644
index 229ce201..00000000
Binary files a/docs/cce/umn/en-us_image_0250508826.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0259714782.png b/docs/cce/umn/en-us_image_0259714782.png
deleted file mode 100644
index e69e52c6..00000000
Binary files a/docs/cce/umn/en-us_image_0259714782.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0259716601.png b/docs/cce/umn/en-us_image_0259716601.png
deleted file mode 100644
index 81ed369b..00000000
Binary files a/docs/cce/umn/en-us_image_0259716601.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0259814716.png b/docs/cce/umn/en-us_image_0259814716.png
deleted file mode 100644
index 81ed369b..00000000
Binary files a/docs/cce/umn/en-us_image_0259814716.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0259814717.png b/docs/cce/umn/en-us_image_0259814717.png
deleted file mode 100644
index e69e52c6..00000000
Binary files a/docs/cce/umn/en-us_image_0259814717.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0000001223152421.png b/docs/cce/umn/en-us_image_0261818822.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001223152421.png
rename to docs/cce/umn/en-us_image_0261818822.png
diff --git a/docs/cce/umn/en-us_image_0000001178034110.png b/docs/cce/umn/en-us_image_0261818824.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001178034110.png
rename to docs/cce/umn/en-us_image_0261818824.png
diff --git a/docs/cce/umn/en-us_image_0261818867.png b/docs/cce/umn/en-us_image_0261818867.png
new file mode 100644
index 00000000..99f7849e
Binary files /dev/null and b/docs/cce/umn/en-us_image_0261818867.png differ
diff --git a/docs/cce/umn/en-us_image_0000001178034108.png b/docs/cce/umn/en-us_image_0261818875.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001178034108.png
rename to docs/cce/umn/en-us_image_0261818875.png
diff --git a/docs/cce/umn/en-us_image_0000001178192670.png b/docs/cce/umn/en-us_image_0261818885.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001178192670.png
rename to docs/cce/umn/en-us_image_0261818885.png
diff --git a/docs/cce/umn/en-us_image_0000001223393899.png b/docs/cce/umn/en-us_image_0261818886.png
similarity index 100%
rename from docs/cce/umn/en-us_image_0000001223393899.png
rename to docs/cce/umn/en-us_image_0261818886.png
diff --git a/docs/cce/umn/en-us_image_0261818893.png b/docs/cce/umn/en-us_image_0261818893.png
new file mode 100644
index 00000000..e1b6ec8d
Binary files /dev/null and b/docs/cce/umn/en-us_image_0261818893.png differ
diff --git a/docs/cce/umn/en-us_image_0261818896.png b/docs/cce/umn/en-us_image_0261818896.png
new file mode 100644
index 00000000..e1b6ec8d
Binary files /dev/null and b/docs/cce/umn/en-us_image_0261818896.png differ
diff --git a/docs/cce/umn/en-us_image_0261818899.png b/docs/cce/umn/en-us_image_0261818899.png
new file mode 100644
index 00000000..e1b6ec8d
Binary files /dev/null and b/docs/cce/umn/en-us_image_0261818899.png differ
diff --git a/docs/cce/umn/en-us_image_0261820020.png b/docs/cce/umn/en-us_image_0261820020.png
new file mode 100644
index 00000000..b0639578
Binary files /dev/null and b/docs/cce/umn/en-us_image_0261820020.png differ
diff --git a/docs/cce/umn/en-us_image_0268523694.png b/docs/cce/umn/en-us_image_0268523694.png
new file mode 100644
index 00000000..718c8fa2
Binary files /dev/null and b/docs/cce/umn/en-us_image_0268523694.png differ
diff --git a/docs/cce/umn/en-us_image_0269288708.png b/docs/cce/umn/en-us_image_0269288708.png
deleted file mode 100644
index 63c26fe3..00000000
Binary files a/docs/cce/umn/en-us_image_0269288708.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0273156799.png b/docs/cce/umn/en-us_image_0273156799.png
deleted file mode 100644
index f6a55993..00000000
Binary files a/docs/cce/umn/en-us_image_0273156799.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0275445543.png b/docs/cce/umn/en-us_image_0275445543.png
new file mode 100644
index 00000000..1909444d
Binary files /dev/null and b/docs/cce/umn/en-us_image_0275445543.png differ
diff --git a/docs/cce/umn/en-us_image_0275445566.png b/docs/cce/umn/en-us_image_0275445566.png
new file mode 100644
index 00000000..1909444d
Binary files /dev/null and b/docs/cce/umn/en-us_image_0275445566.png differ
diff --git a/docs/cce/umn/en-us_image_0275452681.png b/docs/cce/umn/en-us_image_0275452681.png
new file mode 100644
index 00000000..1909444d
Binary files /dev/null and b/docs/cce/umn/en-us_image_0275452681.png differ
diff --git a/docs/cce/umn/en-us_image_0278498565.png b/docs/cce/umn/en-us_image_0278498565.png
new file mode 100644
index 00000000..2756b2fb
Binary files /dev/null and b/docs/cce/umn/en-us_image_0278498565.png differ
diff --git a/docs/cce/umn/en-us_image_0298565473.png b/docs/cce/umn/en-us_image_0298565473.png
deleted file mode 100644
index 68d37b13..00000000
Binary files a/docs/cce/umn/en-us_image_0298565473.png and /dev/null differ
diff --git a/docs/cce/umn/en-us_image_0300973777.png b/docs/cce/umn/en-us_image_0300973777.png
deleted file mode 100644
index 1fe5ee34..00000000
Binary files a/docs/cce/umn/en-us_image_0300973777.png and /dev/null differ
diff --git a/docs/cce/umn/public_sys-resources/icon-arrowdn.gif b/docs/cce/umn/public_sys-resources/icon-arrowdn.gif
index 84eec9be..37942803 100644
Binary files a/docs/cce/umn/public_sys-resources/icon-arrowdn.gif and b/docs/cce/umn/public_sys-resources/icon-arrowdn.gif differ
diff --git a/docs/cce/umn/public_sys-resources/icon-arrowrt.gif b/docs/cce/umn/public_sys-resources/icon-arrowrt.gif
index 39583d16..6aaaa11c 100644
Binary files a/docs/cce/umn/public_sys-resources/icon-arrowrt.gif and b/docs/cce/umn/public_sys-resources/icon-arrowrt.gif differ