diff --git a/docs/cce/umn/ALL_META.TXT.json b/docs/cce/umn/ALL_META.TXT.json index 1f489ef2..e00f7a4d 100644 --- a/docs/cce/umn/ALL_META.TXT.json +++ b/docs/cce/umn/ALL_META.TXT.json @@ -260,31 +260,11 @@ "title":"Kubernetes Version Support Mechanism", "githuburl":"" }, - { - "uri":"cce_bulletin_0301.html", - "node_id":"cce_bulletin_0301.xml", - "product_code":"cce", - "code":"15", - "des":"CCE nodes in Hybrid clusters can run on EulerOS 2.5, EulerOS 2.9 , HCE OS 2.0 and Ubuntu 22.04. You are not advised to use the CentOS 7.7 image to create nodes because th", - "doc_type":"usermanual2", - "kw":"OS Patch Notes for Cluster Nodes,Product Bulletin,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "opensource":"true", - "documenttype":"usermanual2", - "IsMulti":"Yes" - } - ], - "title":"OS Patch Notes for Cluster Nodes", - "githuburl":"" - }, { "uri":"cce_bulletin_0061.html", "node_id":"cce_bulletin_0061.xml", "product_code":"cce", - "code":"16", + "code":"15", "des":"Dear users,We are pleased to announce that a brand-new CCE console is available. The new console is modern, visually appealing, and concise, providing a more comfortable ", "doc_type":"usermanual2", "kw":"CCE Console Upgrade,Product Bulletin,User Guide", @@ -301,7 +281,7 @@ "uri":"cce_bulletin_0169.html", "node_id":"cce_bulletin_0169.xml", "product_code":"cce", - "code":"17", + "code":"16", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Security Vulnerability Responses", @@ -321,7 +301,7 @@ "uri":"cce_bulletin_0011.html", "node_id":"cce_bulletin_0011.xml", "product_code":"cce", - "code":"18", + "code":"17", "des":"High-risk vulnerabilities:CCE fixes vulnerabilities as soon as possible after the Kubernetes community detects them and releases fixing solutions. The fixing policies are", "doc_type":"usermanual2", "kw":"Vulnerability Fixing Policies,Security Vulnerability Responses,User Guide", @@ -340,7 +320,7 @@ "uri":"CVE-2021-4034.html", "node_id":"cve-2021-4034.xml", "product_code":"cce", - "code":"19", + "code":"18", "des":"Recently, a security research team disclosed a privilege escalation vulnerability (CVE-2021-4034, also dubbed PwnKit) in PolKit's pkexec. Unprivileged users can gain full", "doc_type":"usermanual2", "kw":"Linux Polkit Privilege Escalation Vulnerability (CVE-2021-4034),Security Vulnerability Responses,Use", @@ -360,7 +340,7 @@ "uri":"cce_bulletin_0206.html", "node_id":"cce_bulletin_0206.xml", "product_code":"cce", - "code":"20", + "code":"19", "des":"The Linux Kernel SACK vulnerabilities have been fixed. This section describes the solution to these vulnerabilities.On June 18, 2019, Red Hat released a security notice, ", "doc_type":"usermanual2", "kw":"Notice on Fixing Linux Kernel SACK Vulnerabilities,Security Vulnerability Responses,User Guide", @@ -380,7 +360,7 @@ "uri":"cce_qs_0000.html", "node_id":"cce_qs_0000.xml", "product_code":"cce", - "code":"21", + "code":"20", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Getting Started", @@ -400,7 +380,7 @@ "uri":"cce_qs_0001.html", "node_id":"cce_qs_0001.xml", "product_code":"cce", - "code":"22", + "code":"21", "des":"This section describes how to use Cloud Container Engine (CCE) and provides frequently asked questions (FAQs) to help you quickly get started with CCE.Complete the follow", "doc_type":"usermanual2", "kw":"Introduction,Getting Started,User Guide", @@ -408,7 +388,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Introduction", @@ -418,7 +398,7 @@ "uri":"cce_qs_0006.html", "node_id":"cce_qs_0006.xml", "product_code":"cce", - "code":"23", + "code":"22", "des":"Before using CCE, make the following preparations:Creating an IAM userObtaining Resource Permissions(Optional) Creating a VPC(Optional) Creating a Key PairIf you want to ", "doc_type":"usermanual2", "kw":"VPC,Preparations,Getting Started,User Guide", @@ -426,7 +406,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Preparations", @@ -436,7 +416,7 @@ "uri":"cce_qs_0008.html", "node_id":"cce_qs_0008.xml", "product_code":"cce", - "code":"24", + "code":"23", "des":"This section describes how to quickly create a CCE cluster. In this example, the default or simple configurations are in use.If you have no clusters, click Create CCE Sta", "doc_type":"usermanual2", "kw":"Creating a Kubernetes Cluster,Getting Started,User Guide", @@ -444,7 +424,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Creating a Kubernetes Cluster", @@ -454,7 +434,7 @@ "uri":"cce_qs_0003.html", "node_id":"cce_qs_0003.xml", "product_code":"cce", - "code":"25", + "code":"24", "des":"You can use images to quickly create a single-pod workload that can be accessed from public networks. This section describes how to use CCE to quickly deploy an Nginx app", "doc_type":"usermanual2", "kw":"Creating a Deployment (Nginx),Getting Started,User Guide", @@ -462,7 +442,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Creating a Deployment (Nginx)", @@ -472,7 +452,7 @@ "uri":"cce_qs_0007.html", "node_id":"cce_qs_0007.xml", "product_code":"cce", - "code":"26", + "code":"25", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Deploying WordPress and MySQL That Depend on Each Other", @@ -480,7 +460,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Deploying WordPress and MySQL That Depend on Each Other", @@ -490,7 +470,7 @@ "uri":"cce_qs_0009.html", "node_id":"cce_qs_0009.xml", "product_code":"cce", - "code":"27", + "code":"26", "des":"WordPress was originally a blog platform based on PHP and MySQL. It is gradually evolved into a content management system. 
You can set up your own blog website on any ser", "doc_type":"usermanual2", "kw":"Overview,Deploying WordPress and MySQL That Depend on Each Other,User Guide", @@ -498,7 +478,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Overview", @@ -508,7 +488,7 @@ "uri":"cce_qs_0004.html", "node_id":"cce_qs_0004.xml", "product_code":"cce", - "code":"28", + "code":"27", "des":"WordPress must be used together with MySQL. WordPress runs the content management program while MySQL serves as a database to store data.You have created a CCE cluster th", "doc_type":"usermanual2", "kw":"Creating a MySQL Workload,Deploying WordPress and MySQL That Depend on Each Other,User Guide", @@ -516,7 +496,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Creating a MySQL Workload", @@ -526,7 +506,7 @@ "uri":"cce_qs_0005.html", "node_id":"cce_qs_0005.xml", "product_code":"cce", - "code":"29", + "code":"28", "des":"WordPress was originally a blog platform based on PHP and MySQL. It is gradually evolved into a content management system. You can set up your own blog website on any ser", "doc_type":"usermanual2", "kw":"Creating a WordPress Workload,Deploying WordPress and MySQL That Depend on Each Other,User Guide", @@ -534,7 +514,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Creating a WordPress Workload", @@ -544,7 +524,7 @@ "uri":"cce_10_0054.html", "node_id":"cce_10_0054.xml", "product_code":"cce", - "code":"30", + "code":"29", "des":"During service deployment or running, you may trigger high-risk operations at different levels, causing service faults or interruption. To help you better estimate and av", "doc_type":"usermanual2", "kw":"High-Risk Operations and Solutions,User Guide", @@ -552,9 +532,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"High-Risk Operations and Solutions", @@ -564,7 +542,7 @@ "uri":"cce_10_0091.html", "node_id":"cce_10_0091.xml", "product_code":"cce", - "code":"31", + "code":"30", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Clusters", @@ -572,9 +550,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Clusters", @@ -584,7 +560,7 @@ "uri":"cce_10_0002.html", "node_id":"cce_10_0002.xml", "product_code":"cce", - "code":"32", + "code":"31", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Cluster Overview", @@ -592,9 +568,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Cluster Overview", @@ -604,7 +578,7 @@ "uri":"cce_10_0430.html", "node_id":"cce_10_0430.xml", "product_code":"cce", - "code":"33", + "code":"32", "des":"Kubernetes is an open source container orchestration engine for automating deployment, scaling, and management of containerized applications.For developers, Kubernetes is", "doc_type":"usermanual2", "kw":"Master Nodes,Basic Cluster Information,Cluster Overview,User Guide", @@ -612,9 +586,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Basic Cluster Information", @@ -624,20 +596,36 @@ "uri":"cce_10_0068.html", "node_id":"cce_10_0068.xml", "product_code":"cce", - "code":"34", + "code":"33", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", - "kw":"Kubernetes Release Notes", + "kw":"Kubernetes Version Release Notes", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], - "title":"Kubernetes Release Notes", + "title":"Kubernetes Version Release Notes", + "githuburl":"" + }, + { + "uri":"cce_bulletin_0068.html", + "node_id":"cce_bulletin_0068.xml", + "product_code":"cce", + "code":"34", + "des":"CCE allows you to create Kubernetes clusters 1.28. This section describes the changes made in Kubernetes 1.28.Important NotesNew and Enhanced FeaturesAPI Changes and Remo", + "doc_type":"usermanual2", + "kw":"Kubernetes 1.28 Release Notes,Kubernetes Version Release Notes,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual2" + } + ], + "title":"Kubernetes 1.28 Release Notes", "githuburl":"" }, { @@ -647,14 +635,12 @@ "code":"35", "des":"CCE allows you to create clusters of Kubernetes 1.27. This section describes the changes made in Kubernetes 1.27 compared with Kubernetes 1.25.New FeaturesDeprecations an", "doc_type":"usermanual2", - "kw":"Kubernetes 1.27 Release Notes,Kubernetes Release Notes,User Guide", + "kw":"Kubernetes 1.27 Release Notes,Kubernetes Version Release Notes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Kubernetes 1.27 Release Notes", @@ -667,14 +653,12 @@ "code":"36", "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. 
This section describes the changes made in Kubernetes 1.25 compared wi", "doc_type":"usermanual2", - "kw":"Kubernetes 1.25 Release Notes,Kubernetes Release Notes,User Guide", + "kw":"Kubernetes 1.25 Release Notes,Kubernetes Version Release Notes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Kubernetes 1.25 Release Notes", @@ -687,14 +671,12 @@ "code":"37", "des":"This section describes the updates in CCE Kubernetes 1.23.Kubernetes 1.23 Release NotesFlexVolume is deprecated. Use CSI.HorizontalPodAutoscaler v2 is promoted to GA, and", "doc_type":"usermanual2", - "kw":"Kubernetes 1.23 Release Notes,Kubernetes Release Notes,User Guide", + "kw":"Kubernetes 1.23 Release Notes,Kubernetes Version Release Notes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Kubernetes 1.23 Release Notes", @@ -707,14 +689,12 @@ "code":"38", "des":"This section describes the updates in CCE Kubernetes 1.21.Kubernetes 1.21 Release NotesCronJob is now in the stable state, and the version number changes to batch/v1.The ", "doc_type":"usermanual2", - "kw":"Kubernetes 1.21 Release Notes,Kubernetes Release Notes,User Guide", + "kw":"Kubernetes 1.21 Release Notes,Kubernetes Version Release Notes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Kubernetes 1.21 Release Notes", @@ -727,14 +707,12 @@ "code":"39", "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.19.Kubernetes v", "doc_type":"usermanual2", - "kw":"Kubernetes 1.19 (EOM) Release Notes,Kubernetes Release Notes,User Guide", + "kw":"Kubernetes 1.19 (EOM) Release Notes,Kubernetes Version Release Notes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Kubernetes 1.19 (EOM) Release Notes", @@ -747,14 +725,12 @@ "code":"40", "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. 
This section describes the updates in CCE Kubernetes 1.17.All resource", "doc_type":"usermanual2", - "kw":"Kubernetes 1.17 (EOM) Release Notes,Kubernetes Release Notes,User Guide", + "kw":"Kubernetes 1.17 (EOM) Release Notes,Kubernetes Version Release Notes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Kubernetes 1.17 (EOM) Release Notes", @@ -767,17 +743,15 @@ "code":"41", "des":"In CCE v1.27 and later versions, all nodes support only the containerd container engine.All nodes in the CCE clusters of version 1.25, except the ones running EulerOS 2.5", "doc_type":"usermanual2", - "kw":"Release Notes for CCE Cluster Versions,Cluster Overview,User Guide", + "kw":"Patch Version Release Notes,Cluster Overview,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], - "title":"Release Notes for CCE Cluster Versions", + "title":"Patch Version Release Notes", "githuburl":"" }, { @@ -792,9 +766,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Creating a Cluster", @@ -805,19 +777,17 @@ "node_id":"cce_10_0342.xml", "product_code":"cce", "code":"43", - "des":"The following table lists the differences between CCE Turbo clusters and CCE standard clusters.", + "des":"CCE provides different types of clusters for you to select. The following table lists the differences between them.", "doc_type":"usermanual2", - "kw":"CCE Turbo Clusters and CCE Standard Clusters,Creating a Cluster,User Guide", + "kw":"Comparison Between Cluster Types,Creating a Cluster,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], - "title":"CCE Turbo Clusters and CCE Standard Clusters", + "title":"Comparison Between Cluster Types", "githuburl":"" }, { @@ -827,17 +797,15 @@ "code":"44", "des":"On the CCE console, you can easily create Kubernetes clusters. After a cluster is created, the master node is hosted by CCE. You only need to create worker nodes. 
In this", "doc_type":"usermanual2", - "kw":"Creating a CCE Cluster,Creating a Cluster,User Guide", + "kw":"Creating a CCE Standard/Turbo Cluster,Creating a Cluster,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], - "title":"Creating a CCE Cluster", + "title":"Creating a CCE Standard/Turbo Cluster", "githuburl":"" }, { @@ -852,9 +820,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Comparing iptables and IPVS", @@ -872,9 +838,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Connecting to a Cluster", @@ -885,16 +849,14 @@ "node_id":"cce_10_0107.xml", "product_code":"cce", "code":"47", - "des":"This section uses a CCE standard cluster as an example to describe how to connect to a CCE cluster using kubectl.When you access a cluster using kubectl, CCE uses kubecon", + "des":"This section uses a CCE standard cluster as an example to describe how to access a CCE cluster using kubectl.When you access a cluster using kubectl, CCE uses kubeconfig ", "doc_type":"usermanual2", - "kw":"kubectl,kubeconfig,Intranet access,Two-Way Authentication for Domain Names,Error from server Forbidd", + "kw":"kubectl,Intranet access,Two-Way Authentication for Domain Names,Error from server Forbidden,The conn", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Connecting to a Cluster Using kubectl", @@ -905,16 +867,14 @@ "node_id":"cce_10_0175.xml", "product_code":"cce", "code":"48", - "des":"This section describes how to obtain the cluster certificate from the console and use it to access Kubernetes clusters.The downloaded certificate contains three files: cl", + "des":"This section describes how to obtain the cluster certificate from the console and use it access Kubernetes clusters.The downloaded certificate contains three files: clien", "doc_type":"usermanual2", "kw":"X.509 certificate,Connecting to a Cluster Using an X.509 Certificate,Connecting to a Cluster,User Gu", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Connecting to a Cluster Using an X.509 Certificate", @@ -927,14 +887,12 @@ "code":"49", "des":"Subject Alternative Name (SAN) allows multiple values (including IP addresses, domain names, and so on) to be associated with certificates. 
A SAN is usually used by the c", "doc_type":"usermanual2", - "kw":"SAN,Accessing a Cluster Using a Custom Domain Name,Connecting to a Cluster,User Guide", + "kw":"SAN,X.509 certificate,Accessing a Cluster Using a Custom Domain Name,Connecting to a Cluster,User Gu", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Accessing a Cluster Using a Custom Domain Name", @@ -952,9 +910,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Upgrading a Cluster", @@ -965,16 +921,14 @@ "node_id":"cce_10_0197.xml", "product_code":"cce", "code":"51", - "des":"To enable interoperability from one Kubernetes installation to the next, you must upgrade your Kubernetes clusters before the maintenance period ends.After the latest Kub", + "des":"CCE strictly complies with community consistency authentication. It releases three Kubernetes versions each year and offers a maintenance period of at least 24 months aft", "doc_type":"usermanual2", - "kw":"cluster upgrade process,Upgrade,In-place upgrade,Upgrade Overview,Upgrading a Cluster,User Guide", + "kw":"cluster upgrade process,Node Priority,In-place upgrade,Upgrade Overview,Upgrading a Cluster,User Gui", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Upgrade Overview", @@ -992,39 +946,17 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Before You Start", "githuburl":"" }, - { - "uri":"cce_10_0301.html", - "node_id":"cce_10_0301.xml", - "product_code":"cce", - "code":"53", - "des":"You can upgrade your clusters to a newer version on the CCE console.Before the upgrade, learn about the target version to which each CCE cluster can be upgraded in what w", - "doc_type":"usermanual2", - "kw":"Node Priority,Performing an In-place Upgrade,Upgrading a Cluster,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" - } - ], - "title":"Performing an In-place Upgrade", - "githuburl":"" - }, { "uri":"cce_10_0560.html", "node_id":"cce_10_0560.xml", "product_code":"cce", - "code":"54", + "code":"53", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Performing Post-Upgrade Verification", @@ -1032,79 +964,17 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Performing Post-Upgrade Verification", "githuburl":"" }, - { - "uri":"cce_10_0562.html", - "node_id":"cce_10_0562.xml", - "product_code":"cce", - "code":"55", - "des":"Check whether there are unexpected pods in the cluster.Check whether there are any pods that ran properly originally in the cluster restart unexpectedly.If there are abno", - "doc_type":"usermanual2", - "kw":"Pod Check,Performing Post-Upgrade Verification,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" - } - ], - "title":"Pod Check", - "githuburl":"" - }, - { - "uri":"cce_10_0563.html", - "node_id":"cce_10_0563.xml", - "product_code":"cce", - "code":"56", - "des":"Check whether nodes are running properly.Check whether the node network is functional.Check whether the container network is functional.If the container network malfuncti", - "doc_type":"usermanual2", - "kw":"Node and Container Network Check,Performing Post-Upgrade Verification,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" - } - ], - "title":"Node and Container Network Check", - "githuburl":"" - }, - { - "uri":"cce_10_0564.html", - "node_id":"cce_10_0564.xml", - "product_code":"cce", - "code":"57", - "des":"Check whether custom node labels are lost.Check whether there are any unexpected taints newly added on the node, which will affect workload scheduling.Custom labels will ", - "doc_type":"usermanual2", - "kw":"Node Label and Taint Check,Performing Post-Upgrade Verification,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" - } - ], - "title":"Node Label and Taint Check", - "githuburl":"" - }, { "uri":"cce_10_0568.html", "node_id":"cce_10_0568.xml", "product_code":"cce", - "code":"58", + "code":"54", "des":"After a cluster is upgraded, check whether the cluster is in the Running state.CCE automatically checks your cluster status. Go to the cluster list page and confirm the c", "doc_type":"usermanual2", "kw":"Cluster Status Check,Performing Post-Upgrade Verification,User Guide", @@ -1112,9 +982,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Cluster Status Check", @@ -1124,7 +992,7 @@ "uri":"cce_10_0569.html", "node_id":"cce_10_0569.xml", "product_code":"cce", - "code":"59", + "code":"55", "des":"After a cluster is upgraded, check whether nodes in the cluster are in the Running state.CCE automatically checks your node statuses. 
Go to the node list page and confirm", "doc_type":"usermanual2", "kw":"Node Status Check,Performing Post-Upgrade Verification,User Guide", @@ -1132,9 +1000,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node Status Check", @@ -1144,7 +1010,7 @@ "uri":"cce_10_0567.html", "node_id":"cce_10_0567.xml", "product_code":"cce", - "code":"60", + "code":"56", "des":"After a cluster is upgraded, check whether there are any nodes that skip the upgrade in the cluster. These nodes may affect the proper running of the cluster.CCE automati", "doc_type":"usermanual2", "kw":"Node Skipping Check,Performing Post-Upgrade Verification,User Guide", @@ -1152,9 +1018,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node Skipping Check", @@ -1164,7 +1028,7 @@ "uri":"cce_10_0561.html", "node_id":"cce_10_0561.xml", "product_code":"cce", - "code":"61", + "code":"57", "des":"After a cluster is upgraded, check whether its services are running properly.Different services have different verification mode. Select a suitable one and verify the ser", "doc_type":"usermanual2", "kw":"Service Check,Performing Post-Upgrade Verification,User Guide", @@ -1172,9 +1036,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Service Check", @@ -1184,7 +1046,7 @@ "uri":"cce_10_0565.html", "node_id":"cce_10_0565.xml", "product_code":"cce", - "code":"62", + "code":"58", "des":"Check whether nodes can be created in the cluster.If nodes cannot be created in your cluster after the cluster is upgraded, contact technical support.", "doc_type":"usermanual2", "kw":"New Node Check,Performing Post-Upgrade Verification,User Guide", @@ -1192,9 +1054,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"New Node Check", @@ -1204,7 +1064,7 @@ "uri":"cce_10_0566.html", "node_id":"cce_10_0566.xml", "product_code":"cce", - "code":"63", + "code":"59", "des":"Check whether pods can be created on the existing nodes after the cluster is upgraded.Check whether pods can be created on new nodes after the cluster is upgraded.After c", "doc_type":"usermanual2", "kw":"New Pod Check,Performing Post-Upgrade Verification,User Guide", @@ -1212,9 +1072,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"New Pod Check", @@ -1224,7 +1082,7 @@ "uri":"cce_10_0210.html", "node_id":"cce_10_0210.xml", "product_code":"cce", - "code":"64", + "code":"60", "des":"This section describes how to migrate services from a cluster of an earlier version to a cluster of a later version in CCE.This operation is applicable when a cross-versi", "doc_type":"usermanual2", "kw":"Migrating Services Across Clusters of Different Versions,Upgrading a Cluster,User Guide", @@ -1232,9 +1090,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Migrating Services Across Clusters of Different Versions", @@ -1244,7 +1100,7 @@ "uri":"cce_10_0550.html", "node_id":"cce_10_0550.xml", "product_code":"cce", - "code":"65", + "code":"61", "des":"HUAWEI CLOUD Help Center presents technical documents to help you 
quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Troubleshooting for Pre-upgrade Check Exceptions", @@ -1252,9 +1108,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Troubleshooting for Pre-upgrade Check Exceptions", @@ -1264,7 +1118,7 @@ "uri":"cce_10_0549.html", "node_id":"cce_10_0549.xml", "product_code":"cce", - "code":"66", + "code":"62", "des":"The system automatically checks a cluster before its upgrade. If the cluster does not meet the pre-upgrade check conditions, the upgrade cannot continue. To avoid risks, ", "doc_type":"usermanual2", "kw":"Pre-upgrade Check,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1272,9 +1126,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Pre-upgrade Check", @@ -1284,7 +1136,7 @@ "uri":"cce_10_0431.html", "node_id":"cce_10_0431.xml", "product_code":"cce", - "code":"67", + "code":"63", "des":"Check the following items:Check whether the node is available.Check whether the node OS supports the upgrade.Check whether the node is marked with unexpected node pool la", "doc_type":"usermanual2", "kw":"Node Restrictions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1292,9 +1144,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node Restrictions", @@ -1304,7 +1154,7 @@ "uri":"cce_10_0432.html", "node_id":"cce_10_0432.xml", "product_code":"cce", - "code":"68", + "code":"64", "des":"Check whether the target cluster is under upgrade management.CCE may temporarily restrict the cluster upgrade due to the following reasons:The cluster is identified as th", "doc_type":"usermanual2", "kw":"Upgrade Management,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1312,9 +1162,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Upgrade Management", @@ -1324,7 +1172,7 @@ "uri":"cce_10_0433.html", "node_id":"cce_10_0433.xml", "product_code":"cce", - "code":"69", + "code":"65", "des":"Check the following items:Check whether the add-on status is normal.Check whether the add-on support the target version.Scenario 1: The add-on malfunctions.Log in to the ", "doc_type":"usermanual2", "kw":"Add-ons,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1332,9 +1180,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Add-ons", @@ -1344,7 +1190,7 @@ "uri":"cce_10_0434.html", "node_id":"cce_10_0434.xml", "product_code":"cce", - "code":"70", + "code":"66", "des":"Check whether the current HelmRelease record contains discarded Kubernetes APIs that are not supported by the target cluster version. 
If yes, the Helm chart may be unavai", "doc_type":"usermanual2", "kw":"Helm Charts,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1352,9 +1198,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Helm Charts", @@ -1364,7 +1208,7 @@ "uri":"cce_10_0435.html", "node_id":"cce_10_0435.xml", "product_code":"cce", - "code":"71", + "code":"67", "des":"Check whether CCE can connect to your master nodes.Contact technical support.", "doc_type":"usermanual2", "kw":"SSH Connectivity of Master Nodes,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1372,9 +1216,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"SSH Connectivity of Master Nodes", @@ -1384,7 +1226,7 @@ "uri":"cce_10_0436.html", "node_id":"cce_10_0436.xml", "product_code":"cce", - "code":"72", + "code":"68", "des":"Check the node pool status.Check whether the node pool OS or container runtime is supported after the upgrade.Scenario: The node pool malfunctions.Log in to the CCE conso", "doc_type":"usermanual2", "kw":"Node Pools,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1392,9 +1234,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node Pools", @@ -1404,7 +1244,7 @@ "uri":"cce_10_0437.html", "node_id":"cce_10_0437.xml", "product_code":"cce", - "code":"73", + "code":"69", "des":"Check whether the Protocol & Port of the worker node security groups are set to ICMP: All and whether the security group with the source IP address set to the master node", "doc_type":"usermanual2", "kw":"Security Groups,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1412,9 +1252,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Security Groups", @@ -1424,7 +1262,7 @@ "uri":"cce_10_0439.html", "node_id":"cce_10_0439.xml", "product_code":"cce", - "code":"74", + "code":"70", "des":"Check whether the node needs to be migrated.For the 1.15 cluster that is upgraded from 1.13 in rolling mode, migrate (reset or create and replace) all nodes before perfor", "doc_type":"usermanual2", "kw":"To-Be-Migrated Nodes,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1432,9 +1270,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"To-Be-Migrated Nodes", @@ -1444,7 +1280,7 @@ "uri":"cce_10_0440.html", "node_id":"cce_10_0440.xml", "product_code":"cce", - "code":"75", + "code":"71", "des":"Check whether there are discarded resources in the clusters.Scenario: The Service in the clusters of v1.25 or later has discarded annotation: tolerate-unready-endpoints.E", "doc_type":"usermanual2", "kw":"Discarded Kubernetes Resources,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1452,9 +1288,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Discarded Kubernetes Resources", @@ -1464,7 +1298,7 @@ "uri":"cce_10_0441.html", "node_id":"cce_10_0441.xml", "product_code":"cce", - "code":"76", + "code":"72", "des":"Read the version compatibility differences and ensure that they are not affected. 
The patch upgrade does not involve version compatibility differences.", "doc_type":"usermanual2", "kw":"Compatibility Risks,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1472,9 +1306,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Compatibility Risks", @@ -1484,27 +1316,25 @@ "uri":"cce_10_0442.html", "node_id":"cce_10_0442.xml", "product_code":"cce", - "code":"77", + "code":"73", "des":"Check whether cce-agent on the current node is of the latest version.Scenario 1: The error message \"you cce-agent no update, please restart it\" is displayed.cce-agent doe", "doc_type":"usermanual2", - "kw":"Node CCE Agent Versions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "kw":"CCE Agent Versions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], - "title":"Node CCE Agent Versions", + "title":"CCE Agent Versions", "githuburl":"" }, { "uri":"cce_10_0443.html", "node_id":"cce_10_0443.xml", "product_code":"cce", - "code":"78", + "code":"74", "des":"Check whether the CPU usage of the node exceeds 90%.Upgrade the cluster during off-peak hours.Check whether too many pods are deployed on the node. If yes, reschedule pod", "doc_type":"usermanual2", "kw":"Node CPU Usage,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1512,9 +1342,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node CPU Usage", @@ -1524,7 +1352,7 @@ "uri":"cce_10_0444.html", "node_id":"cce_10_0444.xml", "product_code":"cce", - "code":"79", + "code":"75", "des":"Check the following items:Check whether the key CRD packageversions.version.cce.io of the cluster is deleted.Check whether the cluster key CRD network-attachment-definiti", "doc_type":"usermanual2", "kw":"CRDs,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1532,9 +1360,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"CRDs", @@ -1544,7 +1370,7 @@ "uri":"cce_10_0445.html", "node_id":"cce_10_0445.xml", "product_code":"cce", - "code":"80", + "code":"76", "des":"Check the following items:Check whether the key data disks on the node meet the upgrade requirements.Check whether the /tmp directory has 500 MB available space.During th", "doc_type":"usermanual2", "kw":"Node Disks,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1552,9 +1378,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node Disks", @@ -1564,7 +1388,7 @@ "uri":"cce_10_0446.html", "node_id":"cce_10_0446.xml", "product_code":"cce", - "code":"81", + "code":"77", "des":"Check the following items:Check whether the DNS configuration of the current node can resolve the OBS address.Check whether the current node can access the OBS address of", "doc_type":"usermanual2", "kw":"Node DNS,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1572,9 +1396,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node DNS", @@ -1584,7 +1406,7 @@ "uri":"cce_10_0447.html", 
"node_id":"cce_10_0447.xml", "product_code":"cce", - "code":"82", + "code":"78", "des":"Check whether the owner and owner group of the files in the /var/paas directory used by the CCE are both paas.Scenario 1: The error message \"xx file permission has been c", "doc_type":"usermanual2", "kw":"Node Key Directory File Permissions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1592,9 +1414,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node Key Directory File Permissions", @@ -1604,7 +1424,7 @@ "uri":"cce_10_0448.html", "node_id":"cce_10_0448.xml", "product_code":"cce", - "code":"83", + "code":"79", "des":"Check whether the kubelet on the node is running properly.Scenario 1: The kubelet status is abnormal.If the kubelet malfunctions, the node is unavailable. Restore the nod", "doc_type":"usermanual2", "kw":"Kubelet,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1612,9 +1432,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Kubelet", @@ -1624,7 +1442,7 @@ "uri":"cce_10_0449.html", "node_id":"cce_10_0449.xml", "product_code":"cce", - "code":"84", + "code":"80", "des":"Check whether the memory usage of the node exceeds 90%.Upgrade the cluster during off-peak hours.Check whether too many pods are deployed on the node. If yes, reschedule ", "doc_type":"usermanual2", "kw":"Node Memory,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1632,9 +1450,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node Memory", @@ -1644,7 +1460,7 @@ "uri":"cce_10_0450.html", "node_id":"cce_10_0450.xml", "product_code":"cce", - "code":"85", + "code":"81", "des":"Check whether the clock synchronization server ntpd or chronyd of the node is running properly.Scenario 1: ntpd is running abnormally.Log in to the node and run the syste", "doc_type":"usermanual2", "kw":"Node Clock Synchronization Server,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1652,9 +1468,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node Clock Synchronization Server", @@ -1664,17 +1478,15 @@ "uri":"cce_10_0451.html", "node_id":"cce_10_0451.xml", "product_code":"cce", - "code":"86", - "des":"Check whether the OS kernel version of the node is supported by CCE.CCE nodes run depending on the initial standard kernel version when they are created. 
CCE has performe", + "code":"82", + "des":"Check whether the OS kernel version of the node is supported by CCE.Case 1: The node image is not a standard CCE image.CCE nodes run depending on the initial standard ker", "doc_type":"usermanual2", "kw":"Node OS,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node OS", @@ -1684,7 +1496,7 @@ "uri":"cce_10_0452.html", "node_id":"cce_10_0452.xml", "product_code":"cce", - "code":"87", + "code":"83", "des":"Check whether the number of CPUs on the master node is greater than 2.If the number of CPUs on the master node is 2, contact technical support to expand the number to 4 o", "doc_type":"usermanual2", "kw":"Node CPUs,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1692,9 +1504,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node CPUs", @@ -1704,7 +1514,7 @@ "uri":"cce_10_0453.html", "node_id":"cce_10_0453.xml", "product_code":"cce", - "code":"88", + "code":"84", "des":"Check whether the Python commands are available on a node.If the command output is not 0, the check fails.Install Python before the upgrade.", "doc_type":"usermanual2", "kw":"Node Python Commands,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1712,9 +1522,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node Python Commands", @@ -1724,7 +1532,7 @@ "uri":"cce_10_0455.html", "node_id":"cce_10_0455.xml", "product_code":"cce", - "code":"89", + "code":"85", "des":"Check whether the nodes in the cluster are ready.Scenario 1: The nodes are in the unavailable status.Log in to the CCE console and click the cluster name to access the cl", "doc_type":"usermanual2", "kw":"Node Readiness,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1732,9 +1540,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node Readiness", @@ -1744,7 +1550,7 @@ "uri":"cce_10_0456.html", "node_id":"cce_10_0456.xml", "product_code":"cce", - "code":"90", + "code":"86", "des":"Check whether journald of a node is normal.Log in to the node and run the systemctl is-active systemd-journald command to obtain the running status of journald. If the co", "doc_type":"usermanual2", "kw":"Node journald,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1752,9 +1558,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node journald", @@ -1764,7 +1568,7 @@ "uri":"cce_10_0457.html", "node_id":"cce_10_0457.xml", "product_code":"cce", - "code":"91", + "code":"87", "des":"Check whether the containerd.sock file exists on the node. 
This file affects the startup of container runtime in the Euler OS.Scenario: The Docker used by the node is the", "doc_type":"usermanual2", "kw":"containerd.sock,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1772,9 +1576,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"containerd.sock", @@ -1784,7 +1586,7 @@ "uri":"cce_10_0458.html", "node_id":"cce_10_0458.xml", "product_code":"cce", - "code":"92", + "code":"88", "des":"Before the upgrade, check whether an internal error occurs.If this check fails, contact technical support.", "doc_type":"usermanual2", "kw":"Internal Errors,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1792,9 +1594,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Internal Errors", @@ -1804,17 +1604,15 @@ "uri":"cce_10_0459.html", "node_id":"cce_10_0459.xml", "product_code":"cce", - "code":"93", - "des":"Check whether inaccessible mount points exist on the node.Scenario: There are inaccessible mount points on the node.If NFS (such as OBS parallel file systems and SFS) is ", + "code":"89", + "des":"Check whether inaccessible mount points exist on the node.Scenario: There are inaccessible mount points on the node.If NFS (such as obsfs or SFS) is used by the node and ", "doc_type":"usermanual2", "kw":"Node Mount Points,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node Mount Points", @@ -1824,7 +1622,7 @@ "uri":"cce_10_0460.html", "node_id":"cce_10_0460.xml", "product_code":"cce", - "code":"94", + "code":"90", "des":"Check whether the taint needed for cluster upgrade exists on the node.Scenario 1: The node is skipped during the cluster upgrade.If the version of the node is different f", "doc_type":"usermanual2", "kw":"Kubernetes Node Taints,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1832,9 +1630,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Kubernetes Node Taints", @@ -1844,7 +1640,7 @@ "uri":"cce_10_0478.html", "node_id":"cce_10_0478.xml", "product_code":"cce", - "code":"95", + "code":"91", "des":"Check whether there are any compatibility restrictions on the current Everest add-on.There are compatibility restrictions on the current Everest add-on and it cannot be u", "doc_type":"usermanual2", "kw":"Everest Restrictions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1852,9 +1648,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Everest Restrictions", @@ -1864,7 +1658,7 @@ "uri":"cce_10_0479.html", "node_id":"cce_10_0479.xml", "product_code":"cce", - "code":"96", + "code":"92", "des":"Check whether the current cce-controller-hpa add-on has compatibility restrictions.The current cce-controller-hpa add-on has compatibility restrictions. 
An add-on that ca", "doc_type":"usermanual2", "kw":"cce-hpa-controller Restrictions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1872,9 +1666,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"cce-hpa-controller Restrictions", @@ -1884,7 +1676,7 @@ "uri":"cce_10_0480.html", "node_id":"cce_10_0480.xml", "product_code":"cce", - "code":"97", + "code":"93", "des":"Check whether the current cluster version and the target version support enhanced CPU policy.Scenario: Only the current cluster version supports the enhanced CPU policy f", "doc_type":"usermanual2", "kw":"Enhanced CPU Policies,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1892,9 +1684,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Enhanced CPU Policies", @@ -1904,7 +1694,7 @@ "uri":"cce_10_0484.html", "node_id":"cce_10_0484.xml", "product_code":"cce", - "code":"98", + "code":"94", "des":"Check whether the container runtime and network components on the worker nodes are healthy.If a worker node component malfunctions, log in to the node to check the status", "doc_type":"usermanual2", "kw":"Health of Worker Node Components,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1912,9 +1702,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Health of Worker Node Components", @@ -1924,7 +1712,7 @@ "uri":"cce_10_0485.html", "node_id":"cce_10_0485.xml", "product_code":"cce", - "code":"99", + "code":"95", "des":"Check whether the Kubernetes, container runtime, and network components of the master nodes are healthy.If a master node component malfunctions, contact technical support", "doc_type":"usermanual2", "kw":"Health of Master Node Components,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1932,9 +1720,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Health of Master Node Components", @@ -1944,7 +1730,7 @@ "uri":"cce_10_0486.html", "node_id":"cce_10_0486.xml", "product_code":"cce", - "code":"100", + "code":"96", "des":"Check whether the resources of Kubernetes components, such as etcd and kube-controller-manager, exceed the upper limit.Solution 1: Reduce Kubernetes resources that are ne", "doc_type":"usermanual2", "kw":"Memory Resource Limit of Kubernetes Components,Troubleshooting for Pre-upgrade Check Exceptions,User", @@ -1952,9 +1738,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Memory Resource Limit of Kubernetes Components", @@ -1964,7 +1748,7 @@ "uri":"cce_10_0487.html", "node_id":"cce_10_0487.xml", "product_code":"cce", - "code":"101", + "code":"97", "des":"The system scans the audit logs of the past day to check whether the user calls the deprecated APIs of the target Kubernetes version.Due to the limited time range of audi", "doc_type":"usermanual2", "kw":"Discarded Kubernetes APIs,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1972,9 +1756,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Discarded Kubernetes APIs", @@ -1984,7 
+1766,7 @@ "uri":"cce_10_0488.html", "node_id":"cce_10_0488.xml", "product_code":"cce", - "code":"102", + "code":"98", "des":"If IPv6 is enabled for a CCE Turbo cluster, check whether the target cluster version supports IPv6.CCE Turbo clusters support IPv6 since v1.23. This feature is available ", "doc_type":"usermanual2", "kw":"IPv6 Capabilities of a CCE Turbo Cluster,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -1992,9 +1774,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"IPv6 Capabilities of a CCE Turbo Cluster", @@ -2004,7 +1784,7 @@ "uri":"cce_10_0489.html", "node_id":"cce_10_0489.xml", "product_code":"cce", - "code":"103", + "code":"99", "des":"Check whether NetworkManager of a node is normal.Log in to the node and run the systemctl is-active NetworkManager command to obtain the running status of NetworkManager.", "doc_type":"usermanual2", "kw":"Node NetworkManager,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -2012,9 +1792,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node NetworkManager", @@ -2024,7 +1802,7 @@ "uri":"cce_10_0490.html", "node_id":"cce_10_0490.xml", "product_code":"cce", - "code":"104", + "code":"100", "des":"Check the ID file format.", "doc_type":"usermanual2", "kw":"Node ID File,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -2032,9 +1810,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node ID File", @@ -2044,7 +1820,7 @@ "uri":"cce_10_0491.html", "node_id":"cce_10_0491.xml", "product_code":"cce", - "code":"105", + "code":"101", "des":"When you upgrade a cluster to v1.19 or later, the system checks whether the following configuration files have been modified on the backend:/opt/cloud/cce/kubernetes/kube", "doc_type":"usermanual2", "kw":"Node Configuration Consistency,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -2052,9 +1828,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node Configuration Consistency", @@ -2064,7 +1838,7 @@ "uri":"cce_10_0492.html", "node_id":"cce_10_0492.xml", "product_code":"cce", - "code":"106", + "code":"102", "des":"Check whether the configuration files of key components exist on the node.The following table lists the files to be checked.Contact technical support to restore the confi", "doc_type":"usermanual2", "kw":"Node Configuration File,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -2072,9 +1846,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node Configuration File", @@ -2084,7 +1856,7 @@ "uri":"cce_10_0493.html", "node_id":"cce_10_0493.xml", "product_code":"cce", - "code":"107", + "code":"103", "des":"Check whether the current CoreDNS key configuration Corefile is different from the Helm release record. 
The difference may be overwritten during the add-on upgrade, affec", "doc_type":"usermanual2", "kw":"CoreDNS Configuration Consistency,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -2092,9 +1864,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"CoreDNS Configuration Consistency", @@ -2104,7 +1874,7 @@ "uri":"cce_10_0494.html", "node_id":"cce_10_0494.xml", "product_code":"cce", - "code":"108", + "code":"104", "des":"Whether the sudo commands and sudo-related files of the node are workingScenario 1: The sudo command fails to be executed.During the in-place cluster upgrade, the sudo co", "doc_type":"usermanual2", "kw":"sudo Commands of a Node,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -2112,9 +1882,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"sudo Commands of a Node", @@ -2124,17 +1892,15 @@ "uri":"cce_10_0495.html", "node_id":"cce_10_0495.xml", "product_code":"cce", - "code":"109", - "des":"Whether some key commands that the node upgrade depends on are workingScenario 1: The package manager command fails to be executed.The rpm or dpkg command fails to be exe", + "code":"105", + "des":"Whether some key commands that the node upgrade depends on are workingScenario 1: Executing the package manager command failed.Executing the rpm or dpkg command failed. I", "doc_type":"usermanual2", "kw":"Key Commands of Nodes,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Key Commands of Nodes", @@ -2144,7 +1910,7 @@ "uri":"cce_10_0496.html", "node_id":"cce_10_0496.xml", "product_code":"cce", - "code":"110", + "code":"106", "des":"Check whether the docker/containerd.sock file is directly mounted to the pods on a node. 
During an upgrade, Docker or containerd restarts and the sock file on the host ch", "doc_type":"usermanual2", "kw":"Mounting of a Sock File on a Node,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -2152,9 +1918,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Mounting of a Sock File on a Node", @@ -2164,17 +1928,15 @@ "uri":"cce_10_0497.html", "node_id":"cce_10_0497.xml", "product_code":"cce", - "code":"111", - "des":"Check whether the certificate used by an HTTPS load balancer has been modified on ELB.The certificate referenced by an HTTPS Ingress created on CCE is modified on the ELB", + "code":"107", + "des":"Check whether the certificate used by an HTTPS load balancer has been modified on ELB.The certificate referenced by an HTTPS ingress created on CCE is modified on the ELB", "doc_type":"usermanual2", "kw":"HTTPS Load Balancer Certificate Consistency,Troubleshooting for Pre-upgrade Check Exceptions,User Gu", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"HTTPS Load Balancer Certificate Consistency", @@ -2184,7 +1946,7 @@ "uri":"cce_10_0498.html", "node_id":"cce_10_0498.xml", "product_code":"cce", - "code":"112", + "code":"108", "des":"Check whether the default mount directory and soft link on the node have been manually mounted or modified.Non-shared diskBy default, /var/lib/docker, containerd, or /mnt", "doc_type":"usermanual2", "kw":"Node Mounting,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -2192,9 +1954,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node Mounting", @@ -2204,7 +1964,7 @@ "uri":"cce_10_0499.html", "node_id":"cce_10_0499.xml", "product_code":"cce", - "code":"113", + "code":"109", "des":"Check whether user paas is allowed to log in to a node.Run the following command to check whether user paas is allowed to log in to a node:If the permissions assigned to ", "doc_type":"usermanual2", "kw":"Login Permissions of User paas on a Node,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -2212,9 +1972,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Login Permissions of User paas on a Node", @@ -2224,7 +1982,7 @@ "uri":"cce_10_0500.html", "node_id":"cce_10_0500.xml", "product_code":"cce", - "code":"114", + "code":"110", "des":"Check whether the load balancer associated with a Service is allocated with a private IPv4 address.Solution 1: Delete the Service that is associated with a load balancer ", "doc_type":"usermanual2", "kw":"Private IPv4 Addresses of Load Balancers,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -2232,9 +1990,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Private IPv4 Addresses of Load Balancers", @@ -2244,7 +2000,7 @@ "uri":"cce_10_0501.html", "node_id":"cce_10_0501.xml", "product_code":"cce", - "code":"115", + "code":"111", "des":"Check whether the source version of the cluster is earlier than v1.11 and the target version is later than v1.23.If the source version of the cluster is earlier than v1.1", "doc_type":"usermanual2", "kw":"Historical Upgrade 
Records,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -2252,9 +2008,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Historical Upgrade Records", @@ -2264,7 +2018,7 @@ "uri":"cce_10_0502.html", "node_id":"cce_10_0502.xml", "product_code":"cce", - "code":"116", + "code":"112", "des":"Check whether the CIDR block of the cluster management plane is the same as that configured on the backbone network.If the CIDR block of the cluster management plane is d", "doc_type":"usermanual2", "kw":"CIDR Block of the Cluster Management Plane,Troubleshooting for Pre-upgrade Check Exceptions,User Gui", @@ -2272,9 +2026,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"CIDR Block of the Cluster Management Plane", @@ -2284,7 +2036,7 @@ "uri":"cce_10_0503.html", "node_id":"cce_10_0503.xml", "product_code":"cce", - "code":"117", + "code":"113", "des":"The GPU add-on is involved in the upgrade, which may affect the GPU driver installation during the creation of a GPU node.The GPU add-on driver needs to be configured by ", "doc_type":"usermanual2", "kw":"GPU Add-on,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -2292,9 +2044,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"GPU Add-on", @@ -2304,7 +2054,7 @@ "uri":"cce_10_0504.html", "node_id":"cce_10_0504.xml", "product_code":"cce", - "code":"118", + "code":"114", "des":"Check whether the default system parameter settings on your nodes are modified.If the MTU value of the bond0 network on your BMS node is not the default value 1500, this ", "doc_type":"usermanual2", "kw":"Nodes' System Parameter Settings,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -2312,9 +2062,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Nodes' System Parameter Settings", @@ -2324,7 +2072,7 @@ "uri":"cce_10_0505.html", "node_id":"cce_10_0505.xml", "product_code":"cce", - "code":"119", + "code":"115", "des":"Check whether there are residual package version data in the current cluster.A message is displayed indicating that there are residual 10.12.1.109 CRD resources in your c", "doc_type":"usermanual2", "kw":"Residual Package Versions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -2332,9 +2080,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Residual Package Versions", @@ -2344,7 +2090,7 @@ "uri":"cce_10_0506.html", "node_id":"cce_10_0506.xml", "product_code":"cce", - "code":"120", + "code":"116", "des":"Check whether the commands required for the upgrade are available on the node.The cluster upgrade failure is typically caused by the lack of key node commands that are re", "doc_type":"usermanual2", "kw":"Node Commands,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -2352,9 +2098,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node Commands", @@ -2364,7 +2108,7 @@ "uri":"cce_10_0507.html", "node_id":"cce_10_0507.xml", "product_code":"cce", - "code":"121", + "code":"117", "des":"Check 
whether swap has been enabled on cluster nodes.By default, swap is disabled on CCE nodes. Check the necessity of enabling swap manually and determine the impact of ", "doc_type":"usermanual2", "kw":"Node Swap,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -2372,9 +2116,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node Swap", @@ -2384,67 +2126,169 @@ "uri":"cce_10_0510.html", "node_id":"cce_10_0510.xml", "product_code":"cce", - "code":"122", - "des":"Check whether the service containers running on a node that uses containerd are restart when the node's containerd is upgraded.Upgrade the cluster when the impact on serv", + "code":"118", + "des":"Check whether the service container running on the node may restart when the containerd component is upgraded on the node that uses containerd in the current cluster.Ensu", "doc_type":"usermanual2", - "kw":"containerd Pod Restart Risk,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "kw":"Check containerd pod restart risk,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], - "title":"containerd Pod Restart Risk", + "title":"Check containerd pod restart risk", "githuburl":"" }, { "uri":"cce_10_0511.html", "node_id":"cce_10_0511.xml", "product_code":"cce", - "code":"123", - "des":"Check whether some configurations of the CCE AI Suite add-on installed in a cluster are intrusively modified. If yes, the upgrade may fail.", + "code":"119", + "des":"Check whether the configuration of the CCE AI Suite add-on in a cluster has been intrusively modified. If so, upgrading the cluster may fail.", "doc_type":"usermanual2", - "kw":"Key Parameters of the GPU Add-on,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "kw":"Key GPU Add-on Parameters,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], - "title":"Key Parameters of the GPU Add-on", + "title":"Key GPU Add-on Parameters", "githuburl":"" }, { "uri":"cce_10_0512.html", "node_id":"cce_10_0512.xml", "product_code":"cce", - "code":"124", - "des":"Check whether the node on which GPU/NPU service containers run is rebuilt when kubelet is restarted during the upgrade of the current cluster. 
If yes, the services will b", + "code":"120", + "des":"Check whether GPU or NPU service pods are rebuilt in a cluster when kubelet is restarted during the upgrade of the cluster.Upgrade the cluster when the impact on services", "doc_type":"usermanual2", - "kw":"GPU/NPU Pod Rebuilding Risk,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "kw":"GPU or NPU Pod Rebuild Risks,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], - "title":"GPU/NPU Pod Rebuilding Risk", + "title":"GPU or NPU Pod Rebuild Risks", + "githuburl":"" + }, + { + "uri":"cce_10_0513.html", + "node_id":"cce_10_0513.xml", + "product_code":"cce", + "code":"121", + "des":"Check whether the access control of the ELB listener has been configured for the Service in the current cluster using annotations and whether the configurations are corre", + "doc_type":"usermanual2", + "kw":"ELB Listener Access Control,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual2" + } + ], + "title":"ELB Listener Access Control", + "githuburl":"" + }, + { + "uri":"cce_10_0514.html", + "node_id":"cce_10_0514.xml", + "product_code":"cce", + "code":"122", + "des":"Check whether the flavor of the master nodes in the cluster is the same as the actual flavor of these nodes.Flavor inconsistency is typically due to a modification made o", + "doc_type":"usermanual2", + "kw":"Master Node Flavor,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual2" + } + ], + "title":"Master Node Flavor", + "githuburl":"" + }, + { + "uri":"cce_10_0515.html", + "node_id":"cce_10_0515.xml", + "product_code":"cce", + "code":"123", + "des":"Check whether the number of available IP addresses in the cluster subnet supports rolling upgrade.If the number of IP addresses in the selected cluster subnet is insuffic", + "doc_type":"usermanual2", + "kw":"Subnet Quota of Master Nodes,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual2" + } + ], + "title":"Subnet Quota of Master Nodes", + "githuburl":"" + }, + { + "uri":"cce_10_0516.html", + "node_id":"cce_10_0516.xml", + "product_code":"cce", + "code":"124", + "des":"Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. Do not use Docker in clusters of versions later than 1.27.If the runtime on your node is", + "doc_type":"usermanual2", + "kw":"Node Runtime,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual2" + } + ], + "title":"Node Runtime", + "githuburl":"" + }, + { + "uri":"cce_10_0517.html", + "node_id":"cce_10_0517.xml", + "product_code":"cce", + "code":"125", + "des":"Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. 
Do not use Docker in clusters of versions later than 1.27.If the runtime on your node po", + "doc_type":"usermanual2", + "kw":"Node Pool Runtime,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual2" + } + ], + "title":"Node Pool Runtime", + "githuburl":"" + }, + { + "uri":"cce_10_0518.html", + "node_id":"cce_10_0518.xml", + "product_code":"cce", + "code":"126", + "des":"Check the number of images on your node. If the number is greater than 1000, Docker startup may be slow.Contact O&M personnel to check whether this issue affects the upgr", + "doc_type":"usermanual2", + "kw":"Number of Node Images,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual2" + } + ], + "title":"Number of Node Images", "githuburl":"" }, { "uri":"cce_10_0031.html", "node_id":"cce_10_0031.xml", "product_code":"cce", - "code":"125", + "code":"127", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Managing a Cluster", @@ -2452,9 +2296,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Managing a Cluster", @@ -2464,17 +2306,15 @@ "uri":"cce_10_0213.html", "node_id":"cce_10_0213.xml", "product_code":"cce", - "code":"126", - "des":"CCE allows you to manage cluster parameters, through which you can let core components work under your very requirements.This function is supported only in clusters of v1", + "code":"128", + "des":"CCE allows you to manage cluster parameters, through which you can let core components work under your requirements.This function is supported only in clusters of v1.15 a", "doc_type":"usermanual2", "kw":"cluster parameters,kube-apiserver,kube-controller-manager,Cluster Configuration Management,Managing ", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Cluster Configuration Management", @@ -2484,7 +2324,7 @@ "uri":"cce_10_0602.html", "node_id":"cce_10_0602.xml", "product_code":"cce", - "code":"127", + "code":"129", "des":"If enabled, concurrent requests are dynamically controlled based on the resource pressure of master nodes to keep them and the cluster available.The cluster version must ", "doc_type":"usermanual2", "kw":"Cluster Overload Control,Managing a Cluster,User Guide", @@ -2492,9 +2332,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Cluster Overload Control", @@ -2504,7 +2342,7 @@ "uri":"cce_10_0403.html", "node_id":"cce_10_0403.xml", "product_code":"cce", - "code":"128", + "code":"130", "des":"CCE allows you to change the number of nodes managed in a cluster.This function is supported for clusters of v1.15 and later versions.Starting from v1.15.11, the number o", "doc_type":"usermanual2", "kw":"Changing Cluster Scale,Managing a Cluster,User Guide", @@ -2512,9 +2350,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Changing Cluster 
Scale", @@ -2524,7 +2360,7 @@ "uri":"cce_10_0212.html", "node_id":"cce_10_0212.xml", "product_code":"cce", - "code":"129", + "code":"131", "des":"Deleting a cluster will delete the nodes in the cluster (excluding accepted nodes), data disks attached to the nodes, workloads, and Services. Related services cannot be ", "doc_type":"usermanual2", "kw":"Deleting a Cluster,Managing a Cluster,User Guide", @@ -2532,9 +2368,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Deleting a Cluster", @@ -2544,7 +2378,7 @@ "uri":"cce_10_0214.html", "node_id":"cce_10_0214.xml", "product_code":"cce", - "code":"130", + "code":"132", "des":"If you do not need to use a cluster temporarily, hibernate the cluster.After a cluster is hibernated, resources such as workloads cannot be created or managed in the clus", "doc_type":"usermanual2", "kw":"Hibernating and Waking Up a Cluster,Managing a Cluster,User Guide", @@ -2552,9 +2386,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Hibernating and Waking Up a Cluster", @@ -2564,7 +2396,7 @@ "uri":"cce_10_0183.html", "node_id":"cce_10_0183.xml", "product_code":"cce", - "code":"131", + "code":"133", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Nodes", @@ -2572,9 +2404,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Nodes", @@ -2584,7 +2414,7 @@ "uri":"cce_10_0180.html", "node_id":"cce_10_0180.xml", "product_code":"cce", - "code":"132", + "code":"134", "des":"A container cluster consists of a set of worker machines, called nodes, that run containerized applications. A node can be a virtual machine (VM) or a physical machine (P", "doc_type":"usermanual2", "kw":"paas,user group,Node Overview,Nodes,User Guide", @@ -2592,9 +2422,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node Overview", @@ -2604,7 +2432,7 @@ "uri":"cce_10_0462.html", "node_id":"cce_10_0462.xml", "product_code":"cce", - "code":"133", + "code":"135", "des":"Container engines, one of the most important components of Kubernetes, manage the lifecycle of images and containers. 
The kubelet interacts with a container runtime throu", "doc_type":"usermanual2", "kw":"Container Engine,Nodes,User Guide", @@ -2612,19 +2440,35 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Container Engine", "githuburl":"" }, + { + "uri":"cce_10_0476.html", + "node_id":"cce_10_0476.xml", + "product_code":"cce", + "code":"136", + "des":"This section describes the mappings between released cluster versions and OS versions.", + "doc_type":"usermanual2", + "kw":"Node OS,Nodes,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual2" + } + ], + "title":"Node OS", + "githuburl":"" + }, { "uri":"cce_10_0363.html", "node_id":"cce_10_0363.xml", "product_code":"cce", - "code":"134", + "code":"137", "des":"At least one cluster has been created.A key pair has been created for identity authentication upon remote node login.The node has at least 2 vCPUs and 4 GiB of memory.To ", "doc_type":"usermanual2", "kw":"Creating a Node,Nodes,User Guide", @@ -2632,9 +2476,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Creating a Node", @@ -2644,27 +2486,25 @@ "uri":"cce_10_0198.html", "node_id":"cce_10_0198.xml", "product_code":"cce", - "code":"135", - "des":"In CCE, you can create a node (Creating a Node) or add existing nodes (ECSs or) to your cluster.While an ECS is being accepted into a cluster, the operating system of the", + "code":"138", + "des":"In CCE, you can create a node (Creating a Node) or add existing nodes (ECSs) to your cluster for management.While an ECS is being accepted into a cluster, the operating s", "doc_type":"usermanual2", - "kw":"Adding Nodes for Management,Nodes,User Guide", + "kw":"Accepting Nodes for Management,Nodes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], - "title":"Adding Nodes for Management", + "title":"Accepting Nodes for Management", "githuburl":"" }, { "uri":"cce_10_0185.html", "node_id":"cce_10_0185.xml", "product_code":"cce", - "code":"136", + "code":"139", "des":"If you use SSH to log in to a node (an ECS), ensure that the ECS already has an EIP (a public IP address).Only login to a running ECS is allowed.Only the user linux can l", "doc_type":"usermanual2", "kw":"Logging In to a Node,Nodes,User Guide", @@ -2672,9 +2512,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Logging In to a Node", @@ -2684,7 +2522,7 @@ "uri":"cce_10_0672.html", "node_id":"cce_10_0672.xml", "product_code":"cce", - "code":"137", + "code":"140", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"node labels", @@ -2692,9 +2530,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Management Nodes", @@ -2704,7 +2540,7 @@ "uri":"cce_10_0004.html", "node_id":"cce_10_0004.xml", "product_code":"cce", - "code":"138", + "code":"141", "des":"You can add different labels to nodes and define different attributes for labels. By using these node labels, you can quickly understand the characteristics of each node.", "doc_type":"usermanual2", "kw":"node labels,Inherent Label of a Node,Managing Node Labels,Management Nodes,User Guide", @@ -2712,9 +2548,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Managing Node Labels", @@ -2724,17 +2558,15 @@ "uri":"cce_10_0352.html", "node_id":"cce_10_0352.xml", "product_code":"cce", - "code":"139", - "des":"Taints enable a node to repel specific pods to prevent these pods from being scheduled to the node.A taint is a key-value pair associated with an effect. The following ef", + "code":"142", + "des":"Taints enable a node to repel specific pods to prevent these pods from being scheduled to the node.On the CCE console, you can also batch manage nodes' taints.Enter the k", "doc_type":"usermanual2", "kw":"NoSchedule,PreferNoSchedule,NoExecute,System Taints,Managing Node Taints,Management Nodes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Managing Node Taints", @@ -2744,7 +2576,7 @@ "uri":"cce_10_0003.html", "node_id":"cce_10_0003.xml", "product_code":"cce", - "code":"140", + "code":"143", "des":"You can reset a node to modify the node configuration, such as the node OS and login mode.Resetting a node will reinstall the node OS and the Kubernetes software on the n", "doc_type":"usermanual2", "kw":"reset a node,Resetting a Node,Management Nodes,User Guide", @@ -2752,9 +2584,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Resetting a Node", @@ -2764,7 +2594,7 @@ "uri":"cce_10_0338.html", "node_id":"cce_10_0338.xml", "product_code":"cce", - "code":"141", + "code":"144", "des":"Removing a node from a cluster will re-install the node OS and clear CCE components on the node.Removing a node will not delete the server corresponding to the node. You ", "doc_type":"usermanual2", "kw":"Removing a Node,Management Nodes,User Guide", @@ -2772,9 +2602,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Removing a Node", @@ -2784,37 +2612,33 @@ "uri":"cce_10_0184.html", "node_id":"cce_10_0184.xml", "product_code":"cce", - "code":"142", + "code":"145", "des":"Each node in a cluster is a cloud server or physical machine. After a cluster node is created, you can change the cloud server name or specifications as required. 
Modifyi", "doc_type":"usermanual2", - "kw":"synchronize the ECS,Synchronizing Data with Cloud Servers,Management Nodes,User Guide", + "kw":"synchronize the ECS,Synchronizing the Data of Cloud Servers,Management Nodes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], - "title":"Synchronizing Data with Cloud Servers", + "title":"Synchronizing the Data of Cloud Servers", "githuburl":"" }, { "uri":"cce_10_0605.html", "node_id":"cce_10_0605.xml", "product_code":"cce", - "code":"143", - "des":"After you enable the nodal drainage function on the console, the system sets the node to be non-schedulable and securely evicts all pods that comply with Nodal Drainage R", + "code":"146", + "des":"After you enable nodal drainage on the console, CCE configures the node to be non-schedulable and securely evicts all pods that comply with Nodal Drainage Rules on the no", "doc_type":"usermanual2", - "kw":"nodal drainage,Draining a Node,Management Nodes,User Guide", + "kw":"nodal drainage,nodal drainage,Draining a Node,Management Nodes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Draining a Node", @@ -2824,7 +2648,7 @@ "uri":"cce_10_0186.html", "node_id":"cce_10_0186.xml", "product_code":"cce", - "code":"144", + "code":"147", "des":"When a node in a CCE cluster is deleted, services running on the node will also be deleted. Exercise caution when performing this operation.VM nodes that are being used b", "doc_type":"usermanual2", "kw":"Deleting a Node,Management Nodes,User Guide", @@ -2832,9 +2656,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Deleting a Node", @@ -2844,7 +2666,7 @@ "uri":"cce_10_0036.html", "node_id":"cce_10_0036.xml", "product_code":"cce", - "code":"145", + "code":"148", "des":"After a node in the cluster is stopped, services on the node are also stopped. Before stopping a node, ensure that discontinuity of the services on the node will not resu", "doc_type":"usermanual2", "kw":"Stopping a Node,Management Nodes,User Guide", @@ -2852,9 +2674,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Stopping a Node", @@ -2864,7 +2684,7 @@ "uri":"cce_10_0276.html", "node_id":"cce_10_0276.xml", "product_code":"cce", - "code":"146", + "code":"149", "des":"In a rolling upgrade, a new node is created, existing workloads are migrated to the new node, and then the old node is deleted. Figure 1 shows the migration process.The o", "doc_type":"usermanual2", "kw":"Performing Rolling Upgrade for Nodes,Management Nodes,User Guide", @@ -2872,9 +2692,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Performing Rolling Upgrade for Nodes", @@ -2884,7 +2702,7 @@ "uri":"cce_10_0704.html", "node_id":"cce_10_0704.xml", "product_code":"cce", - "code":"147", + "code":"150", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Node O&M", @@ -2892,9 +2710,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node O&M", @@ -2904,7 +2720,7 @@ "uri":"cce_10_0178.html", "node_id":"cce_10_0178.xml", "product_code":"cce", - "code":"148", + "code":"151", "des":"Some node resources are used to run mandatory Kubernetes system components and resources to make the node as part of your cluster. Therefore, the total number of node res", "doc_type":"usermanual2", "kw":"total number of node resources,Node Resource Reservation Policy,Node O&M,User Guide", @@ -2912,9 +2728,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node Resource Reservation Policy", @@ -2924,7 +2738,7 @@ "uri":"cce_10_0341.html", "node_id":"cce_10_0341.xml", "product_code":"cce", - "code":"149", + "code":"152", "des":"This section describes how to allocate data disk space to nodes so that you can configure the data disk space accordingly.When creating a node, configure data disks for t", "doc_type":"usermanual2", "kw":"data disk space allocation,Container engine and container image space,basesize,basesize,Container St", @@ -2932,9 +2746,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Data Disk Space Allocation", @@ -2944,7 +2756,7 @@ "uri":"cce_10_0348.html", "node_id":"cce_10_0348.xml", "product_code":"cce", - "code":"150", + "code":"153", "des":"The maximum number of pods that can be created on a node is calculated based on the cluster type:For a cluster using the container tunnel network model, the value depends", "doc_type":"usermanual2", "kw":"Maximum Number of Pods on a Node,alpha.cce/fixPoolMask,maximum number of pods,Maximum Number of Pods", @@ -2952,9 +2764,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Maximum Number of Pods That Can Be Created on a Node", @@ -2964,17 +2774,15 @@ "uri":"cce_10_0601.html", "node_id":"cce_10_0601.xml", "product_code":"cce", - "code":"151", - "des":"Kubernetes has removed dockershim from v1.24 and does not support Docker by default. CCE will continue to support Docker in v1.25 but just till v1.27. The following steps", + "code":"154", + "des":"Kubernetes has removed dockershim from v1.24 and does not support Docker by default. CCE is going to stop the support for Docker. Change the node container engine from Do", "doc_type":"usermanual2", "kw":"Migrating Nodes from Docker to containerd,Node O&M,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Migrating Nodes from Docker to containerd", @@ -2984,7 +2792,7 @@ "uri":"cce_10_0659.html", "node_id":"cce_10_0659.xml", "product_code":"cce", - "code":"152", + "code":"155", "des":"The node fault detection function depends on the NPD add-on. The add-on instances run on nodes and monitor nodes. 
This section describes how to enable node fault detectio", "doc_type":"usermanual2", "kw":"Node Fault Detection,Check Items,Node Fault Detection Policy,Node O&M,User Guide", @@ -2992,9 +2800,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node Fault Detection Policy", @@ -3004,7 +2810,7 @@ "uri":"cce_10_0035.html", "node_id":"cce_10_0035.xml", "product_code":"cce", - "code":"153", + "code":"156", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Node Pools", @@ -3012,9 +2818,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node Pools", @@ -3024,7 +2828,7 @@ "uri":"cce_10_0081.html", "node_id":"cce_10_0081.xml", "product_code":"cce", - "code":"154", + "code":"157", "des":"CCE introduces node pools to help you better manage nodes in Kubernetes clusters. A node pool contains one node or a group of nodes with identical configuration in a clus", "doc_type":"usermanual2", "kw":"DefaultPool,DefaultPool,Deploying a Workload in a Specified Node Pool,Node Pool Overview,Node Pools,", @@ -3032,9 +2836,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node Pool Overview", @@ -3044,7 +2846,7 @@ "uri":"cce_10_0012.html", "node_id":"cce_10_0012.xml", "product_code":"cce", - "code":"155", + "code":"158", "des":"This section describes how to create a node pool and perform operations on the node pool. For details about how a node pool works, see Node Pool Overview.The Autoscaler a", "doc_type":"usermanual2", "kw":"Creating a Node Pool,Node Pools,User Guide", @@ -3052,9 +2854,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Creating a Node Pool", @@ -3064,7 +2864,7 @@ "uri":"cce_10_0222.html", "node_id":"cce_10_0222.xml", "product_code":"cce", - "code":"156", + "code":"159", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Managing a Node Pool", @@ -3072,9 +2872,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Managing a Node Pool", @@ -3084,17 +2882,15 @@ "uri":"cce_10_0653.html", "node_id":"cce_10_0653.xml", "product_code":"cce", - "code":"157", - "des":"When editing the resource tags of the node pool. The modified configuration takes effect only for new nodes. To synchronize the configuration to the existing nodes, manua", + "code":"160", + "des":"The modification of resource tags of a node pool takes effect only on new nodes. 
To synchronize the modification onto existing nodes, manually reset the existing nodes.Ch", "doc_type":"usermanual2", "kw":"Updating a Node Pool,Managing a Node Pool,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Updating a Node Pool", @@ -3104,7 +2900,7 @@ "uri":"cce_10_0727.html", "node_id":"cce_10_0727.xml", "product_code":"cce", - "code":"158", + "code":"161", "des":"Auto Scaling (AS) enables elastic scaling of nodes in a node pool based on scaling policies. Without this function, you have to manually adjust the number of nodes in a n", "doc_type":"usermanual2", "kw":"Updating an AS Configuration,Managing a Node Pool,User Guide", @@ -3112,9 +2908,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Updating an AS Configuration", @@ -3124,17 +2918,15 @@ "uri":"cce_10_0652.html", "node_id":"cce_10_0652.xml", "product_code":"cce", - "code":"159", + "code":"162", "des":"The default node pool DefaultPool does not support the following management operations.CCE allows you to highly customize Kubernetes parameter settings on core components", "doc_type":"usermanual2", - "kw":"PIDs,Configuring a Node Pool,Managing a Node Pool,User Guide", + "kw":"Configuring a Node Pool,Managing a Node Pool,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Configuring a Node Pool", @@ -3144,7 +2936,7 @@ "uri":"cce_10_0655.html", "node_id":"cce_10_0655.xml", "product_code":"cce", - "code":"160", + "code":"163", "des":"You can copy the configuration of an existing node pool on the CCE console to create new node pools.", "doc_type":"usermanual2", "kw":"Copying a Node Pool,Managing a Node Pool,User Guide", @@ -3152,9 +2944,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Copying a Node Pool", @@ -3164,7 +2954,7 @@ "uri":"cce_10_0654.html", "node_id":"cce_10_0654.xml", "product_code":"cce", - "code":"161", + "code":"164", "des":"After the configuration of a node pool is updated, some configurations cannot be automatically synchronized for existing nodes. You can manually synchronize configuration", "doc_type":"usermanual2", "kw":"Synchronizing Node Pools,Managing a Node Pool,User Guide", @@ -3172,9 +2962,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Synchronizing Node Pools", @@ -3184,7 +2972,7 @@ "uri":"cce_10_0660.html", "node_id":"cce_10_0660.xml", "product_code":"cce", - "code":"162", + "code":"165", "des":"When CCE releases a new OS image, existing nodes cannot be automatically upgraded. You can manually upgrade them in batches.This section describes how to upgrade an OS by", "doc_type":"usermanual2", "kw":"Upgrading an OS,Managing a Node Pool,User Guide", @@ -3192,9 +2980,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Upgrading an OS", @@ -3204,7 +2990,7 @@ "uri":"cce_10_0656.html", "node_id":"cce_10_0656.xml", "product_code":"cce", - "code":"163", + "code":"166", "des":"Nodes in a node pool can be migrated to the default node pool. 
Nodes in the default node pool or a custom node pool cannot be migrated to other custom node pools.The migr", "doc_type":"usermanual2", "kw":"Migrating a Node,Managing a Node Pool,User Guide", @@ -3212,9 +2998,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Migrating a Node", @@ -3224,7 +3008,7 @@ "uri":"cce_10_0657.html", "node_id":"cce_10_0657.xml", "product_code":"cce", - "code":"164", + "code":"167", "des":"Deleting a node pool will delete nodes in the pool. Pods on these nodes will be automatically migrated to available nodes in other node pools.Deleting a node pool will de", "doc_type":"usermanual2", "kw":"Deleting a Node Pool,Managing a Node Pool,User Guide", @@ -3232,9 +3016,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Deleting a Node Pool", @@ -3244,7 +3026,7 @@ "uri":"cce_10_0046.html", "node_id":"cce_10_0046.xml", "product_code":"cce", - "code":"165", + "code":"168", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Deployments,StatefulSets,DaemonSets,jobs,cron jobs", @@ -3252,9 +3034,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Workloads", @@ -3264,7 +3044,7 @@ "uri":"cce_10_0006.html", "node_id":"cce_10_0006.xml", "product_code":"cce", - "code":"166", + "code":"169", "des":"A workload is an application running on Kubernetes. No matter how many components are there in your workload, you can run it in a group of Kubernetes pods. A workload is ", "doc_type":"usermanual2", "kw":"Deployments,StatefulSets,DaemonSets,jobs,cron jobs,Overview,Workloads,User Guide", @@ -3272,9 +3052,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Overview", @@ -3284,7 +3062,7 @@ "uri":"cce_10_0673.html", "node_id":"cce_10_0673.xml", "product_code":"cce", - "code":"167", + "code":"170", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Creating a Workload", @@ -3292,9 +3070,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Creating a Workload", @@ -3304,7 +3080,7 @@ "uri":"cce_10_0047.html", "node_id":"cce_10_0047.xml", "product_code":"cce", - "code":"168", + "code":"171", "des":"Deployments are workloads (for example, Nginx) that do not store any data or status. 
You can create Deployments on the CCE console or by running kubectl commands.Before c", "doc_type":"usermanual2", "kw":"create a workload using kubectl,Creating a Deployment,Creating a Workload,User Guide", @@ -3312,9 +3088,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Creating a Deployment", @@ -3324,7 +3098,7 @@ "uri":"cce_10_0048.html", "node_id":"cce_10_0048.xml", "product_code":"cce", - "code":"169", + "code":"172", "des":"StatefulSets are a type of workloads whose data or status is stored while they are running. For example, MySQL is a StatefulSet because it needs to store new data.A conta", "doc_type":"usermanual2", "kw":"Using kubectl,Creating a StatefulSet,Creating a Workload,User Guide", @@ -3332,9 +3106,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Creating a StatefulSet", @@ -3344,7 +3116,7 @@ "uri":"cce_10_0216.html", "node_id":"cce_10_0216.xml", "product_code":"cce", - "code":"170", + "code":"173", "des":"CCE provides deployment and management capabilities for multiple types of containers and supports features of container workloads, including creation, configuration, moni", "doc_type":"usermanual2", "kw":"create a workload using kubectl,Creating a DaemonSet,Creating a Workload,User Guide", @@ -3352,9 +3124,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Creating a DaemonSet", @@ -3364,7 +3134,7 @@ "uri":"cce_10_0150.html", "node_id":"cce_10_0150.xml", "product_code":"cce", - "code":"171", + "code":"174", "des":"Jobs are short-lived and run for a certain time to completion. They can be executed immediately after being deployed. It is completed after it exits normally (exit 0).A j", "doc_type":"usermanual2", "kw":"Creating a Job,Creating a Workload,User Guide", @@ -3372,9 +3142,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Creating a Job", @@ -3384,7 +3152,7 @@ "uri":"cce_10_0151.html", "node_id":"cce_10_0151.xml", "product_code":"cce", - "code":"172", + "code":"175", "des":"A cron job runs on a repeating schedule. You can perform time synchronization for all active nodes at a fixed time point.A cron job runs periodically at the specified tim", "doc_type":"usermanual2", "kw":"time synchronization,Creating a Cron Job,Creating a Workload,User Guide", @@ -3392,9 +3160,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Creating a Cron Job", @@ -3404,7 +3170,7 @@ "uri":"cce_10_0130.html", "node_id":"cce_10_0130.xml", "product_code":"cce", - "code":"173", + "code":"176", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Configuring a Container", @@ -3412,9 +3178,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Configuring a Container", @@ -3424,7 +3188,7 @@ "uri":"cce_10_0354.html", "node_id":"cce_10_0354.xml", "product_code":"cce", - "code":"174", + "code":"177", "des":"When creating a workload, you can configure containers to use the same time zone as the node. You can enable time zone synchronization when creating a workload.The time z", "doc_type":"usermanual2", "kw":"Configuring Time Zone Synchronization,Configuring a Container,User Guide", @@ -3432,9 +3196,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Configuring Time Zone Synchronization", @@ -3444,7 +3206,7 @@ "uri":"cce_10_0353.html", "node_id":"cce_10_0353.xml", "product_code":"cce", - "code":"175", + "code":"178", "des":"When a workload is created, the container image is pulled from the image repository to the node. The image is also pulled when the workload is restarted or upgraded.By de", "doc_type":"usermanual2", "kw":"Configuring an Image Pull Policy,Configuring a Container,User Guide", @@ -3452,9 +3214,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Configuring an Image Pull Policy", @@ -3464,7 +3224,7 @@ "uri":"cce_10_0009.html", "node_id":"cce_10_0009.xml", "product_code":"cce", - "code":"176", + "code":"179", "des":"CCE allows you to create workloads using images pulled from third-party image repositories.Generally, a third-party image repository can be accessed only after authentica", "doc_type":"usermanual2", "kw":"Using Third-Party Images,Configuring a Container,User Guide", @@ -3472,9 +3232,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Using Third-Party Images", @@ -3484,17 +3242,15 @@ "uri":"cce_10_0163.html", "node_id":"cce_10_0163.xml", "product_code":"cce", - "code":"177", + "code":"180", "des":"CCE allows you to set resource requirements and limits, such as CPU and RAM, for added containers during workload creation. Kubernetes also allows using YAML to set requi", "doc_type":"usermanual2", - "kw":"Configuring Container Specifications,Configuring a Container,User Guide", + "kw":"ephemeral storage,Configuring Container Specifications,Configuring a Container,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Configuring Container Specifications", @@ -3504,7 +3260,7 @@ "uri":"cce_10_0105.html", "node_id":"cce_10_0105.xml", "product_code":"cce", - "code":"178", + "code":"181", "des":"CCE provides callback functions for the lifecycle management of containerized applications. 
For example, if you want a container to perform a certain operation before sto", "doc_type":"usermanual2", "kw":"Startup Command,Post-Start,Pre-Stop,Configuring Container Lifecycle Parameters,Configuring a Contain", @@ -3512,9 +3268,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Configuring Container Lifecycle Parameters", @@ -3524,7 +3278,7 @@ "uri":"cce_10_0112.html", "node_id":"cce_10_0112.xml", "product_code":"cce", - "code":"179", + "code":"182", "des":"Health check regularly checks the health status of containers during container running. If the health check function is not configured, a pod cannot detect application ex", "doc_type":"usermanual2", "kw":"Health check,HTTP request,TCP port,CLI,Configuring Container Health Check,Configuring a Container,Us", @@ -3532,9 +3286,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Configuring Container Health Check", @@ -3544,7 +3296,7 @@ "uri":"cce_10_0113.html", "node_id":"cce_10_0113.xml", "product_code":"cce", - "code":"180", + "code":"183", "des":"An environment variable is a variable whose value can affect the way a running container will behave. You can modify environment variables even after workloads are deploy", "doc_type":"usermanual2", "kw":"Configuring Environment Variables,Configuring a Container,User Guide", @@ -3552,9 +3304,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Configuring Environment Variables", @@ -3564,7 +3314,7 @@ "uri":"cce_10_0397.html", "node_id":"cce_10_0397.xml", "product_code":"cce", - "code":"181", + "code":"184", "des":"In actual applications, upgrade is a common operation. A Deployment, StatefulSet, or DaemonSet can easily support application upgrade.You can set different upgrade polici", "doc_type":"usermanual2", "kw":"Workload Upgrade Policies,Configuring a Container,User Guide", @@ -3572,9 +3322,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Workload Upgrade Policies", @@ -3584,7 +3332,7 @@ "uri":"cce_10_0232.html", "node_id":"cce_10_0232.xml", "product_code":"cce", - "code":"182", + "code":"185", "des":"Kubernetes supports node affinity and pod affinity/anti-affinity. You can configure custom rules to achieve affinity and anti-affinity scheduling. For example, you can de", "doc_type":"usermanual2", "kw":"Scheduling Policies (Affinity/Anti-affinity),Configuring a Container,User Guide", @@ -3592,9 +3340,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Scheduling Policies (Affinity/Anti-affinity)", @@ -3604,7 +3350,7 @@ "uri":"cce_10_0728.html", "node_id":"cce_10_0728.xml", "product_code":"cce", - "code":"183", + "code":"186", "des":"Tolerations allow the scheduler to schedule pods to nodes with target taints. Tolerances work with node taints. Each node allows one or more taints. 
If no tolerance is co", "doc_type":"usermanual2", "kw":"Taints and Tolerations,Configuring a Container,User Guide", @@ -3612,9 +3358,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Taints and Tolerations", @@ -3624,7 +3368,7 @@ "uri":"cce_10_0386.html", "node_id":"cce_10_0386.xml", "product_code":"cce", - "code":"184", + "code":"187", "des":"CCE allows you to add annotations to a YAML file to realize some advanced pod functions. The following table describes the annotations you can add.When you create a workl", "doc_type":"usermanual2", "kw":"Labels and Annotations,Configuring a Container,User Guide", @@ -3632,9 +3376,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Labels and Annotations", @@ -3644,7 +3386,7 @@ "uri":"cce_10_00356.html", "node_id":"cce_10_00356.xml", "product_code":"cce", - "code":"185", + "code":"188", "des":"If you encounter unexpected problems when using a container, you can log in to the container to debug it.The example output is as follows:NAME ", "doc_type":"usermanual2", "kw":"Accessing a Container,Workloads,User Guide", @@ -3652,9 +3394,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Accessing a Container", @@ -3664,7 +3404,7 @@ "uri":"cce_10_0007.html", "node_id":"cce_10_0007.xml", "product_code":"cce", - "code":"186", + "code":"189", "des":"After a workload is created, you can upgrade, monitor, roll back, or delete the workload, as well as edit its YAML file.Workload/Job managementOperationDescriptionMonitor", "doc_type":"usermanual2", "kw":"Managing Workloads and Jobs,Workloads,User Guide", @@ -3672,19 +3412,35 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Managing Workloads and Jobs", "githuburl":"" }, + { + "uri":"cce_10_0833.html", + "node_id":"cce_10_0833.xml", + "product_code":"cce", + "code":"190", + "des":"Custom Resource Definition (CRD) is an extension of Kubernetes APIs. When default Kubernetes resources cannot meet service requirements, you can use CRDs to define new re", + "doc_type":"usermanual2", + "kw":"Managing Custom Resources,Workloads,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual2" + } + ], + "title":"Managing Custom Resources", + "githuburl":"" + }, { "uri":"cce_10_0463.html", "node_id":"cce_10_0463.xml", "product_code":"cce", - "code":"187", + "code":"191", "des":"The most significant difference is that each Kata container (pod) runs on an independent micro-VM, has an independent OS kernel, and is securely isolated at the virtualiz", "doc_type":"usermanual2", "kw":"Kata Runtime and Common Runtime,Workloads,User Guide", @@ -3692,9 +3448,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Kata Runtime and Common Runtime", @@ -3704,7 +3458,7 @@ "uri":"cce_10_0674.html", "node_id":"cce_10_0674.xml", "product_code":"cce", - "code":"188", + "code":"192", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Scheduling", @@ -3712,9 +3466,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Scheduling", @@ -3724,7 +3476,7 @@ "uri":"cce_10_0702.html", "node_id":"cce_10_0702.xml", "product_code":"cce", - "code":"189", + "code":"193", "des":"CCE supports different types of resource scheduling and task scheduling, improving application performance and overall cluster resource utilization. This section describe", "doc_type":"usermanual2", "kw":"Overview,Scheduling,User Guide", @@ -3732,9 +3484,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Overview", @@ -3744,7 +3494,7 @@ "uri":"cce_10_0551.html", "node_id":"cce_10_0551.xml", "product_code":"cce", - "code":"190", + "code":"194", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"CPU Scheduling", @@ -3752,9 +3502,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"CPU Scheduling", @@ -3764,7 +3512,7 @@ "uri":"cce_10_0351.html", "node_id":"cce_10_0351.xml", "product_code":"cce", - "code":"191", + "code":"195", "des":"By default, kubelet uses CFS quotas to enforce pod CPU limits. When the node runs many CPU-bound pods, the workload can move to different CPU cores depending on whether t", "doc_type":"usermanual2", "kw":"CPU Policy,CPU Scheduling,User Guide", @@ -3772,9 +3520,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"CPU Policy", @@ -3784,7 +3530,7 @@ "uri":"cce_10_0552.html", "node_id":"cce_10_0552.xml", "product_code":"cce", - "code":"192", + "code":"196", "des":"Kubernetes provides two CPU policies: none and static.none: The CPU policy is disabled by default, indicating the existing scheduling behavior.static: The static CPU core", "doc_type":"usermanual2", "kw":"Enhanced CPU Policy,CPU Scheduling,User Guide", @@ -3792,9 +3538,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Enhanced CPU Policy", @@ -3804,7 +3548,7 @@ "uri":"cce_10_0720.html", "node_id":"cce_10_0720.xml", "product_code":"cce", - "code":"193", + "code":"197", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"GPU Scheduling", @@ -3812,9 +3556,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"GPU Scheduling", @@ -3824,7 +3566,7 @@ "uri":"cce_10_0345.html", "node_id":"cce_10_0345.xml", "product_code":"cce", - "code":"194", + "code":"198", "des":"You can use GPUs in CCE containers.A GPU node has been created. 
For details, see Creating a Node.The gpu-device-plugin (previously gpu-beta add-on) has been installed. Du", "doc_type":"usermanual2", "kw":"Default GPU Scheduling in Kubernetes,GPU Scheduling,User Guide", @@ -3832,9 +3574,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Default GPU Scheduling in Kubernetes", @@ -3844,7 +3584,7 @@ "uri":"cce_10_0423.html", "node_id":"cce_10_0423.xml", "product_code":"cce", - "code":"195", + "code":"199", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Volcano Scheduling", @@ -3852,9 +3592,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Volcano Scheduling", @@ -3864,17 +3602,15 @@ "uri":"cce_10_0721.html", "node_id":"cce_10_0721.xml", "product_code":"cce", - "code":"196", - "des":"Volcano Scheduler is a pod scheduling component, which consists of a series of actions and plugins. Actions should be executed in every step. Plugins provide the action a", + "code":"200", + "des":"Volcano is a Kubernetes-based batch processing platform that supports machine learning, deep learning, bioinformatics, genomics, and other big data applications. It provi", "doc_type":"usermanual2", "kw":"Overview,Volcano Scheduling,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Overview", @@ -3884,7 +3620,7 @@ "uri":"cce_10_0722.html", "node_id":"cce_10_0722.xml", "product_code":"cce", - "code":"197", + "code":"201", "des":"Volcano is a Kubernetes-based batch processing platform with high-performance general computing capabilities like task scheduling engine, heterogeneous chip management, a", "doc_type":"usermanual2", "kw":"Scheduling Workloads,Volcano Scheduling,User Guide", @@ -3892,9 +3628,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Scheduling Workloads", @@ -3904,77 +3638,69 @@ "uri":"cce_10_0768.html", "node_id":"cce_10_0768.xml", "product_code":"cce", - "code":"198", + "code":"202", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", - "kw":"Resource Utilization-based Scheduling", + "kw":"Resource Usage-based Scheduling", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], - "title":"Resource Utilization-based Scheduling", + "title":"Resource Usage-based Scheduling", "githuburl":"" }, { "uri":"cce_10_0773.html", "node_id":"cce_10_0773.xml", "product_code":"cce", - "code":"199", - "des":"Binpack is a pod scheduling add-on that enables the scheduler to preferentially schedule pods to nodes with high resource allocation. 
This reduces resource fragments on e", + "code":"203", + "des":"Bin packing is an optimization algorithm that aims to properly allocate resources to each job and get the jobs done using the minimum amount of resources. After bin packi", "doc_type":"usermanual2", - "kw":"Binpack,Resource Utilization-based Scheduling,User Guide", + "kw":"Bin Packing,Resource Usage-based Scheduling,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], - "title":"Binpack", + "title":"Bin Packing", "githuburl":"" }, { "uri":"cce_10_0766.html", "node_id":"cce_10_0766.xml", "product_code":"cce", - "code":"200", - "des":"Scheduling in a cluster is the process of binding pending pods to nodes, and is performed by a component called kube-scheduler or Volcano scheduler. The scheduler uses a ", + "code":"204", + "des":"Scheduling in a cluster is the process of binding pending pods to nodes, and is performed by a component called kube-scheduler or Volcano Scheduler. The scheduler uses a ", "doc_type":"usermanual2", - "kw":"Descheduler,Resource Utilization-based Scheduling,User Guide", + "kw":"Descheduling,Resource Usage-based Scheduling,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], - "title":"Descheduler", + "title":"Descheduling", "githuburl":"" }, { "uri":"cce_10_0767.html", "node_id":"cce_10_0767.xml", "product_code":"cce", - "code":"201", + "code":"205", "des":"In scenarios such as node pool replacement and rolling node upgrade, an old resource pool needs to be replaced with a new one. To prevent the node pool replacement from a", "doc_type":"usermanual2", - "kw":"Node Pool Affinity,Resource Utilization-based Scheduling,User Guide", + "kw":"Node Pool Affinity,Resource Usage-based Scheduling,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Node Pool Affinity", @@ -3984,7 +3710,7 @@ "uri":"cce_10_0774.html", "node_id":"cce_10_0774.xml", "product_code":"cce", - "code":"202", + "code":"206", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Priority-based Scheduling", @@ -3992,9 +3718,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Priority-based Scheduling", @@ -4004,7 +3728,7 @@ "uri":"cce_10_0775.html", "node_id":"cce_10_0775.xml", "product_code":"cce", - "code":"203", + "code":"207", "des":"A pod priority indicates the importance of a pod relative to other pods. Volcano supports pod PriorityClasses in Kubernetes. 
After PriorityClasses are configured, the sch", "doc_type":"usermanual2", "kw":"Priority-based Scheduling,Priority-based Scheduling,User Guide", @@ -4012,9 +3736,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Priority-based Scheduling", @@ -4024,7 +3746,7 @@ "uri":"cce_10_0776.html", "node_id":"cce_10_0776.xml", "product_code":"cce", - "code":"204", + "code":"208", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"AI Performance-based Scheduling", @@ -4032,9 +3754,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"AI Performance-based Scheduling", @@ -4044,7 +3764,7 @@ "uri":"cce_10_0777.html", "node_id":"cce_10_0777.xml", "product_code":"cce", - "code":"205", + "code":"209", "des":"Dominant Resource Fairness (DRF) is a scheduling algorithm based on the dominant resource of a container group. DRF scheduling can be used to enhance the service throughp", "doc_type":"usermanual2", "kw":"DRF,AI Performance-based Scheduling,User Guide", @@ -4052,9 +3772,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"DRF", @@ -4064,7 +3782,7 @@ "uri":"cce_10_0778.html", "node_id":"cce_10_0778.xml", "product_code":"cce", - "code":"206", + "code":"210", "des":"Gang scheduling is a scheduling algorithm that schedules correlated processes or threads to run simultaneously on different processors. It meets the scheduling requiremen", "doc_type":"usermanual2", "kw":"Gang,AI Performance-based Scheduling,User Guide", @@ -4072,9 +3790,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Gang", @@ -4084,17 +3800,15 @@ "uri":"cce_10_0425.html", "node_id":"cce_10_0425.xml", "product_code":"cce", - "code":"207", - "des":"When the node runs many CPU-bound pods, the workload can move to different CPU cores depending on whether the pod is throttled and which CPU cores are available at schedu", + "code":"211", + "des":"When a node runs many CPU-bound pods, the workload can move to different CPU cores depending on whether the pod is throttled and which CPU cores are available at scheduli", "doc_type":"usermanual2", "kw":"NUMA Affinity Scheduling,Volcano Scheduling,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"NUMA Affinity Scheduling", @@ -4104,7 +3818,7 @@ "uri":"cce_10_0709.html", "node_id":"cce_10_0709.xml", "product_code":"cce", - "code":"208", + "code":"212", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Cloud Native Hybrid Deployment", @@ -4112,9 +3826,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Cloud Native Hybrid Deployment", @@ -4124,7 +3836,7 @@ "uri":"cce_10_0384.html", "node_id":"cce_10_0384.xml", "product_code":"cce", - "code":"209", + "code":"213", "des":"Many services see surges in traffic. To ensure performance and stability, resources are often requested at the maximum needed. However, the surges may ebb very shortly an", "doc_type":"usermanual2", "kw":"Dynamic Resource Oversubscription,Cloud Native Hybrid Deployment,User Guide", @@ -4132,9 +3844,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Dynamic Resource Oversubscription", @@ -4144,7 +3854,7 @@ "uri":"cce_10_0020.html", "node_id":"cce_10_0020.xml", "product_code":"cce", - "code":"210", + "code":"214", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Network", @@ -4152,9 +3862,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Network", @@ -4164,7 +3872,7 @@ "uri":"cce_10_0010.html", "node_id":"cce_10_0010.xml", "product_code":"cce", - "code":"211", + "code":"215", "des":"You can learn about a cluster network from the following two aspects:What is a cluster network like? A cluster consists of multiple nodes, and pods (or containers) are ru", "doc_type":"usermanual2", "kw":"Overview,Network,User Guide", @@ -4172,9 +3880,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Overview", @@ -4184,7 +3890,7 @@ "uri":"cce_10_0280.html", "node_id":"cce_10_0280.xml", "product_code":"cce", - "code":"212", + "code":"216", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Container Network Models", @@ -4192,9 +3898,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Container Network Models", @@ -4204,7 +3908,7 @@ "uri":"cce_10_0281.html", "node_id":"cce_10_0281.xml", "product_code":"cce", - "code":"213", + "code":"217", "des":"The container network assigns IP addresses to pods in a cluster and provides networking services. 
In CCE, you can select the following network models for your cluster:Tun", "doc_type":"usermanual2", "kw":"Overview,Container Network Models,User Guide", @@ -4212,9 +3916,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Overview", @@ -4224,7 +3926,7 @@ "uri":"cce_10_0282.html", "node_id":"cce_10_0282.xml", "product_code":"cce", - "code":"214", + "code":"218", "des":"The container tunnel network is constructed on but independent of the node network through tunnel encapsulation. This network model uses VXLAN to encapsulate Ethernet pac", "doc_type":"usermanual2", "kw":"Container Tunnel Network,Container Network Models,User Guide", @@ -4232,9 +3934,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Container Tunnel Network", @@ -4244,7 +3944,7 @@ "uri":"cce_10_0283.html", "node_id":"cce_10_0283.xml", "product_code":"cce", - "code":"215", + "code":"219", "des":"The VPC network uses VPC routing to integrate with the underlying network. This network model is suitable for performance-intensive scenarios. The maximum number of nodes", "doc_type":"usermanual2", "kw":"VPC Network,Container Network Models,User Guide", @@ -4252,9 +3952,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"VPC Network", @@ -4264,7 +3962,7 @@ "uri":"cce_10_0284.html", "node_id":"cce_10_0284.xml", "product_code":"cce", - "code":"216", + "code":"220", "des":"Developed by CCE, Cloud Native 2.0 network deeply integrates Elastic Network Interfaces (ENIs) and sub-ENIs of Virtual Private Cloud (VPC). Container IP addresses are all", "doc_type":"usermanual2", "kw":"Cloud Native 2.0 Network,Container Network Models,User Guide", @@ -4272,9 +3970,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Cloud Native 2.0 Network", @@ -4284,7 +3980,7 @@ "uri":"cce_10_0247.html", "node_id":"cce_10_0247.xml", "product_code":"cce", - "code":"217", + "code":"221", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Service", @@ -4292,9 +3988,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Service", @@ -4304,7 +3998,7 @@ "uri":"cce_10_0249.html", "node_id":"cce_10_0249.xml", "product_code":"cce", - "code":"218", + "code":"222", "des":"After a pod is created, the following problems may occur if you directly access the pod:The pod can be deleted and recreated at any time by a controller such as a Deploym", "doc_type":"usermanual2", "kw":"Overview,Service,User Guide", @@ -4312,9 +4006,7 @@ "metedata":[ { "prodname":"cce", - "IsBot":"Yes", - "opensource":"true", - "documenttype":"usermanual" + "documenttype":"usermanual2" } ], "title":"Overview", @@ -4324,7 +4016,7 @@ "uri":"cce_10_0011.html", "node_id":"cce_10_0011.xml", "product_code":"cce", - "code":"219", + "code":"223", "des":"ClusterIP Services allow workloads in the same cluster to use their cluster-internal domain names to access each other.The cluster-internal domain name format is -

2024-03-29

+

2024-05-30

- + + + +

2024-04-28

+ + + + +

2024-03-29

+ +

2024-01-29

@@ -30,7 +40,7 @@

2023-05-30

- +

2023-02-10

@@ -40,7 +50,7 @@

2022-12-20

- +

2022-11-21

@@ -50,7 +60,7 @@

2022-08-27

-

EulerOS 2.9 is supported. For details, see OS Patch Notes for Cluster Nodes.

+

EulerOS 2.9 is supported.

2022-07-13

@@ -70,7 +80,7 @@

2022-04-14

-

Allowed cluster upgrade from v1.19 to v1.21. For details, see Performing In-place Upgrade.

+

Allowed cluster upgrade from v1.19 to v1.21.

2022-03-24

diff --git a/docs/cce/umn/cce_10_0002.html b/docs/cce/umn/cce_10_0002.html index bd79eafb..5da4fd1c 100644 --- a/docs/cce/umn/cce_10_0002.html +++ b/docs/cce/umn/cce_10_0002.html @@ -6,9 +6,9 @@ diff --git a/docs/cce/umn/cce_10_0003.html b/docs/cce/umn/cce_10_0003.html index 3b3d3c65..a39d3d97 100644 --- a/docs/cce/umn/cce_10_0003.html +++ b/docs/cce/umn/cce_10_0003.html @@ -6,9 +6,9 @@

Constraints

-

Precautions

+

Precautions

  • Only worker nodes can be reset. If a node is still unavailable after being reset, delete it and create a new one.
  • After a node is reset, the node OS will be reinstalled. Before resetting a node, drain the node to gracefully evict the pods running on the node to other available nodes. Perform this operation during off-peak hours.
  • After a node is reset, its system disk and data disks will be cleared. Back up important data before resetting a node.
  • After a worker node with an extra data disk attached is reset, the attachment will be cleared. In this case, attach the disk again and data will be retained.
  • The IP addresses of the workload pods on the node will change, but the container network access is not affected.
  • Ensure that there is sufficient remaining EVS disk quota.
  • While a node is being reset, the backend sets it to the unschedulable state.
  • Resetting a node will clear the Kubernetes labels and taints you added (those added by editing a node pool will not be lost). As a result, node-specific resources (such as local storage and workloads scheduled to this node) may be unavailable.
  • Resetting a node will cause PVC/PV data loss for the local PV associated with the node. These PVCs and PVs cannot be restored or used again. In this scenario, the pod that uses the local PV is evicted from the reset node. A new pod is created and stays in the pending state. This is because the PVC used by the pod has a node label, due to which the pod cannot be scheduled. After the node is reset, the pod may be scheduled to the reset node. In this case, the pod remains in the creating state because the underlying logical volume corresponding to the PVC does not exist.
-

Procedure

The new console allows you to reset nodes in batches. You can also use a private image to reset nodes in batches.

+

Procedure

You can batch reset nodes using private images.

  1. Log in to the CCE console and click the cluster name to access the cluster console.
  2. In the navigation pane, choose Nodes. On the displayed page, click the Nodes tab.
  3. In the node list, select one or more nodes to be reset and choose More > Reset Node in the Operation column.
  4. In the displayed dialog box, click Next.

    • For nodes in the DefaultPool node pool, the parameter setting page is displayed. Set the parameters by referring to 5.
    • For a node created in a node pool, parameters cannot be configured when the node is reset. The node is reset directly using the configuration image of its node pool.

  5. Specify node parameters.

    Compute Settings
    - @@ -63,7 +62,7 @@ @@ -78,22 +77,22 @@ - - - - + + + - - - - diff --git a/docs/cce/umn/cce_10_0004.html b/docs/cce/umn/cce_10_0004.html index 4412480d..4a2ed4e0 100644 --- a/docs/cce/umn/cce_10_0004.html +++ b/docs/cce/umn/cce_10_0004.html @@ -2,8 +2,7 @@

    Managing Node Labels

    Node Label Usage Scenario

    Node labels are mainly used in the following scenarios:

    -
    • Node management: Node labels are used to classify nodes.
    • Affinity and anti-affinity between a workload and node:
      • Different workloads have different resource requirements such as CPU, memory, and I/O. If a workload consumes too many resources in a cluster, other workloads in the same cluster may fail to run properly. In this case, you are advised to add different labels to nodes. When deploying a workload, you can select nodes with specified labels for affinity deployment to ensure the normal operation of the system. Otherwise, node anti-affinity deployment can be used.
      • A system can be divided into multiple modules. Each module consists of multiple microservices. To ensure efficient O&M, you can add a module label to each node so that each module can be deployed on the corresponding node. In this way, modules do not interfere with each other and microservices can be easily maintained on their nodes.
      -
    +
    • Node management: Node labels are used to classify nodes.
    • Node affinity or anti-affinity for workloads: By adding labels to nodes, you can schedule pods to specific nodes through node affinity or prevent pods from being scheduled to specific nodes through node anti-affinity. For details, see Scheduling Policies (Affinity/Anti-affinity).
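    The following is a minimal sketch of how a node label added here can be consumed through node affinity. The workload name, image, and the label key and value (example-module: frontend) are illustrative assumptions, not values created by CCE.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: frontend-demo                  # hypothetical workload name
spec:
  replicas: 1
  selector:
    matchLabels:
      app: frontend-demo
  template:
    metadata:
      labels:
        app: frontend-demo
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: example-module    # illustrative node label key
                operator: In
                values:
                - frontend             # illustrative node label value
      containers:
      - name: container-1
        image: nginx:latest            # illustrative image

    Pods of this Deployment are scheduled only to nodes carrying the example-module=frontend label; anti-affinity can be expressed in the same way with the NotIn operator.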

    Inherent Label of a Node

    After a node is created, some fixed labels exist and cannot be deleted. For details about these labels, see Table 1.

    Do not manually change the inherent labels that are automatically added to a node. If the manually changed value conflicts with the system value, the system value is used.

    diff --git a/docs/cce/umn/cce_10_0007.html b/docs/cce/umn/cce_10_0007.html index c6dba1f1..22595b6d 100644 --- a/docs/cce/umn/cce_10_0007.html +++ b/docs/cce/umn/cce_10_0007.html @@ -77,7 +77,7 @@

    Before viewing logs, ensure that the time of the browser is the same as that on the backend server.

    1. Log in to the CCE console, go to an existing cluster, and choose Workloads in the navigation pane.
    2. Click the Deployments tab and click View Log of the target workload.

      In the displayed View Log window, you can view logs.

      -

      The displayed logs are standard output logs of containers and do not have persistence and advanced O&M capabilities. To use more comprehensive log capabilities, see Logs. If the function of collecting standard output is enabled for the workload (enabled by default), you can go to AOM to view more workload logs. For details, see Connecting CCE to AOM.

      +

      The displayed logs are standard output logs of containers and do not have persistence and advanced O&M capabilities. To use more comprehensive log capabilities, see Logs. If the function of collecting standard output is enabled for the workload (enabled by default), you can go to AOM to view more workload logs. For details, see Collecting Container Logs Using ICAgent.

    diff --git a/docs/cce/umn/cce_10_0010.html b/docs/cce/umn/cce_10_0010.html index d8a3c6e3..dbbe0ae1 100644 --- a/docs/cce/umn/cce_10_0010.html +++ b/docs/cce/umn/cce_10_0010.html @@ -4,11 +4,11 @@

    You can learn about a cluster network from the following two aspects:

    • What is a cluster network like? A cluster consists of multiple nodes, and pods (or containers) are running on the nodes. Nodes and containers need to communicate with each other. For details about the cluster network types and their functions, see Cluster Network Structure.
    • How is pod access implemented in a cluster? Accessing a pod or container is a process of accessing services of a user. Kubernetes provides Service and Ingress to address pod access issues. This section summarizes common network access scenarios. You can select the proper scenario based on site requirements. For details about the network access scenarios, see Access Scenarios.

    Cluster Network Structure

    All nodes in the cluster are located in a VPC and use the VPC network. The container network is managed by dedicated network add-ons.

    -

    +

    • Node Network

      A node network assigns IP addresses to hosts (nodes in the figure above) in a cluster. Select a VPC subnet as the node network of the CCE cluster. The number of available IP addresses in a subnet determines the maximum number of nodes (including master nodes and worker nodes) that can be created in a cluster. This quantity is also affected by the container network. For details, see the container network model.

    • Container Network

      A container network assigns IP addresses to containers in a cluster. CCE inherits the IP-Per-Pod-Per-Network network model of Kubernetes. That is, each pod has an independent IP address on a network plane and all containers in a pod share the same network namespace. All pods in a cluster exist in a directly connected flat network. They can access each other through their IP addresses without using NAT. Kubernetes only provides a network mechanism for pods, but does not directly configure pod networks. The configuration of pod networks is implemented by specific container network add-ons. The container network add-ons are responsible for configuring networks for pods and managing container IP addresses.

      Currently, CCE supports the following container network models:

      -
      • Container tunnel network: The container tunnel network is constructed on but independent of the node network through tunnel encapsulation. This network model uses VXLAN to encapsulate Ethernet packets into UDP packets and transmits them in tunnels. Open vSwitch serves as the backend virtual switch.
      • VPC network: The VPC network uses VPC routing to integrate with the underlying network. This network model applies to performance-intensive scenarios. The maximum number of nodes allowed in a cluster depends on the route quota in a VPC network. Each node is assigned a CIDR block of a fixed size. This networking model is free from tunnel encapsulation overhead and outperforms the container tunnel network model. In addition, as VPC routing includes routes to node IP addresses and the container CIDR block, container pods in a cluster can be directly accessed from outside the cluster.
      • Developed by CCE, Cloud Native 2.0 network deeply integrates Elastic Network Interfaces (ENIs) and Sub Network Interfaces (sub-ENIs) of VPC. Container IP addresses are allocated from the VPC CIDR block. ELB passthrough networking is supported to direct access requests to containers. Security groups and elastic IPs (EIPs) are bound to deliver high performance.
      +
      • Container tunnel network: The container tunnel network is constructed on but independent of the node network through tunnel encapsulation. This network model uses VXLAN to encapsulate Ethernet packets into UDP packets and transmits them in tunnels. Open vSwitch serves as the backend virtual switch.
      • VPC network: The VPC network uses VPC routing to integrate with the underlying network. This network model applies to performance-intensive scenarios. The maximum number of nodes allowed in a cluster depends on the route quota in a VPC network. Each node is assigned a CIDR block of a fixed size. This networking model is free from tunnel encapsulation overhead and outperforms the container tunnel network model. In addition, as VPC routing includes routes to node IP addresses and the container CIDR block, container pods in a cluster can be directly accessed from outside the cluster.
      • Developed by CCE, Cloud Native 2.0 network deeply integrates Elastic Network Interfaces (ENIs) and Sub Network Interfaces (sub-ENIs) of VPC. Container IP addresses are allocated from the VPC CIDR block. ELB passthrough networking is supported to direct access requests to containers. Security groups and EIPs are bound to deliver high performance.

      The performance, networking scale, and application scenarios of a container network vary according to the container network model. For details about the functions and features of different container network models, see Overview.

    • Service Network

      Service is also a Kubernetes object. Each Service has a static IP address. When creating a cluster on CCE, you can specify the Service CIDR block. The Service CIDR block cannot overlap with the node or container CIDR block. The Service CIDR block can be used only within a cluster.

    @@ -25,9 +25,9 @@

    Access Scenarios

    Workload access scenarios can be categorized as follows:

    • Intra-cluster access: A ClusterIP Service is used for workloads in the same cluster to access each other.
    • Access from outside a cluster: A Service (NodePort or LoadBalancer type) or an ingress is recommended for a workload outside a cluster to access workloads in the cluster.
      • Access through the public network: An EIP should be bound to the node or load balancer.
      • Access through the private network: The workload can be accessed through the internal IP address of the node or load balancer. If workloads are located in different VPCs, a peering connection is required to enable communication between different VPCs.
      -
    • The workload can access the external network as follows:
      • Accessing an intranet: The workload accesses the intranet address, but the implementation method varies depending on container network models. Ensure that the peer security group allows the access requests from the container CIDR block.
      • Accessing a public network: Assign an EIP to the node where the workload runs (when the VPC network or tunnel network model is used), bind an EIP to the pod IP address (when the Cloud Native Network 2.0 model is used), or configure SNAT rules through the NAT gateway. For details, see Accessing Public Networks from a Container.
      +
    • The workload can access the external network as follows:
      • Accessing an intranet: The workload accesses the intranet address, but the implementation method varies depending on container network models. Ensure that the peer security group allows the access requests from the container CIDR block.
      • Accessing a public network: Assign an EIP to the node where the workload runs (when the VPC network or tunnel network model is used), bind an EIP to the pod IP address (when the Cloud Native Network 2.0 model is used), or configure SNAT rules through the NAT gateway. For details, see Accessing the Internet from a Container.
    -
    Figure 3 Network access diagram
    +
    Figure 3 Network access diagram
    diff --git a/docs/cce/umn/cce_10_0011.html b/docs/cce/umn/cce_10_0011.html index cfdf722f..54cfdbfd 100644 --- a/docs/cce/umn/cce_10_0011.html +++ b/docs/cce/umn/cce_10_0011.html @@ -4,9 +4,9 @@

    Scenario

    ClusterIP Services allow workloads in the same cluster to use their cluster-internal domain names to access each other.

    The cluster-internal domain name format is <Service name>.<Namespace of the workload>.svc.cluster.local:<Port>, for example, nginx.default.svc.cluster.local:80.

    Figure 1 shows the mapping relationships between access channels, container ports, and access ports.

    -
    Figure 1 Intra-cluster access (ClusterIP)
    +
    Figure 1 Intra-cluster access (ClusterIP)
    -

    Creating a ClusterIP Service

    1. Log in to the CCE console and click the cluster name to access the cluster console.
    2. In the navigation pane, choose Services & Ingresses. In the upper right corner, click Create Service.
    3. Set intra-cluster access parameters.

      • Service Name: Service name, which can be the same as the workload name.
      • Service Type: Select ClusterIP.
      • Namespace: Namespace to which the workload belongs.
      • Selector: Add a label and click Confirm. A Service selects a pod based on the added label. You can also click Reference Workload Label to reference the label of an existing workload. In the dialog box that is displayed, select a workload and click OK.
      • Port Settings
        • Protocol: protocol used by the Service.
        • Service Port: port used by the Service. The port number ranges from 1 to 65535.
        • Container Port: port on which the workload listens. For example, Nginx uses port 80 by default.
        +

        Creating a ClusterIP Service

        1. Log in to the CCE console and click the cluster name to access the cluster console.
        2. In the navigation pane, choose Services & Ingresses. In the upper right corner, click Create Service.
        3. Configure intra-cluster access parameters.

          • Service Name: Specify a Service name, which can be the same as the workload name.
          • Service Type: Select ClusterIP.
          • Namespace: Namespace to which the workload belongs.
          • Selector: Add a label and click Confirm. A Service selects a pod based on the added label. You can also click Reference Workload Label to use the label of an existing workload. In the dialog box that is displayed, select a workload and click OK.
          • IPv6: This function is disabled by default. After this function is enabled, the cluster IP address of the Service changes to an IPv6 address. This parameter is available only in clusters of v1.15 or later with IPv6 enabled (set during cluster creation).
          • Port Settings
            • Protocol: protocol used by the Service.
            • Service Port: port used by the Service. The port number ranges from 1 to 65535.
            • Container Port: port on which the workload listens. For example, Nginx uses port 80 by default.

        4. Click OK.
        diff --git a/docs/cce/umn/cce_10_0012.html b/docs/cce/umn/cce_10_0012.html index c733270a..59f8bd58 100644 --- a/docs/cce/umn/cce_10_0012.html +++ b/docs/cce/umn/cce_10_0012.html @@ -18,7 +18,7 @@
    - @@ -26,9 +26,9 @@
    Table 1 Configuration parameters

    Parameter

    @@ -24,14 +24,13 @@

    Container Engine

    CCE clusters support Docker and containerd in some scenarios.
    • VPC network clusters of v1.23 and later versions support containerd. Tunnel network clusters of v1.23.2-r0 and later versions support containerd.
    • For a CCE Turbo cluster, both Docker and containerd are supported. For details, see Mapping between Node OSs and Container Engines.
    -
    +

    The container engines supported by CCE include Docker and containerd, which may vary depending on cluster types, cluster versions, and OSs. Select a container engine based on the information displayed on the CCE console. For details, see Mapping between Node OSs and Container Engines.

    OS

    Select an OS type. Different types of nodes support different OSs.
    • Public image: Select a public image for the node.
    • Private image: Select a private image for the node.
    -
    NOTE:
    • Service container runtimes share the kernel and underlying calls of nodes. To ensure compatibility, select a Linux distribution version that is the same as or close to that of the final service container image for the node OS.
    +
    NOTE:

    Service container runtimes share the kernel and underlying calls of nodes. To ensure compatibility, select a Linux distribution version that is the same as or close to that of the final service container image for the node OS.

    Data Disk

    At least one data disk is required for the container runtime and kubelet. The data disk cannot be deleted or uninstalled. Otherwise, the node will be unavailable.

    -

    Click Expand and select Allocate Disk Space to define the disk space occupied by the container runtime to store the working directories, container image data, and image metadata. For details about how to allocate data disk space, see Data Disk Space Allocation.

    +

    Click Expand to configure Data Disk Space Allocation, which is used to allocate space for container engines, images, and ephemeral storage for them to run properly. For details about how to allocate data disk space, see Data Disk Space Allocation.

    For other data disks, a raw disk is created without any processing by default. You can also click Expand and select Mount Disk to mount the data disk to a specified directory.

    Kubernetes Label

    -

    Click Add to set the key-value pair attached to the Kubernetes objects (such as pods). A maximum of 20 labels can be added.

    -

    Labels can be used to distinguish nodes. With workload affinity settings, container pods can be scheduled to a specified node. For more information, see Labels and Selectors.

    -

    Resource Tag

    +

    Resource Tag

    You can add resource tags to classify resources.

    -

    You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

    +

    You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

    CCE will automatically create the "CCE-Dynamic-Provisioning-Node=node id" tag.

    Kubernetes Label

    +

    Click Add Label to set the key-value pair attached to the Kubernetes objects (such as pods). A maximum of 20 labels can be added.

    +

    Labels can be used to distinguish nodes. With workload affinity settings, container pods can be scheduled to a specified node. For more information, see Labels and Selectors.

    +

    Taint

    This parameter is left blank by default. You can add taints to configure anti-affinity for the node. A maximum of 20 taints are allowed for each node. Each taint contains the following parameters:
    • Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
    • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
    • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
    +
    This parameter is left blank by default. You can add taints to configure node anti-affinity. A maximum of 20 taints are allowed for each node. Each taint contains the following parameters:
    • Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
    • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
    • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
    NOTICE:
    • If taints are used, you must configure tolerations in the YAML files of pods. Otherwise, scale-up may fail or pods cannot be scheduled onto the added nodes.
    • After a node pool is created, you can click Edit to modify its configuration. The modification will be synchronized to all nodes in the node pool.
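    As a hedged illustration, the following pod sketch tolerates a node taint configured with the parameters above. The pod name and the taint key and value (gpu: true) are assumed examples and must match the taint actually added to the node.

apiVersion: v1
kind: Pod
metadata:
  name: toleration-demo       # hypothetical pod name
spec:
  tolerations:
  - key: "gpu"                # must match the taint key on the node
    operator: "Equal"
    value: "true"             # must match the taint value
    effect: "NoSchedule"      # must match the taint effect
  containers:
  - name: container-1
    image: nginx:latest       # illustrative image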
    @@ -101,19 +100,19 @@

    Max. Pods

    Maximum number of pods that can run on the node, including the default system pods.

    +

    Maximum number of pods that can run on the node, including the default system pods.

    This limit prevents the node from being overloaded with pods.

    Pre-installation Command

    Enter commands. A maximum of 1000 characters are allowed.

    +

    Pre-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded.

    The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed.

    Post-installation Command

    Enter commands. A maximum of 1000 characters are allowed.

    +

    Post-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded.

    The script will be executed after Kubernetes software is installed, which does not affect the installation.

    Name of a node pool. By default, the name is in the format of Cluster name-nodepool-Random number. If you do not want to use the default name format, you can customize the name.

    Nodes

    +

    Expected Initial Nodes

    Number of nodes to be created in this node pool. A maximum of 50 nodes can be created at a time.

    -

    Compute Settings

    +

    Configurations

    You can configure the flavor and OS of a cloud server, on which your containerized applications run. -
    Table 2 Compute parameters

    Parameter

    +
    @@ -37,33 +37,33 @@ - - - - - + + + @@ -80,7 +80,7 @@

    Storage Settings

    -
    Configure storage resources on a node for the containers running on it. Set the disk size according to site requirements. +
    Configure storage resources on a node for the containers running on it. Select a disk type and configure its size based on service requirements.
    Table 2 Node configuration parameters

    Parameter

    Description

    AZ

    AZ where the node is located. Nodes in a cluster can be created in different AZs for higher reliability. The value cannot be changed after the node is created.

    -

    You are advised to select Random to deploy your node in a random AZ based on the selected node flavor.

    +

    Select Random to deploy your node in a random AZ based on the selected node flavor.

    An AZ is a physical region where resources use independent power supply and networks. AZs are physically isolated but interconnected through an internal network. To enhance workload availability, create nodes in different AZs.

    Node Type

    CCE standard cluster:
    • ECS (VM): Containers run on ECSs.
    +

    Select a node type based on service requirements. Then, you can select a proper flavor from the node flavor list.

    +
    CCE standard clusters support the following node types:
    • ECS (VM): A virtualized ECS is used as a cluster node.
    -
    CCE Turbo cluster:
    • ECS (VM): Containers run on ECSs. Only the ECSs that can be bound with multiple NICs are supported.
    -
    -

    Container Engine

    -
    CCE clusters support Docker and containerd in some scenarios.
    • VPC network clusters of v1.23 and later versions support containerd. Tunnel network clusters of v1.23.2-r0 and later versions support containerd.
    • For a CCE Turbo cluster, both Docker and containerd are supported. For details, see Mapping between Node OSs and Container Engines.
    +
    CCE Turbo clusters support the following node types:
    • ECS (VM): A virtualized ECS is used as a cluster node. A CCE Turbo cluster supports only the cloud servers that allow multiple ENIs. Select a server type displayed on the CCE console.

    Specifications

    Select a node flavor based on service requirements. The available node flavors vary depending on regions or AZs. For details, see the CCE console.

    +

    Select a node flavor based on service requirements. The available node flavors vary depending on regions or AZs. For details, see the CCE console.

    +

    Container Engine

    +

    The container engines supported by CCE include Docker and containerd, which may vary depending on cluster types, cluster versions, and OSs. Select a container engine based on the information displayed on the CCE console. For details, see Mapping between Node OSs and Container Engines.

    OS

    Select an OS type. Different types of nodes support different OSs.
    • Public image: Select a public image for the node.
    • Private image: Select a private image for the node.
    -
    NOTE:
    • Service container runtimes share the kernel and underlying calls of nodes. To ensure compatibility, select a Linux distribution version that is the same as or close to that of the final service container image for the node OS.
    +
    NOTE:

    Service container runtimes share the kernel and underlying calls of nodes. To ensure compatibility, select a Linux distribution version that is the same as or close to that of the final service container image for the node OS.

    - - - + + + @@ -141,6 +146,8 @@ @@ -155,25 +162,25 @@ - - - - + + + - @@ -193,13 +200,13 @@ - - diff --git a/docs/cce/umn/cce_10_0014.html b/docs/cce/umn/cce_10_0014.html index 7b5f47fe..4604a94b 100644 --- a/docs/cce/umn/cce_10_0014.html +++ b/docs/cce/umn/cce_10_0014.html @@ -6,16 +6,16 @@
    Table 3 Configuration parameters

    Parameter

    Description

    @@ -90,24 +90,23 @@

    System Disk

    System disk used by the node OS. The value ranges from 40 GiB to 1024 GiB. The default value is 50 GiB.

    -
    Encryption: System disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption setting. This function is available only in certain regions.
    • Encryption is not selected by default.
    • After selecting Encryption, you can select an existing key in the displayed dialog box. If no key is available, click View Key List and create a key. After the key is created, click the refresh icon next to the Encryption text box.
    +
    Encryption: System disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption setting. This function is available only in certain regions.
    • Encryption is not selected by default.
    • After setting System Disk Encryption to Encryption, choose an existing key. If no key is available, click View Key List and create a key. After the key is created, click the refresh icon next to the Encryption text box.

    Data Disk

    At least one data disk is required for the container runtime and kubelet. The data disk cannot be deleted or uninstalled. Otherwise, the node will be unavailable.

    -
    • First data disk: used for container runtime and kubelet components. The value ranges from 20 GiB to 32,768 GiB. The default value is 100 GiB.
    • Other data disks: You can set the data disk size to a value ranging from 10 GiB to 32,768 GiB. The default value is 100 GiB.
    -
    NOTE:

    If the node flavor is disk-intensive or ultra-high I/O, one data disk can be a local disk.

    -

    Local disks may break down and do not ensure data reliability. Store your service data in EVS disks, which are more reliable than local disks.

    +
    • First data disk: used for container runtime and kubelet components. The value ranges from 20 GiB to 32768 GiB. The default value is 100 GiB.
    • Other data disks: You can set the data disk size to a value ranging from 10 GiB to 32768 GiB. The default value is 100 GiB.
    +
    NOTE:
    • If the node flavor is disk-intensive or ultra-high I/O, one data disk can be a local disk.
    • Local disks may break down and do not ensure data reliability. Store your service data in EVS disks, which are more reliable than local disks.

    Advanced Settings

    -

    Click Expand to configure the following parameters:

    -
    • Data Disk Space Allocation: After selecting Set Container Engine Space, you can specify the proportion of the space for the container engine, image, and temporary storage on the data disk. The container engine space is used to store the working directory, container image data, and image metadata for the container runtime. The remaining space of the data disk is used for pod configuration files, keys, and EmptyDir. For details about how to allocate data disk space, see Data Disk Space Allocation.
    • Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption setting. This function is available only in certain regions.
      • Encryption is not selected by default.
      • After selecting Encryption, you can select an existing key in the displayed dialog box. If no key is available, click View Key List and create a key. After the key is created, click the refresh icon next to the Encryption text box.
      +

      Click Expand and configure the following parameters:

      +
      • Data Disk Space Allocation: allocates space for container engines, images, and ephemeral storage for them to run properly. For details about how to allocate data disk space, see Data Disk Space Allocation.
      • Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption setting. This function is available only in certain regions.
        • Encryption is not selected by default.
        • After selecting Encryption, you can select an existing key in the displayed dialog box. If no key is available, click View Key List and create a key. After the key is created, click the refresh icon next to the Encryption text box.
      -

      Adding Multiple Data Disks

      +

      Adding data disks

      A maximum of four data disks can be added. By default, raw disks are created without any processing. You can also click Expand and select any of the following options:

      -
      • Default: By default, a raw disk is created without any processing.
      • Mount Disk: The data disk is attached to a specified directory.
      • Use as PV: applicable to scenarios in which there is a high performance requirement on PVs. The node.kubernetes.io/local-storage-persistent label is added to the node with PV configured. The value is linear or striped.
      • Use as ephemeral volume: applicable to scenarios in which there is a high performance requirement on EmptyDir.
      +
      • Default: By default, a raw disk is created without any processing.
      • Mount Disk: The data disk is attached to a specified directory.
      • Use as PV: applicable when there is a high performance requirement on PVs. The node.kubernetes.io/local-storage-persistent label is added to the node with PV configured. The value is linear or striped.
      • Use as ephemeral volume: applicable when there is a high performance requirement on EmptyDir.
      NOTE:
      • Local PVs are supported only when the cluster version is v1.21.2-r0 or later and the Everest add-on version is 2.1.23 or later. Version 2.1.23 or later is recommended.
      • Local EVs are supported only when the cluster version is v1.21.2-r0 or later and the Everest add-on version is 1.2.29 or later.
      Local Persistent Volumes and Local EVs support the following write modes:
      • Linear: A linear logical volume integrates one or more physical volumes. Data is written to the next physical volume when the previous one is used up.
      • Striped: A striped logical volume stripes data into blocks of the same size and stores them in multiple physical volumes in sequence, allowing data to be concurrently read and written. A storage pool consisting of striped volumes cannot be scaled out. This option can be selected only when multiple volumes exist.
      @@ -126,12 +125,18 @@

    Node Subnet

    +

    Virtual Private Cloud

    The node subnet selected during cluster creation is used by default. You can choose another subnet instead.

    +

    By default, the VPC to which the cluster belongs is used, and it cannot be changed.

    Node IP

    +

    Node Subnet

    +

    The node subnet selected during cluster creation is used by default. You can choose another subnet instead.

    +
    • Multiple subnets: You can select multiple subnets in the same VPC for your node pool. Nodes newly added during a scale-out preferentially use the IP addresses of the subnets in the order in which the subnets are listed.
    • Single subnet: Only one subnet is configured for your node pool. If the IP addresses of a single subnet are insufficient, configure multiple subnets. Otherwise, a node pool scale-out may fail.
    +

    Node IP Address

    Random allocation is supported.

    Security group used by the nodes created in the node pool. A maximum of five security groups can be selected.

    When a cluster is created, a node security group named {Cluster name}-cce-node-{Random ID} is created and used by default.

    Traffic needs to pass through certain ports in the node security group to ensure node communications. Ensure that you have enabled these ports if you select another security group.

    +
    NOTE:

    After a node pool is created, its associated security group cannot be modified.

    +

    Kubernetes Label

    -

    A Kubernetes label is a key-value pair added to a Kubernetes object (such as a pod). After specifying a label, click Add. A maximum of 20 labels can be added.

    -

    Labels can be used to distinguish nodes. With workload affinity settings, container pods can be scheduled to a specified node. For more information, see Labels and Selectors.

    -

    Resource Tag

    +

    Resource Tag

    You can add resource tags to classify resources.

    -

    You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

    +

    You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

    CCE will automatically create the "CCE-Dynamic-Provisioning-Node=Node ID" tag.

    Kubernetes Label

    +

    A Kubernetes label is a key-value pair added to a Kubernetes object (such as a pod). After specifying a label, click Add. A maximum of 20 labels can be added.

    +

    Labels can be used to distinguish nodes. With workload affinity settings, container pods can be scheduled to a specified node. For more information, see Labels and Selectors.

    +

    Taint

    This parameter is left blank by default. You can add taints to configure node anti-affinity. A maximum of 20 taints are allowed for each node. Each taint contains the following parameters:
    • Key: A key must contain 1 to 63 characters starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
    • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
    • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
    +
    This parameter is left blank by default. You can add taints to configure anti-affinity for the node. A maximum of 20 taints are allowed for each node. Each taint contains the following parameters:
    • Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
    • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
    • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.

    For details, see Managing Node Taints.

    -
    NOTE:

    For a cluster of v1.19 or earlier, the workload may have been scheduled to a node before the taint is added. To avoid such a situation, select a cluster of a version later than v1.19.

    +
    NOTE:

    For a cluster of v1.19 or earlier, the workload may have been scheduled to a node before the taint is added. To avoid such a situation, select a cluster of v1.19 or later.

    Pre-installation Command

    Enter commands. A maximum of 1000 characters are allowed.

    +

    Pre-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded.

    The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed.

    Post-installation Command

    Enter commands. A maximum of 1000 characters are allowed.

    +

    Post-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded.

    The script will be executed after Kubernetes software is installed, which does not affect the installation.

    NOTE:

    Do not run the reboot command in the post-installation script to restart the system immediately. If a restart is required, run shutdown -r 1 instead to restart the system with a one-minute delay.

    @@ -208,7 +215,7 @@

    Agency

    An agency is created by the account administrator on the IAM console. By creating an agency, you can share your cloud server resources with another account, or entrust a more professional person or team to manage your resources.

    -

    If no agency is available, click Create Agency to create one.

    +

    If no agency is available, click Create Agency on the right to create one.

    - @@ -98,7 +98,7 @@ spec: name: vol-log imagePullSecrets: - name: default-secret -

    The following shows how to use a hostPath volume. Compared with emptyDir, the type of volumes is changed to hostPath, and the path on the host needs to be configured for this hostPath volume. In the following example, /tmp/log on the host is mounted to /var/log/nginx. In this way, the ICAgent can collects logs in /var/log/nginx, without deleting the logs from /tmp/log.

    +

    The following shows how to use a hostPath volume. Compared with emptyDir, the volume type is changed to hostPath, and a path on the host must be configured for the hostPath volume. In the following example, /tmp/log on the host is mounted to /var/log/nginx so that ICAgent can collect logs from /var/log/nginx without the logs being deleted from /tmp/log.

    apiVersion: apps/v1
     kind: Deployment
     metadata:
    @@ -155,8 +155,8 @@ spec:
     
    @@ -174,7 +174,7 @@ spec:
    Table 1 Configuring log policies

    Parameter

    @@ -25,7 +25,7 @@

    Mount Path

    Container path (for example, /tmp) to which the storage resources will be mounted.
    NOTICE:
    • Do not mount a volume to a system directory such as / or /var/run. Otherwise, an exception occurs. Mount the volume to an empty directory. If the directory is not empty, ensure that there are no files that affect container startup. Otherwise, the files will be replaced, which leads to a container startup failure or workload creation failure.
    • If a volume is mounted to a high-risk directory, use an account with minimum permissions to start the container. Otherwise, high-risk files on the host machine may be damaged.
    • AOM collects only the first 20 logs that have been modified recently. It collects logs from 2 levels of subdirectories by default.
    • AOM only collects .log, .trace, and .out text logs in mounting paths.
    • For details about how to set permissions for mount points in a container, see Configure a Security Context for a Pod or Container.
    +
    Container path (for example, /tmp) to which the storage resources will be mounted.
    NOTICE:
    • Do not mount storage to a system directory such as / or /var/run; this may cause container exceptions. Mount the storage to an empty directory. If the directory is not empty, ensure that it contains no files that affect container startup. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
    • If a volume is mounted to a high-risk directory, use an account with minimum permissions to start the container; otherwise, high-risk files on the host may be damaged.
    • AOM collects only the first 20 logs that have been modified recently. It collects logs from 2 levels of subdirectories by default.
    • AOM only collects .log, .trace, and .out text logs in mounting paths.
    • For details about how to set permissions for mount points in a container, see Configure a Security Context for a Pod or Container.
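    A minimal sketch of the corresponding pod configuration, reusing the vol-log volume and the /var/log/nginx mount path from the earlier example (the pod name and image are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: log-demo                     # hypothetical name
spec:
  containers:
  - name: container-0
    image: nginx:latest
    volumeMounts:
    - name: vol-log
      mountPath: /var/log/nginx      # mount to an empty, non-system directory
  volumes:
  - name: vol-log
    emptyDir: {}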

    Extended host path

    Extended host paths contain pod IDs or container names to distinguish different containers into which the host path is mounted.

    -

    A level-3 directory is added to the original volume directory/subdirectory. You can easily obtain the files output by a single Pod.

    -
    • None: No extended path is configured.
    • PodUID: ID of a pod.
    • PodName: name of a pod.
    • PodUID/ContainerName: ID of a pod or name of a container.
    • PodName/ContainerName: name of a pod or container.
    +

    A level-3 directory is added to the original volume directory/subdirectory. You can easily obtain the files output by a single Pod.

    +
    • None: No extended path is configured.
    • PodUID: ID of a pod.
    • PodName: name of a pod.
    • PodUID/ContainerName: pod ID and container name.
    • PodName/ContainerName: pod name and container name.

    policy.logs.rotate

    @@ -164,7 +164,7 @@ spec:

    Log dump

    Log dump refers to rotating log files on a local host.

    -
    • Enabled: AOM scans log files every minute. When a log file exceeds 50 MB, it is dumped immediately. A new .zip file is generated in the directory where the log file locates. For a log file, AOM stores only the latest 20 .zip files. When the number of .zip files exceeds 20, earlier .zip files will be deleted. After the dump is complete, the log file in AOM will be cleared.
    • Disabled: AOM does not dump log files.
    +
    • Enabled: AOM scans log files every minute. When a log file exceeds 50 MB, it is dumped immediately. A new .zip file is generated in the directory where the log file is located. For a log file, AOM stores only the latest 20 .zip files. When the number of .zip files exceeds 20, earlier .zip files will be deleted. After the dump is complete, the log file in AOM will be cleared.
    • Disabled: AOM does not dump log files.
    NOTE:
    • AOM rotates log files using copytruncate. Before enabling log dumping, ensure that log files are written in the append mode. Otherwise, file holes may occur.
    • Currently, mainstream log components such as Log4j and Logback support log file rotation. If you have already set rotation for log files, skip the configuration. Otherwise, conflicts may occur.
    • You are advised to configure log file rotation for your own services to flexibly control the size and number of rolled files.

    Collection path

    A collection path narrows down the scope of collection to specified logs.

    -
    • If no collection path is specified, log files in .log, .trace, and .out formats will be collected from the specified path.
    • /Path/**/ indicates that all log files in .log, .trace, and .out formats will be recursively collected from the specified path and all subdirectories at 5 levels deep.
    • * in log file names indicates a fuzzy match.
    +
    • If no collection path is specified, log files in .log, .trace, and .out formats will be collected from the specified path.
    • /Path/**/ indicates that all log files in .log, .trace, and .out formats will be recursively collected from the specified path and all subdirectories at 5 levels deep.
    • * in log file names indicates a fuzzy match.

    Example: The collection path /tmp/**/test*.log indicates that all .log files prefixed with test will be collected from /tmp and subdirectories at 5 levels deep.

    CAUTION:

    Ensure that the ICAgent version is 5.12.22 or later.

    @@ -217,7 +217,7 @@ kubectl logs -f <pod_name> -n namespace (real-time query in tail -f mode)
diff --git a/docs/cce/umn/cce_10_0020.html b/docs/cce/umn/cce_10_0020.html index 1ceefb94..cfb96256 100644 --- a/docs/cce/umn/cce_10_0020.html +++ b/docs/cce/umn/cce_10_0020.html @@ -20,7 +20,7 @@ -
diff --git a/docs/cce/umn/cce_10_0026.html b/docs/cce/umn/cce_10_0026.html index fb1c412f..25c74537 100644 --- a/docs/cce/umn/cce_10_0026.html +++ b/docs/cce/umn/cce_10_0026.html @@ -14,7 +14,7 @@

  6. Click View Trace in the Operation column. The trace details are displayed.

    -
  7. For details about key fields in the trace structure, see section "Trace References" > "Trace Structure" and section "Trace References" > "Example Traces".
    +
  7. For details about key fields in the trace structure, see section "Trace References" > "Trace Structure" and section "Trace References" > "Example Traces" in the CTS User Guide.

diff --git a/docs/cce/umn/cce_10_0028.html b/docs/cce/umn/cce_10_0028.html index 7f608e38..2a60f7d5 100644 --- a/docs/cce/umn/cce_10_0028.html +++ b/docs/cce/umn/cce_10_0028.html @@ -1,8 +1,8 @@ -

    Creating a CCE Cluster

    +

    Creating a CCE Standard/Turbo Cluster

    On the CCE console, you can easily create Kubernetes clusters. After a cluster is created, the master node is hosted by CCE. You only need to create worker nodes. In this way, you can implement cost-effective O&M and efficient service deployment.

    -

    Constraints

    • During the node creation, software packages are downloaded from OBS using the domain name. A private DNS server must be used to resolve the OBS domain name. Therefore, the DNS server address of the subnet where the node resides must be set to the private DNS server address so that the node can access the private DNS server. When you create a subnet, the private DNS server is used by default. If you change the subnet DNS, ensure that the DNS server in use can resolve the OBS domain name.
    • You can create a maximum of 50 clusters in a single region.
    • After a cluster is created, the following items cannot be changed:
      • Cluster type
      • Number of master nodes in the cluster
      • AZ of a master node
      • Network configurations of the cluster, such as the VPC, subnet, container CIDR block, Service CIDR block, and kube-proxy settings.
      • Network model. For example, change Tunnel network to VPC network.
      +

      Constraints

      • During the node creation, software packages are downloaded from OBS using the domain name. A private DNS server must be used to resolve the OBS domain name. Therefore, the DNS server address of the subnet where the node resides must be set to the private DNS server address so that the node can access the private DNS server. When you create a subnet, the private DNS server is used by default. If you change the subnet DNS, ensure that the DNS server in use can resolve the OBS domain name.
      • You can create a maximum of 50 clusters in a single region.
      • After a cluster is created, the following items cannot be changed:
        • Cluster type
        • Number of master nodes in the cluster
        • AZ of a master node
        • Network configurations of the cluster, such as the VPC, subnet, Service CIDR block, IPv6 settings, and kube-proxy settings.
        • Network model. For example, change Tunnel network to VPC network.

      Step 1: Log In to the CCE Console

      1. Log in to the CCE console.
      2. On the Clusters page, click Create Cluster in the upper right corner.
      @@ -20,7 +20,7 @@

    Select CCE Standard Cluster or CCE Turbo Cluster as required.

    • CCE standard clusters provide highly reliable and secure containers for commercial use.
    • CCE Turbo clusters use the high-performance cloud native network. Such clusters provide cloud native hybrid scheduling, achieving higher resource utilization and wider scenario coverage.
    -

    For details about the differences between CCE standard clusters and CCE Turbo clusters, see CCE Turbo Clusters and CCE Standard Clusters.

    +

    For more details, see cluster types.

    Cluster Name

    @@ -41,8 +41,8 @@

    Master Nodes

    Select the number of master nodes. The master nodes are automatically hosted by CCE and deployed with Kubernetes cluster management components such as kube-apiserver, kube-controller-manager, and kube-scheduler.

    -
    • Multiple: Three master nodes will be created to ensure cluster reliability.
    • Single: Only one master node will be created in your cluster.
    -
    You can also select AZs for the master nodes. By default, AZs are allocated automatically for the master nodes.
    • Automatic: Master nodes are randomly distributed in different AZs for cluster DR. If the number of available AZs is less than the number of nodes to be created, CCE will create the nodes in the AZs with sufficient resources to preferentially ensure cluster creation. In this case, AZ-level DR may not be ensured.
    • Custom: You can select AZ for each master node.
      If there is one master node in your cluster, you can select one AZ for the master node. If there are multiple master nodes in your cluster, you can select multiple AZs for the master nodes.
      • AZ: Master nodes are deployed in different AZs for cluster DR.
      • Host: Master nodes are deployed on different hosts in the same AZ for cluster DR.
      • Custom: Master nodes are deployed in the AZs you specified.
      +
      • Multiple: Three master nodes will be created for high cluster availability.
      • Single: Only one master node will be created in your cluster.
      +
      You can also select AZs for the master nodes. By default, AZs are allocated automatically for the master nodes.
      • Automatic: Master nodes are randomly distributed in different AZs for cluster DR. If the number of available AZs is less than the number of nodes to be created, CCE will create the nodes in the AZs with sufficient resources to preferentially ensure cluster creation. In this case, AZ-level DR may not be ensured.
      • Custom: Master nodes are deployed in specific AZs.
        If there is one master node in your cluster, you can select one AZ for the master node. If there are multiple master nodes in your cluster, you can select multiple AZs for the master nodes.
        • AZ: Master nodes are deployed in different AZs for cluster DR.
        • Host: Master nodes are deployed on different hosts in the same AZ for cluster DR.
        • Custom: Master nodes are deployed in the AZs you specified.
      @@ -54,7 +54,7 @@

      Network Settings

      The network settings cover nodes, containers, and Services. For details about the cluster networking and container network models, see Overview.

      -
      Table 1 Cluster Network

      Parameter

      +
      @@ -62,19 +62,25 @@ - - + + +
      Table 1 Network settings

      Parameter

      Description

      VPC

      Select the VPC to which the cluster belongs. If no VPC is available, click Create VPC to create one. This configuration cannot be modified after the cluster is created.

      +

      Select the VPC to which the cluster belongs. If no VPC is available, click Create VPC to create one. The value cannot be changed after the cluster is created.

      Subnet

      Select the subnet to which the master nodes belong. If no subnet is available, click Create Subnet to create one. This configuration cannot be modified after the cluster is created.

      +

      Select the subnet to which the master nodes belong. If no subnet is available, click Create Subnet to create one. The value cannot be changed after the cluster is created.

      +

      IPv6

      +

      If enabled, cluster resources, including nodes and workloads, can be accessed through IPv6 CIDR blocks.

      +
      • IPv4/IPv6 dual stack is not supported by clusters using the VPC network model.
      -
      Table 2 Container Network

      Parameter

      +
      @@ -92,16 +98,16 @@ - -
      Table 2 Network settings

      Parameter

      Description

      Configure the CIDR block used by containers. The value determines the maximum number of containers in your cluster.

      Pod Subnet (configured for CCE Turbo clusters)

      +

      Default Pod Subnet (configured for CCE Turbo clusters)

      Select the subnet to which the containers belong. If no subnet is available, click Create Subnet to create one. The pod subnet determines the maximum number of containers in a cluster. You can add pod subnets after a cluster is created.

      +

      Select the subnet to which the pods belong. If no subnet is available, click Create Subnet to create one. The pod subnet determines the maximum number of containers in a cluster. You can add pod subnets after a cluster is created.

      -
      - - @@ -258,7 +269,7 @@

      Step 5: Confirm the Configuration

      After the parameters are specified, click Next: Confirm configuration. The cluster resource list is displayed. Confirm the information and click Submit.

      -

      It takes about 6 to 10 minutes to create a cluster. You can click Back to Cluster List to perform other operations on the cluster or click Go to Cluster Events to view the cluster details.

      +

      It takes about 5 to 10 minutes to create a cluster. You can click Back to Cluster List to perform other operations on the cluster or click Go to Cluster Events to view the cluster details.

      Related Operations

      diff --git a/docs/cce/umn/cce_10_0036.html b/docs/cce/umn/cce_10_0036.html index 07e112da..0c1e60e5 100644 --- a/docs/cce/umn/cce_10_0036.html +++ b/docs/cce/umn/cce_10_0036.html @@ -5,7 +5,7 @@

      Constraints

      • Deleting a node will lead to pod migration, which may affect services. Therefore, delete nodes during off-peak hours.
      • Unexpected risks may occur during the operation. Back up related data in advance.
      • While the node is being deleted, the backend will set the node to the unschedulable state.
      • Only worker nodes can be stopped.
      -

      Procedure

      1. Log in to the CCE console and click the cluster name to access the cluster console.
      2. In the navigation pane, choose Nodes. On the displayed page, click the Nodes tab.
      3. Locate the target node and click its name.
      4. In the upper right corner of the ECS details page, click Stop. In the displayed dialog box, click Yes.

        Figure 1 ECS details page
        +

        Procedure

        1. Log in to the CCE console and click the cluster name to access the cluster console.
        2. In the navigation pane, choose Nodes. On the displayed page, click the Nodes tab.
        3. Locate the target node and click its name.
        4. In the upper right corner of the ECS details page, click Stop. In the displayed dialog box, click Yes.

          Figure 1 ECS details page

      diff --git a/docs/cce/umn/cce_10_0046.html b/docs/cce/umn/cce_10_0046.html index c42d01c6..13f96000 100644 --- a/docs/cce/umn/cce_10_0046.html +++ b/docs/cce/umn/cce_10_0046.html @@ -14,6 +14,8 @@ + diff --git a/docs/cce/umn/cce_10_0047.html b/docs/cce/umn/cce_10_0047.html index dcf41bc2..15ab1241 100644 --- a/docs/cce/umn/cce_10_0047.html +++ b/docs/cce/umn/cce_10_0047.html @@ -3,7 +3,7 @@

      Creating a Deployment

      Scenario

      Deployments are workloads (for example, Nginx) that do not store any data or status. You can create Deployments on the CCE console or by running kubectl commands.
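    For reference, a minimal Deployment manifest that could be applied with kubectl is sketched below; the workload name, image tag, and replica count are illustrative only:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx                        # example workload name
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: container-0
        image: nginx:latest          # image pulled from SWR or an external registry
      imagePullSecrets:
      - name: default-secret         # default credential for accessing SWR images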

      -

      Prerequisites

      • Before creating a workload, you must have an available cluster. For details on how to create a cluster, see Creating a CCE Cluster.
      • To enable public access to a workload, ensure that an EIP or load balancer has been bound to at least one node in the cluster.

        If a pod has multiple containers, ensure that the ports used by the containers do not conflict with each other. Otherwise, creating the Deployment will fail.

        +

        Prerequisites

        • Before creating a workload, you must have an available cluster. For details on how to create a cluster, see Creating a CCE Standard/Turbo Cluster.
        • To enable public access to a workload, ensure that an EIP or load balancer has been bound to at least one node in the cluster.

          If a pod has multiple containers, ensure that the ports used by the containers do not conflict with each other. Otherwise, creating the Deployment will fail.

        @@ -39,7 +39,7 @@
      - @@ -73,7 +73,7 @@
    • (Optional) Lifecycle: Configure operations to be performed in a specific phase of the container lifecycle, such as Startup Command, Post-Start, and Pre-Stop. For details, see Configuring Container Lifecycle Parameters.
    • (Optional) Health Check: Set the liveness probe, ready probe, and startup probe as required. For details, see Configuring Container Health Check.
    • (Optional) Environment Variables: Configure variables for the container running environment using key-value pairs. These variables transfer external information to containers running in pods and can be flexibly modified after application deployment. For details, see Configuring Environment Variables.
    • (Optional) Data Storage: Mount local storage or cloud storage to the container. The application scenarios and mounting modes vary with the storage type. For details, see Storage.

      If the workload contains more than one pod, EVS volumes cannot be mounted.

      -
    • (Optional) Security Context: Assign container permissions to protect the system and other containers from being affected. Enter the user ID to assign container permissions and prevent systems and other containers from being affected.
    • (Optional) Logging: Report standard container output logs to AOM by default, without requiring manual settings. You can manually configure the log collection path. For details, see Connecting CCE to AOM.

      To disable the standard output of the current workload, add the annotation kubernetes.AOM.log.stdout: [] in Labels and Annotations. For details about how to use this annotation, see Table 1.

      +
    • (Optional) Security Context: Assign container permissions to protect the system and other containers from being affected. Enter a user ID to specify the user that the container runs as.
    • (Optional) Logging: Report standard container output logs to AOM by default, without requiring manual settings. You can manually configure the log collection path. For details, see Collecting Container Logs Using ICAgent.

      To disable the standard output of the current workload, add the annotation kubernetes.AOM.log.stdout: [] in Labels and Annotations. For details about how to use this annotation, see Table 1.

    • Image Access Credential: Select the credential used for accessing the image repository. The default value is default-secret. You can use default-secret to access images in SWR. For details about default-secret, see default-secret.
    • (Optional) GPU: All is selected by default. The workload instance will be scheduled to the node of the specified GPU type.
    • @@ -81,10 +81,10 @@

      (Optional) Service Settings

      A Service provides external access for pods. With a static IP address, a Service forwards access traffic to pods and automatically balances load for these pods.

      You can also create a Service after creating a workload. For details about Services of different types, see Overview.
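      As an illustration only, a ClusterIP Service that forwards traffic to the pods of the Deployment sketched earlier might look as follows; the Service name and ports are example values:

apiVersion: v1
kind: Service
metadata:
  name: nginx                        # example Service name
spec:
  type: ClusterIP                    # see the Service overview for other Service types
  selector:
    app: nginx                       # selects the pods created by the Deployment
  ports:
  - name: http
    protocol: TCP
    port: 80                         # port exposed by the Service
    targetPort: 80                   # container port that receives the traffic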

      -
      (Optional) Advanced Settings
      • Upgrade: Specify the upgrade mode and parameters of the workload. Rolling upgrade and Replace upgrade are available. For details, see Workload Upgrade Policies.
      • Scheduling: Configure affinity and anti-affinity policies for flexible workload scheduling. Load affinity and node affinity are provided.
        • Load Affinity: Common load affinity policies are offered for quick load affinity deployment.
          • Multi-AZ deployment is preferred: Workload pods are preferentially scheduled to nodes in different AZs through pod anti-affinity (podAntiAffinity). If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ but onto different nodes for high availability. If there are fewer nodes than pods, the extra pods will fail to run.
          • Forcible multi-AZ deployment: Workload pods are forcibly scheduled to nodes in different AZs through pod anti-affinity (podAntiAffinity). If there are fewer AZs than pods, the extra pods will fail to run.
          • Custom policies: Affinity and anti-affinity policies can be customized as needed. For details, see Scheduling Policies (Affinity/Anti-affinity).
          -
        • Node Affinity: Common load affinity policies are offered for quick load affinity deployment.
          • Node Affinity: Workload pods can be deployed on specified nodes through node affinity (nodeAffinity). If no node is specified, the pods will be randomly scheduled based on the default scheduling policy of the cluster.
          • Specified node pool scheduling: Workload pods can be deployed in a specified node pool through node affinity (nodeAffinity). If no node pool is specified, the pods will be randomly scheduled based on the default scheduling policy of the cluster.
          • Custom policies: Affinity and anti-affinity policies can be customized as needed. For details, see Scheduling Policies (Affinity/Anti-affinity).
          +
          (Optional) Advanced Settings
          • Upgrade: Specify the upgrade mode and parameters of the workload. Rolling upgrade and Replace upgrade are available. For details, see Workload Upgrade Policies.
          • Scheduling: Configure affinity and anti-affinity policies for flexible workload scheduling. Load affinity and node affinity are provided.
            • Load Affinity: Common load affinity policies are offered for quick load affinity deployment.
          • Multi-AZ deployment is preferred: Workload pods are preferentially scheduled to nodes in different AZs through pod anti-affinity (podAntiAffinity). If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ but onto different nodes for high availability. If there are fewer nodes than pods, the extra pods will fail to run. A sketch of such an anti-affinity policy is provided after this procedure.
              • Forcible multi-AZ deployment: Workload pods are forcibly scheduled to nodes in different AZs through pod anti-affinity (podAntiAffinity). If there are fewer AZs than pods, the extra pods will fail to run.
              • Custom policies: Affinity and anti-affinity policies can be customized as needed. For details, see Scheduling Policies (Affinity/Anti-affinity).
              +
        • Node Affinity: Common node affinity policies are offered for quick node affinity deployment.
              • Node Affinity: Workload pods can be deployed on specified nodes through node affinity (nodeAffinity). If no node is specified, the pods will be randomly scheduled based on the default scheduling policy of the cluster.
              • Specified node pool scheduling: Workload pods can be deployed in a specified node pool through node affinity (nodeAffinity). If no node pool is specified, the pods will be randomly scheduled based on the default scheduling policy of the cluster.
              • Custom policies: Affinity and anti-affinity policies can be customized as needed. For details, see Scheduling Policies (Affinity/Anti-affinity).
            -
          • Toleration: Using both taints and tolerations allows (not forcibly) the pod to be scheduled to a node with the matching taints, and controls the pod eviction policies after the node where the pod is located is tainted. For details, see Taints and Tolerations.
          • Labels and Annotations: Add labels or annotations for pods using key-value pairs. After entering the key and value, click Confirm. For details about how to use and configure labels and annotations, see Labels and Annotations.
          • DNS: Configure a separate DNS policy for the workload. For details, see DNS Configuration.
          • Network Configuration
            • Pod ingress/egress bandwidth limitation: You can set ingress/egress bandwidth limitation for pods. For details, see Configuring QoS for a Pod.
            +
          • Toleration: Using both taints and tolerations allows (not forcibly) the pod to be scheduled to a node with the matching taints, and controls the pod eviction policies after the node where the pod is located is tainted. For details, see Taints and Tolerations.
          • Labels and Annotations: Add labels or annotations for pods using key-value pairs. After entering the key and value, click Confirm. For details about how to use and configure labels and annotations, see Labels and Annotations.
          • DNS: Configure a separate DNS policy for the workload. For details, see DNS Configuration.
          • Network Configuration

        • Click Create Workload in the lower right corner.
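    As referenced in the Multi-AZ deployment option above, one possible shape of a preferred pod anti-affinity policy that spreads replicas across AZs is sketched below; the workload name and labels are illustrative, and the topology key assumes a cluster that exposes the standard topology.kubernetes.io/zone node label:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx                        # example workload name
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:   # best-effort ("preferred") spreading
          - weight: 100
            podAffinityTerm:
              topologyKey: topology.kubernetes.io/zone       # spread pods across AZs
              labelSelector:
                matchLabels:
                  app: nginx
      containers:
      - name: container-0
        image: nginx:latest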
        • diff --git a/docs/cce/umn/cce_10_0048.html b/docs/cce/umn/cce_10_0048.html index 48c90d06..3d94fea4 100644 --- a/docs/cce/umn/cce_10_0048.html +++ b/docs/cce/umn/cce_10_0048.html @@ -4,9 +4,9 @@

          Scenario

    StatefulSets are a type of workload whose data or status is stored while they are running. For example, MySQL is a StatefulSet because it needs to store new data.

          A container can be migrated between different hosts, but data is not stored on the hosts. To store StatefulSet data persistently, attach HA storage volumes provided by CCE to the container.

          -

          Constraints

          • When you delete or scale a StatefulSet, the system does not delete the storage volumes associated with the StatefulSet to ensure data security.
          • When you delete a StatefulSet, reduce the number of replicas to 0 before deleting the StatefulSet so that pods in the StatefulSet can be stopped in order.
          • When you create a StatefulSet, a headless Service is required for pod access. For details, see Headless Service.
          • When a node is unavailable, pods become Unready. In this case, manually delete the pods of the StatefulSet so that the pods can be migrated to a normal node.
          +

          Constraints

          • When you delete or scale a StatefulSet, the system does not delete the storage volumes associated with the StatefulSet to ensure data security.
          • When you delete a StatefulSet, reduce the number of replicas to 0 before deleting the StatefulSet so that pods in the StatefulSet can be stopped in order.
          • When you create a StatefulSet, a headless Service is required for pod access. For details, see Headless Services.
          • When a node is unavailable, pods become Unready. In this case, manually delete the pods of the StatefulSet so that the pods can be migrated to a normal node.
          -

          Prerequisites

          • Before creating a workload, you must have an available cluster. For details on how to create a cluster, see Creating a CCE Cluster.
          • To enable public access to a workload, ensure that an EIP or load balancer has been bound to at least one node in the cluster.

            If a pod has multiple containers, ensure that the ports used by the containers do not conflict with each other. Otherwise, creating the StatefulSet will fail.

            +

            Prerequisites

            • Before creating a workload, you must have an available cluster. For details on how to create a cluster, see Creating a CCE Standard/Turbo Cluster.
            • To enable public access to a workload, ensure that an EIP or load balancer has been bound to at least one node in the cluster.

              If a pod has multiple containers, ensure that the ports used by the containers do not conflict with each other. Otherwise, creating the StatefulSet will fail.

            @@ -42,7 +42,7 @@
      - @@ -77,22 +77,22 @@
    • (Optional) Lifecycle: Configure operations to be performed in a specific phase of the container lifecycle, such as Startup Command, Post-Start, and Pre-Stop. For details, see Configuring Container Lifecycle Parameters.
    • (Optional) Health Check: Set the liveness probe, ready probe, and startup probe as required. For details, see Configuring Container Health Check.
    • (Optional) Environment Variables: Configure variables for the container running environment using key-value pairs. These variables transfer external information to containers running in pods and can be flexibly modified after application deployment. For details, see Configuring Environment Variables.
    • (Optional) Data Storage: Mount local storage or cloud storage to the container. The application scenarios and mounting modes vary with the storage type. For details, see Storage.
      • StatefulSets support dynamic attachment of EVS disks. For details, see Dynamically Mounting an EVS Disk to a StatefulSet and Dynamically Mounting a Local PV to a StatefulSet.

        Dynamic mounting is achieved by using the volumeClaimTemplates field and depends on the dynamic creation capability of StorageClass. A StatefulSet associates each pod with a PVC using the volumeClaimTemplates field, and the PVC is bound to the corresponding PV. Therefore, after the pod is rescheduled, the original data can still be mounted based on the PVC name. (A sketch is provided under Headless Service Parameters below.)

      • After a workload is created, the storage that is dynamically mounted cannot be updated.
      -
    • (Optional) Security Context: Assign container permissions to protect the system and other containers from being affected. Enter the user ID to assign container permissions and prevent systems and other containers from being affected.
    • (Optional) Logging: Report standard container output logs to AOM by default, without requiring manual settings. You can manually configure the log collection path. For details, see Connecting CCE to AOM.

      To disable the standard output of the current workload, add the annotation kubernetes.AOM.log.stdout: [] in Labels and Annotations. For details about how to use this annotation, see Table 1.

      +
    • (Optional) Security Context: Assign container permissions to protect the system and other containers from being affected. Enter a user ID to specify the user that the container runs as.
    • (Optional) Logging: Report standard container output logs to AOM by default, without requiring manual settings. You can manually configure the log collection path. For details, see Collecting Container Logs Using ICAgent.

      To disable the standard output of the current workload, add the annotation kubernetes.AOM.log.stdout: [] in Labels and Annotations. For details about how to use this annotation, see Table 1.

    • Image Access Credential: Select the credential used for accessing the image repository. The default value is default-secret. You can use default-secret to access images in SWR. For details about default-secret, see default-secret.
    • (Optional) GPU: All is selected by default. The workload instance will be scheduled to the node of the specified GPU type.
    • Headless Service Parameters

      -

      A headless Service is used to solve the problem of mutual access between pods in a StatefulSet. The headless Service provides a fixed access domain name for each pod. For details, see Headless Service.

      +

      A headless Service is used to solve the problem of mutual access between pods in a StatefulSet. The headless Service provides a fixed access domain name for each pod. For details, see Headless Services.
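      For illustration, a headless Service (clusterIP: None) and a StatefulSet that references it through serviceName and uses volumeClaimTemplates for dynamic mounting, as described above, could be sketched as follows; the names, image, StorageClass (csi-disk), and disk size are assumptions to be replaced with values available in your cluster:

apiVersion: v1
kind: Service
metadata:
  name: example-headless             # hypothetical headless Service name
spec:
  clusterIP: None                    # makes the Service headless
  selector:
    app: example
  ports:
  - name: tcp
    port: 80
    targetPort: 80
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: example-statefulset          # hypothetical StatefulSet name
spec:
  serviceName: example-headless      # each pod gets a stable DNS name through this Service
  replicas: 2
  selector:
    matchLabels:
      app: example
  template:
    metadata:
      labels:
        app: example
    spec:
      containers:
      - name: container-0
        image: nginx:latest
        volumeMounts:
        - name: data
          mountPath: /data
  volumeClaimTemplates:              # one PVC is created per pod and follows it when rescheduled
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      storageClassName: csi-disk     # assumed StorageClass name for EVS disks
      resources:
        requests:
          storage: 10Gi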

      (Optional) Service Settings

      A Service provides external access for pods. With a static IP address, a Service forwards access traffic to pods and automatically balances load for these pods.

      You can also create a Service after creating a workload. For details about Services of different types, see Overview.

      (Optional) Advanced Settings
      • Upgrade: Specify the upgrade mode and parameters of the workload. Rolling upgrade and Replace upgrade are available. For details, see Workload Upgrade Policies.
      • Pod Management Policies

        For some distributed systems, ordered deployment and scaling of StatefulSet pods is unnecessary or undesirable. These systems require only uniqueness and stable identifiers.

        • OrderedReady: The StatefulSet will deploy, delete, or scale pods in order and one by one. (The StatefulSet continues only after the previous pod is ready or deleted.) This is the default policy.
        • Parallel: The StatefulSet will create pods in parallel to match the desired scale without waiting, and will delete all pods at once.
        -
      • Scheduling: Configure affinity and anti-affinity policies for flexible workload scheduling. Load affinity and node affinity are provided.
        • Load Affinity: Common load affinity policies are offered for quick load affinity deployment.
          • Multi-AZ deployment is preferred: Workload pods are preferentially scheduled to nodes in different AZs through pod anti-affinity (podAntiAffinity). If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ but onto different nodes for high availability. If there are fewer nodes than pods, the extra pods will fail to run.
          • Forcible multi-AZ deployment: Workload pods are forcibly scheduled to nodes in different AZs through pod anti-affinity (podAntiAffinity). If there are fewer AZs than pods, the extra pods will fail to run.
          • Custom policies: Affinity and anti-affinity policies can be customized as needed. For details, see Scheduling Policies (Affinity/Anti-affinity).
          -
        • Node Affinity: Common load affinity policies are offered for quick load affinity deployment.
          • Node Affinity: Workload pods can be deployed on specified nodes through node affinity (nodeAffinity). If no node is specified, the pods will be randomly scheduled based on the default scheduling policy of the cluster.
          • Specified node pool scheduling: Workload pods can be deployed in a specified node pool through node affinity (nodeAffinity). If no node pool is specified, the pods will be randomly scheduled based on the default scheduling policy of the cluster.
          • Custom policies: Affinity and anti-affinity policies can be customized as needed. For details, see Scheduling Policies (Affinity/Anti-affinity).
          +
        • Scheduling: Configure affinity and anti-affinity policies for flexible workload scheduling. Load affinity and node affinity are provided.
          • Load Affinity: Common load affinity policies are offered for quick load affinity deployment.
            • Multi-AZ deployment is preferred: Workload pods are preferentially scheduled to nodes in different AZs through pod anti-affinity (podAntiAffinity). If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ but onto different nodes for high availability. If there are fewer nodes than pods, the extra pods will fail to run.
            • Forcible multi-AZ deployment: Workload pods are forcibly scheduled to nodes in different AZs through pod anti-affinity (podAntiAffinity). If there are fewer AZs than pods, the extra pods will fail to run.
            • Custom policies: Affinity and anti-affinity policies can be customized as needed. For details, see Scheduling Policies (Affinity/Anti-affinity).
            +
          • Node Affinity: Common node affinity policies are offered for quick node affinity deployment.
            • Node Affinity: Workload pods can be deployed on specified nodes through node affinity (nodeAffinity). If no node is specified, the pods will be randomly scheduled based on the default scheduling policy of the cluster.
            • Specified node pool scheduling: Workload pods can be deployed in a specified node pool through node affinity (nodeAffinity). If no node pool is specified, the pods will be randomly scheduled based on the default scheduling policy of the cluster.
            • Custom policies: Affinity and anti-affinity policies can be customized as needed. For details, see Scheduling Policies (Affinity/Anti-affinity).
          -
        • Toleration: Using both taints and tolerations allows (not forcibly) the pod to be scheduled to a node with the matching taints, and controls the pod eviction policies after the node where the pod is located is tainted. For details, see Taints and Tolerations.
        • Labels and Annotations: Add labels or annotations for pods using key-value pairs. After entering the key and value, click Confirm. For details about how to use and configure labels and annotations, see Labels and Annotations.
        • DNS: Configure a separate DNS policy for the workload. For details, see DNS Configuration.
        • Network Configuration
          • Pod ingress/egress bandwidth limitation: You can set ingress/egress bandwidth limitation for pods. For details, see Configuring QoS for a Pod.
          +
        • Toleration: Using both taints and tolerations allows (not forcibly) the pod to be scheduled to a node with the matching taints, and controls the pod eviction policies after the node where the pod is located is tainted. For details, see Taints and Tolerations.
        • Labels and Annotations: Add labels or annotations for pods using key-value pairs. After entering the key and value, click Confirm. For details about how to use and configure labels and annotations, see Labels and Annotations.
        • DNS: Configure a separate DNS policy for the workload. For details, see DNS Configuration.
        • Network Configuration
          • Pod ingress/egress bandwidth limitation: You can set ingress/egress bandwidth limitation for pods. For details, see Configuring QoS for a Pod.
          • Whether to enable the static IP address: available only for clusters that support this function. After this function is enabled, you can set the interval for reclaiming expired pod IP addresses. For details, see Configuring a Static IP Address for a Pod.
          • IPv6 shared bandwidth: available only for clusters that support this function. After this function is enabled, you can configure a shared bandwidth for a pod with IPv6 dual-stack ENIs. For details, see Configuring Shared Bandwidth for a Pod with IPv6 Dual-Stack ENIs.

    • Click Create Workload in the lower right corner.
    • diff --git a/docs/cce/umn/cce_10_0054.html b/docs/cce/umn/cce_10_0054.html index 8b9329b7..97fe3a2b 100644 --- a/docs/cce/umn/cce_10_0054.html +++ b/docs/cce/umn/cce_10_0054.html @@ -73,7 +73,7 @@ - + + + + - @@ -139,7 +148,7 @@ - - - + + + + @@ -179,7 +195,7 @@ - @@ -208,7 +224,7 @@ - - - diff --git a/docs/cce/umn/cce_10_0059.html b/docs/cce/umn/cce_10_0059.html index f12e6fcb..02f70447 100644 --- a/docs/cce/umn/cce_10_0059.html +++ b/docs/cce/umn/cce_10_0059.html @@ -14,21 +14,13 @@ - - - - - @@ -55,7 +47,7 @@ spec: - protocol: TCP port: 6379

      The following figure shows how podSelector works.

      -
      Figure 1 podSelector
      +
      Figure 1 podSelector
      • Using namespaceSelector to specify the access scope
        apiVersion: networking.k8s.io/v1
         kind: NetworkPolicy
        @@ -74,11 +66,11 @@ spec:
             - protocol: TCP
               port: 6379

        The following figure shows how namespaceSelector works.

        -
        Figure 2 namespaceSelector
        +
        Figure 2 namespaceSelector

      Using Egress Rules

      Egress supports not only podSelector and namespaceSelector, but also ipBlock.

      -

      Only clusters of version 1.23 or later support Egress rules. Only nodes running EulerOS 2.5 or EulerOS 2.9 are supported.

      +

      Only clusters of version 1.23 or later support Egress rules. Only nodes running EulerOS 2.9 are supported.

      apiVersion: networking.k8s.io/v1
       kind: NetworkPolicy
      @@ -98,7 +90,7 @@ spec:
               except:
               - 172.16.0.40/32        # This CIDR block cannot be accessed. This value must fall within the range specified by cidr.

      The following figure shows how ipBlock works.

      -
      Figure 3 ipBlock
      +
      Figure 3 ipBlock

      You can define ingress and egress in the same rule.

      apiVersion: networking.k8s.io/v1
       kind: NetworkPolicy
      @@ -126,10 +118,10 @@ spec:
               matchLabels:
                 role: web

      The following figure shows how to use ingress and egress together.

      -
      Figure 4 Using both ingress and egress
      +
      Figure 4 Using both ingress and egress
      -

      Creating a Network Policy on the Console

      1. Log in to the CCE console and click the cluster name to access the cluster console.
      2. Choose Policies in the navigation pane, click the Network Policies tab, and click Create Network Policy in the upper right corner.

        • Policy Name: Specify a network policy name.
        • Namespace: Select a namespace in which the network policy is applied.
        • Selector: Enter a label, select the pod to be associated, and click Add. You can also click Reference Workload Label to reference the label of an existing workload.
        • Inbound Rule: Click to add an inbound rule. For details about parameter settings, see Table 1.

          -

          +

          Creating a Network Policy on the Console

          1. Log in to the CCE console and click the cluster name to access the cluster console.
          2. Choose Policies in the navigation pane, click the Network Policies tab, and click Create Network Policy in the upper right corner.

            • Policy Name: Specify a network policy name.
            • Namespace: Select a namespace in which the network policy is applied.
            • Selector: Enter a label, select the pod to be associated, and click Add. You can also click Reference Workload Label to use the label of an existing workload.
            • Inbound Rule: Click to add an inbound rule. For details about parameter settings, see Table 1.

              +

      Table 3 Service Network

      Parameter

      +
      @@ -109,7 +115,7 @@ - + + +
      Table 3 Service network

      Parameter

      Description

      Service CIDR Block

      Configure the Service CIDR blocks for containers in the same cluster to access each other. The value determines the maximum number of Services you can create. This configuration cannot be modified after the cluster is created.

      +

      Configure the Service CIDR blocks for containers in the same cluster to access each other. The value determines the maximum number of Services you can create. The value cannot be changed after the cluster is created.

      Request Forwarding

      @@ -118,6 +124,11 @@
      • iptables is the traditional kube-proxy mode. This mode applies to the scenario where the number of Services is small or a large number of short connections are concurrently sent on the client. IPv6 clusters do not support iptables.
      • IPVS allows higher throughput and faster forwarding. This mode applies to scenarios where the cluster scale is large or the number of Services is large.

      IPv6 Service CIDR Block

      +

      Configure this parameter only when IPv6 dual stack is enabled for a CCE Turbo cluster. This configuration cannot be modified after the cluster is created.

      +
      @@ -131,7 +142,7 @@

      Certificate Authentication

      • If Automatically generated is selected, the X509-based authentication mode will be enabled by default. X509 is a commonly used certificate format.
      • If Bring your own is selected, the cluster can identify users based on the header in the request body for authentication.

        You need to upload your own CA root certificate, client certificate, and private key.

        +
      • If Automatically generated is selected, the X509-based authentication mode will be enabled by default. X509 is a commonly used certificate format.
      • If Bring your own is selected, the cluster can identify users based on the header in the request body for authentication.

        Upload your CA root certificate, client certificate, and private key.

        CAUTION:
        • Upload a file smaller than 1 MB. The CA certificate and client certificate can be in .crt or .cer format. The private key of the client certificate can only be uploaded unencrypted.
        • The validity period of the client certificate must be longer than five years.
        • The uploaded CA root certificate is used by the authentication proxy and for configuring the kube-apiserver aggregation layer. If any of the uploaded certificates is invalid, the cluster cannot be created.
        • Starting from v1.25, Kubernetes no longer supports certificate authentication generated using the SHA1WithRSA or ECDSAWithSHA1 algorithm. The certificate authentication generated using the SHA256 algorithm is supported instead.
      @@ -157,7 +168,7 @@

      Resource Tag

      You can add resource tags to classify resources.

      -

      You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

      +

      You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

      Description

      @@ -205,7 +216,7 @@

      CCE Node Problem Detector

      (Optional) If selected, this add-on (CCE Node Problem Detector) will be automatically installed. CCE Node Problem Detector helps detect node problems and provides node isolation capability so that node problems can be identified in a timely manner.

      +

      (Optional) If selected, this add-on (CCE Node Problem Detector) will be automatically installed to detect faults and isolate nodes for prompt cluster troubleshooting.

      CPU Quota

      • Request: minimum number of CPU cores required by a container. The default value is 0.25 cores.
      • Limit: maximum number of CPU cores available for a container. Do not leave Limit unspecified. Otherwise, intensive use of container resources will occur and your workload may exhibit unexpected behavior.
      +
      • Request: minimum number of CPU cores required by a container. The default value is 0.25 cores.
      • Limit: maximum number of CPU cores that can be used by a container. This prevents containers from using excessive resources.

      If Request and Limit are not specified, the quota is not limited. For more information and suggestions about Request and Limit, see Configuring Container Specifications.

      CPU Quota

      • Request: minimum number of CPU cores required by a container. The default value is 0.25 cores.
      • Limit: maximum number of CPU cores available for a container. Do not leave Limit unspecified. Otherwise, intensive use of container resources will occur and your workload may exhibit unexpected behavior.
      +
      • Request: minimum number of CPU cores required by a container. The default value is 0.25 cores.
      • Limit: maximum number of CPU cores that can be used by a container. This prevents containers from using excessive resources.

      If Request and Limit are not specified, the quota is not limited. For more information and suggestions about Request and Limit, see Configuring Container Specifications.

      This operation cannot be undone.

      Worker node

      +

      Worker node

      Modifying the security group of a node in a cluster

      NOTE:

      Naming rule of a security group: Cluster name-cce-node-Random digits

      @@ -84,6 +84,15 @@

      Restore the security group and allow traffic from the security group to pass through.

      Modifying the DNS configuration (/etc/resolv.conf) of a node

      +

      Internal domain names cannot be accessed, which may lead to failures in functions such as add-ons and in-place node upgrades.

      +
      NOTE:

      If your service needs to use an on-premises DNS server, configure the DNS in the workload instead of changing the node's DNS address. For details, see DNS Configuration.

      +
      +

      Restore the DNS configuration based on the DNS configuration of a new node.
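      As a sketch only, an on-premises DNS server can be specified for a workload through the pod's dnsConfig instead of modifying the node's /etc/resolv.conf; the pod name, server address, and search domain below are placeholders:

apiVersion: v1
kind: Pod
metadata:
  name: dns-example                  # hypothetical name
spec:
  dnsPolicy: "None"                  # do not inherit the node or cluster DNS settings
  dnsConfig:
    nameservers:
    - 10.0.0.190                     # placeholder address of the on-premises DNS server
    searches:
    - example.local                  # placeholder search domain
  containers:
  - name: container-0
    image: nginx:latest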

      +

      Deleting the node

      The node will become unavailable.

      @@ -130,7 +139,7 @@

      Deleting or modifying the /opt/cloud/cce and /var/paas directories, and deleting the data disk

      The node will become unready.

      +

      The node will become unavailable.

      Reset the node. For details, see Resetting a Node.

      The permissions will be abnormal.

      You are not advised to modify the permissions. Restore the permissions if they are modified.

      +

      Do not modify the permissions. Restore the permissions if they have been modified.

      Formatting or partitioning system disks, Docker disks, and kubelet disks on nodes.

      @@ -163,11 +172,18 @@

      Reset the node. For details, see Resetting a Node.

      Delete system images such as cce-pause from the node.

      +

      Deleting system images such as cce-pause from the node

      Containers cannot be created and system images cannot be pulled.

      Copy the image from another normal node for restoration.

      +

      Copy the image from a functional node for restoration.

      +

      Changing the flavor of a node in a node pool on the ECS console

      +

      If a node flavor is different from the flavor specified in the node pool where the node resides, the number of nodes added during a node pool scale-out may differ from the expected number.

      +

      Change the node flavor to the one specified in the node pool, or delete the node and perform a node pool scale-out again.

      Impact

      How to Avoid/Fix

      +

      Solution

      The DNS in the cluster cannot work properly.

      Restore the security group by referring to Creating a CCE Cluster and allow traffic from the security group to pass through.

      +

      Restore the security group by referring to Creating a CCE Standard/Turbo Cluster and allow traffic from the security group to pass through.

      Deleting CRD resources of network-attachment-definitions of default-network

      @@ -220,11 +236,11 @@

      Enabling the iptables firewall

      By default, iptables firewall is disabled on CCE. Enabling the firewall can leave the network inaccessible.

      -
      NOTE:

      Do not enable iptables firewall. If iptables firewall must be enabled, check whether the rules configured in /etc/sysconfig/iptables and /etc/sysconfig/ip6tables will affect the network in the test environment.

      +

      By default, the iptables firewall is disabled on CCE. Enabling the firewall can leave the network inaccessible.

      +
      NOTE:

      Do not enable the iptables firewall. If the iptables firewall must be enabled, check whether the rules configured in /etc/sysconfig/iptables and /etc/sysconfig/ip6tables in the test environment will affect the network.

      Disable the iptables firewall and check the rules configured in /etc/sysconfig/iptables and /etc/sysconfig/ip6tables.

      +

      Disable the iptables firewall and check the rules configured in /etc/sysconfig/iptables and /etc/sysconfig/ip6tables.

      EulerOS 2.5

      -

      v1.23 or later

      -

      3.10.0-862.14.1.5.h591.eulerosv2r7.x86_64

      -

      3.10.0-862.14.1.5.h687.eulerosv2r7.x86_64

      -

      EulerOS 2.9

      +

      EulerOS 2.9

      v1.23 or later

      4.18.0-147.5.1.6.h541.eulerosv2r9.x86_64

      4.18.0-147.5.1.6.h766.eulerosv2r9.x86_64

      -

      4.18.0-147.5.1.6.h998.eulerosv2r9.x86_64

      +

      4.18.0-147.5.1.6.h998.eulerosv2r9.x86_64

      @@ -149,14 +141,14 @@ spec: -
      Table 1 Adding an inbound rule

      Parameter

      Source Pod Label

      Allow accesses from the pods with this label. If this parameter is not specified, all pods in the namespace can access the port.

      +

      Allow access to the pods with this label. If this parameter is not specified, all pods in the namespace can be accessed.

      -
    • Outbound Rule: Click to add an outbound rule. For details about parameter settings, see Table 1.

      +
    • Outbound Rule: Click to add an outbound rule. For details about parameter settings, see Table 1.

      @@ -189,7 +181,7 @@ spec: -

    • Click OK.
    • +

    • After the configuration is complete, click OK.
    • diff --git a/docs/cce/umn/cce_10_0063.html b/docs/cce/umn/cce_10_0063.html index cd954471..8131e9e1 100644 --- a/docs/cce/umn/cce_10_0063.html +++ b/docs/cce/umn/cce_10_0063.html @@ -6,13 +6,13 @@

      Viewing a Node Scaling Policy

      You can view the associated node pool, rules, and scaling history of a node scaling policy and rectify faults according to the error information displayed.

      1. Log in to the CCE console and click the cluster name to access the cluster console.
      2. In the navigation pane, choose Nodes. On the page displayed, click the Node Pools tab and then the name of the node pool for which an auto scaling policy has been created to view the node pool details.
      3. On the node pool details page, click the Auto Scaling tab to view the auto scaling configuration and scaling records.
      -

      Deleting a Node Scaling Policy

      1. Log in to the CCE console and click the cluster name to access the cluster console.
      2. In the navigation pane, choose Policies. On the page displayed, click the Node Scaling tab, locate the row containing the target policy and choose More > Delete in the Operation column.
      3. In the Delete Node Scaling Policy dialog box displayed, confirm whether to delete the policy.
      4. Click Yes to delete the policy.
      +

      Deleting a Node Scaling Policy

      1. Log in to the CCE console and click the cluster name to access the cluster console.
      2. In the navigation pane, choose Policies. On the page displayed, click the Node Scaling Policies tab, locate the row containing the target policy and choose More > Delete in the Operation column.
      3. In the Delete Node Scaling Policy dialog box displayed, confirm whether to delete the policy.
      4. Click Yes to delete the policy.
      -

      Editing a Node Scaling Policy

      1. Log in to the CCE console and click the cluster name to access the cluster console.
      2. In the navigation pane, choose Policies. On the page displayed, click the Node Scaling tab, locate the row containing the target policy and click Edit in the Operation column.
      3. On the Edit Node Scaling Policy page displayed, configure policy parameters listed in Table 2.
      4. After the configuration is complete, click OK.
      +

      Editing a Node Scaling Policy

      1. Log in to the CCE console and click the cluster name to access the cluster console.
      2. In the navigation pane, choose Policies. On the page displayed, click the Node Scaling Policies tab, locate the row containing the target policy and click Edit in the Operation column.
      3. On the Edit Node Scaling Policy page displayed, configure policy parameters listed in Table 2.
      4. After the configuration is complete, click OK.
      -

      Cloning a Node Scaling Policy

      1. Log in to the CCE console and click the cluster name to access the cluster console.
      2. In the navigation pane, choose Policies. On the page displayed, click the Node Scaling tab, locate the row containing the target policy and choose More > Clone in the Operation column.
      3. On the Clone Node Scaling Policy page displayed, certain parameters have been cloned. Add or modify other policy parameters based on service requirements.
      4. Click OK.
      +

      Cloning a Node Scaling Policy

      1. Log in to the CCE console and click the cluster name to access the cluster console.
      2. In the navigation pane, choose Policies. On the page displayed, click the Node Scaling Policies tab, locate the row containing the target policy and choose More > Clone in the Operation column.
      3. On the Clone Node Scaling Policy page displayed, certain parameters have been cloned. Add or modify other policy parameters based on service requirements.
      4. Click OK.
      -

      Enabling or Disabling a Node Scaling Policy

      1. Log in to the CCE console and click the cluster name to access the cluster console.
      2. In the navigation pane, choose Policies. On the page displayed, click the Node Scaling tab, locate the row containing the target policy click Disable in the Operation column. If the policy is in the disabled state, click Enable in the Operation column.
      3. In the dialog box displayed, confirm whether to disable or enable the node policy.
      +

      Enabling or Disabling a Node Scaling Policy

      1. Log in to the CCE console and click the cluster name to access the cluster console.
2. In the navigation pane, choose Policies. On the page displayed, click the Node Scaling Policies tab, locate the row containing the target policy, and click Disable in the Operation column. If the policy is in the disabled state, click Enable in the Operation column.
      3. In the dialog box displayed, confirm whether to disable or enable the node policy.
      diff --git a/docs/cce/umn/cce_10_0064.html b/docs/cce/umn/cce_10_0064.html index 99e747a4..75985e10 100644 --- a/docs/cce/umn/cce_10_0064.html +++ b/docs/cce/umn/cce_10_0064.html @@ -22,7 +22,7 @@ -
      diff --git a/docs/cce/umn/cce_10_0066.html b/docs/cce/umn/cce_10_0066.html index f8e7798f..1f33446a 100644 --- a/docs/cce/umn/cce_10_0066.html +++ b/docs/cce/umn/cce_10_0066.html @@ -4,10 +4,10 @@

      Introduction

      Everest is a cloud native container storage system, which enables clusters of Kubernetes v1.15.6 or later to access cloud storage services through the CSI.

      Everest is a system resource add-on. It is installed by default when a cluster of Kubernetes v1.15 or later is created.

      -

      Constraints

      • If your cluster is upgraded from v1.13 to v1.15, storage-driver will be replaced by Everest (v1.1.6 or later) for container storage. The takeover does not affect the original storage functions.
      • In version 1.2.0 of the Everest add-on, key authentication is optimized when OBS is used. After upgrade Everest from a version earlier than 1.2.0, restart all workloads that use OBS in the cluster. Otherwise, workloads may not be able to use OBS.
      • By default, this add-on is installed in clusters of v1.15 and later. For clusters of v1.13 and earlier, the storage-driver add-on is installed by default.
      +

      Constraints

      • If your cluster is upgraded from v1.13 to v1.15, storage-driver will be replaced by Everest (v1.1.6 or later) for container storage. The takeover does not affect the original storage functions.
      • In version 1.2.0 of the Everest add-on, key authentication is optimized when OBS is used. After the Everest add-on is upgraded from a version earlier than 1.2.0, restart all workloads that use OBS in the cluster. Otherwise, workloads may not be able to use OBS.
      • By default, this add-on is installed in clusters of v1.15 and later. For clusters of v1.13 and earlier, the storage-driver add-on is installed by default.

      Installing the Add-on

      This add-on has been installed by default. If it is uninstalled due to some reasons, you can reinstall it by performing the following steps:

      -
      1. Log in to the CCE console and click the cluster name to access the cluster console. Click Add-ons in the navigation pane, locate CCE Container Storage (Everest) on the right, and click Install.
      2. On the Install Add-on page, configure the specifications.

        +

        1. Log in to the CCE console and click the cluster name to access the cluster console. Click Add-ons in the navigation pane, locate CCE Container Storage (Everest) on the right, and click Install.
        2. On the Install Add-on page, configure the specifications.

      Table 2 Adding an outbound rule

      Parameter

@@ -247,7 +253,7 @@

    • Click Install.
    • Components

      -
      Table 1 Everest parameters

      Parameter

      Description

      @@ -16,8 +16,8 @@

      Pods

      Number of instances for the add-on.

      -

      High availability is not possible with a single add-on instance. If an error occurs on the node where the add-on instance runs, the add-on will fail.

      +

      Number of pods for the add-on.

      +

High availability is not possible with a single pod. If an error occurs on the node where that pod runs, the add-on will fail.

      Containers

      @@ -197,6 +197,12 @@

      This field is left blank by default. You do not need to configure this parameter.

      number_of_reserved_disks

      +

      Number of disks on the node reserved for custom use. This parameter is supported when the add-on version is 2.3.11 or later.

      +

Assume that a maximum of 20 EVS disks can be attached to a node and this parameter is set to 6. Then at most 14 (20 - 6) EVS disks can be attached to the node when the system schedules EVS disk attachment workloads. Of the six reserved disks, one is the system disk and one is a data disk that has already been attached to the node. The remaining four can be attached to the node as additional data disks or as raw disks for a local storage pool.

      +

      over_subscription

      Overcommitment ratio of the local storage pool (local_storage). The default value is 80. If the size of the local storage pool is 100 GB, it can be overcommitted to 180 GB.

      @@ -212,7 +218,7 @@
      In Everest 1.2.26 or later, the performance of attaching a large number of EVS volumes has been optimized. The following parameters can be configured:
      • csi_attacher_worker_threads
      • csi_attacher_detach_worker_threads
      • volume_attaching_flow_ctrl
      -

      The preceding parameters are associated with each other and are constrained by the underlying storage resources in the region where the cluster is located. To attach a large number of volumes (more than 500 EVS volumes per minute), contact customer service and configure the parameters under their guidance to prevent the Everest add-on from running abnormally due to improper parameter settings.

      +

The preceding parameters are associated with each other and are constrained by the underlying storage resources in the region where the cluster is located. To attach a large number of volumes (more than 500 EVS volumes per minute), contact the administrator and configure the parameters under their guidance to prevent the Everest add-on from running abnormally due to improper parameter settings.

    • Configure scheduling policies for the add-on.

      • Scheduling policies do not take effect on add-on instances of the DaemonSet type.
      • When configuring multi-AZ deployment or node affinity, ensure that there are nodes meeting the scheduling policy and that resources are sufficient in the cluster. Otherwise, the add-on cannot run.
      @@ -225,12 +231,12 @@
    • Multi AZ

      • Preferred: Deployment pods of the add-on will be preferentially scheduled to nodes in different AZs. If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ.
      • Equivalent mode: Deployment pods of the add-on are evenly scheduled to the nodes in the cluster in each AZ. If a new AZ is added, you are advised to increase add-on pods for cross-AZ HA deployment. With the Equivalent multi-AZ deployment, the difference between the number of add-on pods in different AZs will be less than or equal to 1. If resources in one of the AZs are insufficient, pods cannot be scheduled to that AZ.
      • Required: Deployment pods of the add-on will be forcibly scheduled to nodes in different AZs. If there are fewer AZs than pods, the extra pods will fail to run.
      +
      • Preferred: Deployment pods of the add-on will be preferentially scheduled to nodes in different AZs. If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ.
      • Equivalent mode: Deployment pods of the add-on are evenly scheduled to the nodes in the cluster in each AZ. If a new AZ is added, you are advised to increase add-on pods for cross-AZ HA deployment. With the Equivalent multi-AZ deployment, the difference between the number of add-on pods in different AZs will be less than or equal to 1. If resources in one of the AZs are insufficient, pods cannot be scheduled to that AZ.
      • Required: Deployment pods of the add-on will be forcibly scheduled to nodes in different AZs. If there are fewer AZs than pods, the extra pods will fail to run.

      Node Affinity

      • Incompatibility: Node affinity is disabled for the add-on.
      • Node Affinity: Specify the nodes where the add-on is deployed. If you do not specify the nodes, the add-on will be randomly scheduled based on the default cluster scheduling policy.
      • Specified Node Pool Scheduling: Specify the node pool where the add-on is deployed. If you do not specify the node pool, the add-on will be randomly scheduled based on the default cluster scheduling policy.
      • Custom Policies: Enter the labels of the nodes where the add-on is to be deployed for more flexible scheduling policies. If you do not specify node labels, the add-on will be randomly scheduled based on the default cluster scheduling policy.

        If multiple custom affinity policies are configured, ensure that there are nodes that meet all the affinity policies in the cluster. Otherwise, the add-on cannot run.

        +
      • Not configured: Node affinity is disabled for the add-on.
      • Node Affinity: Specify the nodes where the add-on is deployed. If you do not specify the nodes, the add-on will be randomly scheduled based on the default cluster scheduling policy.
      • Specified Node Pool Scheduling: Specify the node pool where the add-on is deployed. If you do not specify the node pool, the add-on will be randomly scheduled based on the default cluster scheduling policy.
      • Custom Policies: Enter the labels of the nodes where the add-on is to be deployed for more flexible scheduling policies. If you do not specify node labels, the add-on will be randomly scheduled based on the default cluster scheduling policy.

        If multiple custom affinity policies are configured, ensure that there are nodes that meet all the affinity policies in the cluster. Otherwise, the add-on cannot run.

      Table 5 Everest components

      Component

      +
      diff --git a/docs/cce/umn/cce_10_0068.html b/docs/cce/umn/cce_10_0068.html index f515f229..f2613a90 100644 --- a/docs/cce/umn/cce_10_0068.html +++ b/docs/cce/umn/cce_10_0068.html @@ -1,9 +1,11 @@ -

      Kubernetes Release Notes

      +

      Kubernetes Version Release Notes

@@ -100,7 +100,7 @@

diff --git a/docs/cce/umn/cce_10_0083.html b/docs/cce/umn/cce_10_0083.html index d139db6f..b1a85918 100644 --- a/docs/cce/umn/cce_10_0083.html +++ b/docs/cce/umn/cce_10_0083.html @@ -1,10 +1,10 @@

      Managing Workload Scaling Policies

      -

      Scenario

      After an HPA policy is created, you can update, clone, edit, and delete the policy, as well as edit the YAML file.

      +

      Scenario

      After an HPA policy is created, you can update and delete the policy, as well as edit the YAML file.

      Checking an HPA Policy

      You can view the rules, status, and events of an HPA policy and handle exceptions based on the error information displayed.

      -
      1. Log in to the CCE console and click the cluster name to access the cluster console.
      2. In the navigation pane, choose Policies. On the page displayed, click the HPA Policies tab and then next to the target HPA policy.
      3. In the expanded area, choose View Events in the Operation column. If the policy malfunctions, locate and rectify the fault based on the error message displayed on the page.

        You can also view the created HPA policy on the workload details page.

        +
        1. Log in to the CCE console and click the cluster name to access the cluster console.
2. In the navigation pane, choose Policies. On the page displayed, click the HPA Policies tab and then expand the target HPA policy.
        3. In the expanded area, choose View Events in the Operation column. If the policy malfunctions, locate and rectify the fault based on the error message displayed on the page.

          You can also view the created HPA policy on the workload details page.

          1. Log in to the CCE console and click the cluster name to access the cluster console.
          2. In the navigation pane, choose Workloads. Click the workload name to view its details.
          3. On the workload details page, switch to the Auto Scaling tab page to view the HPA policies. You can also view the scaling policies you configured on the Policies page.
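For reference, each HPA policy managed on this page corresponds to a standard Kubernetes HorizontalPodAutoscaler object. The following is a minimal sketch; the workload name, replica range, and CPU threshold are illustrative:

apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: hpa-example                # Illustrative policy name
  namespace: default
spec:
  scaleTargetRef:                  # Workload to be scaled
    apiVersion: apps/v1
    kind: Deployment
    name: nginx
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 70     # Scale out when average CPU usage exceeds 70%

Editing the policy YAML on the console modifies this object, and you can also inspect it with kubectl get hpa.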
          diff --git a/docs/cce/umn/cce_10_0084.html b/docs/cce/umn/cce_10_0084.html index e2afb6f0..dd5820d2 100644 --- a/docs/cce/umn/cce_10_0084.html +++ b/docs/cce/umn/cce_10_0084.html @@ -1,11 +1,69 @@

          Enabling ICMP Security Group Rules

          -

          Scenario

          If a workload uses UDP for both load balancing and health check, enable ICMP security group rules for the backend servers.

          +

          Application Scenarios

          If a workload uses UDP for both load balancing and health check, enable ICMP security group rules for the backend servers.

          -

          Procedure

          1. Log in to the ECS console, find the ECS corresponding to any node where the workload runs, and click the ECS name. On the displayed ECS details page, record the security group name.
          2. Log in to the VPC console. In the navigation pane on the left, choose Access Control > Security Groups. In the security group list on the right, click the security group name obtained in step 1.
          3. On the page displayed, click the Inbound Rules tab and click Add Rule to add an inbound rule for ECS. Then, click OK.

            • You only need to add security group rules to any node where the workload runs.
            • The security group must have rules to allow access from the CIDR block 100.125.0.0/16.
            -
            -

          +

          Procedure

          1. Log in to the CCE console, choose Service List > Networking > Virtual Private Cloud, and choose Access Control > Security Groups in the navigation pane.
          2. In the security group list, locate the security group of the cluster. Click the Inbound Rules tab page and then Add Rule. In the Add Inbound Rule dialog box, configure inbound parameters.

            +

      Table 5 Add-on components

      Component

      Description

      Changing node pool configurations

      You can modify the node pool name, node quantity, Kubernetes labels (and their quantity), and taints and adjust the disk, OS, and container engine configurations of the node pool.

      +

      You can modify the node pool name, node quantity, Kubernetes labels (and their quantity), resource tags, and taints.

      The deleted or added Kubernetes labels and taints (as well as their quantity) will apply to all nodes in the node pool, which may cause pod re-scheduling. Therefore, exercise caution when performing this operation.

      Nodes in the default node pool cannot be migrated to other node pools, and nodes in a user-created node pool cannot be migrated to other user-created node pools.

      Cloning a node pool

      +

      Copying a node pool

      You can copy the configuration of an existing node pool to create a new node pool.

Cluster Type | ELB Type | Security Group | Protocol & Port | Allowed Source CIDR Block
CCE Standard | Shared | Node security group, which is named in the format of "{Cluster name}-cce-node-{Random ID}". If a custom node security group is bound to the cluster, select the target security group. | All ICMP ports | 100.125.0.0/16 for the shared load balancer
CCE Standard | Dedicated | Node security group, which is named in the format of "{Cluster name}-cce-node-{Random ID}". If a custom node security group is bound to the cluster, select the target security group. | All ICMP ports | Backend subnet of the load balancer
CCE Turbo | Shared | Node security group, which is named in the format of "{Cluster name}-cce-node-{Random ID}". If a custom node security group is bound to the cluster, select the target security group. | All ICMP ports | 100.125.0.0/16 for the shared load balancer
CCE Turbo | Dedicated | ENI security group, which is named in the format of "{Cluster name}-cce-eni-{Random ID}". If a custom ENI security group is bound to the cluster, select the target security group. | All ICMP ports | Backend subnet of the load balancer

    • Click OK.
    • diff --git a/docs/cce/umn/cce_10_0094.html b/docs/cce/umn/cce_10_0094.html index 59f739c9..ed803b10 100644 --- a/docs/cce/umn/cce_10_0094.html +++ b/docs/cce/umn/cce_10_0094.html @@ -3,17 +3,19 @@

      Overview

      Why We Need Ingresses

      A Service is generally used to forward access requests based on TCP and UDP and provide layer-4 load balancing for clusters. However, in actual scenarios, if there is a large number of HTTP/HTTPS access requests on the application layer, the Service cannot meet the forwarding requirements. Therefore, the Kubernetes cluster provides an HTTP-based access mode, ingress.

      An ingress is an independent resource in the Kubernetes cluster and defines rules for forwarding external access traffic. As shown in Figure 1, you can customize forwarding rules based on domain names and URLs to implement fine-grained distribution of access traffic.

      -
      Figure 1 Ingress diagram
      +
      Figure 1 Ingress diagram

      The following describes the ingress-related definitions:

      -
      • Ingress object: a set of access rules that forward requests to specified Services based on domain names or URLs. It can be added, deleted, modified, and queried by calling APIs.
      • Ingress Controller: an executor for request forwarding. It monitors the changes of resource objects such as ingresses, Services, endpoints, secrets (mainly TLS certificates and keys), nodes, and ConfigMaps in real time, parses rules defined by ingresses, and forwards requests to the corresponding backend Services.
      +
      • Ingress object: a set of access rules that forward requests to specified Services based on domain names or URLs. It can be added, deleted, modified, and queried by calling APIs.
      • Ingress Controller: an executor for request forwarding. It monitors the changes of resource objects such as ingresses, Services, endpoints, secrets (mainly TLS certificates and keys), nodes, and ConfigMaps in real time, parses rules defined by ingresses, and forwards requests to the target backend Services.
      -

      Working Principle of ELB Ingress Controller

      ELB Ingress Controller developed by CCE implements layer-7 network access for the internet and intranet (in the same VPC) based on ELB and distributes access traffic to the corresponding Services using different URLs.

      -

      ELB Ingress Controller is deployed on the master node and bound to the load balancer in the VPC where the cluster resides. Different domain names, ports, and forwarding policies can be configured for the same load balancer (with the same IP address). Figure 2 shows the working principle of ELB Ingress Controller.

      +

      Working Rules of LoadBalancer Ingress Controller

      LoadBalancer Ingress Controller developed by CCE implements layer-7 network access for the internet and intranet (in the same VPC) based on ELB and distributes access traffic to the corresponding Services using different URLs.

      +

      LoadBalancer Ingress Controller is deployed on the master node and bound to the load balancer in the VPC where the cluster resides. Different domain names, ports, and forwarding policies can be configured for the same load balancer (with the same IP address). Figure 2 shows the working rules of LoadBalancer Ingress Controller.

      1. A user creates an ingress object and configures a traffic access rule in the ingress, including the load balancer, URL, SSL, and backend service port.
      2. When Ingress Controller detects that the ingress object changes, it reconfigures the listener and backend server route on the ELB side according to the traffic access rule.
      3. When a user accesses a workload, the traffic is forwarded to the corresponding backend service port based on the forwarding policy configured on ELB, and then forwarded to each associated workload through the Service.
      -
      Figure 2 Working principle of ELB Ingress Controller
      +
      Figure 2 Working rules of shared LoadBalancer ingresses in CCE standard and Turbo clusters
      +

      When you use a dedicated load balancer in a CCE Turbo cluster, pod IP addresses are allocated from the VPC and the load balancer can directly access the pods. When creating an ingress for external cluster access, you can use ELB to access a ClusterIP Service and use pods as the backend server of the ELB listener. In this way, external traffic can directly access the pods in the cluster without being forwarded by node ports.

      +
      Figure 3 Working rules of passthrough networking for dedicated LoadBalancer ingresses in CCE Turbo clusters
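For reference, a LoadBalancer ingress that follows the rules above is declared as a standard Kubernetes Ingress object. The sketch below is only illustrative: the ELB annotations (kubernetes.io/elb.id and kubernetes.io/elb.port), the ingress class value, and all names are assumptions, so follow the ingress configuration instructions for your cluster version for the exact fields:

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-demo                              # Illustrative name
  annotations:
    kubernetes.io/elb.id: <load_balancer_id>      # Existing load balancer to bind (assumed annotation)
    kubernetes.io/elb.port: '80'                  # Listener port on the load balancer (assumed annotation)
spec:
  ingressClassName: cce                           # Assumed ingress class for LoadBalancer ingresses
  rules:
  - host: example.com                             # Forwarding rule based on domain name
    http:
      paths:
      - path: /
        pathType: ImplementationSpecific
        backend:
          service:
            name: nginx-svc                       # Backend Service that receives the traffic
            port:
              number: 80

When this object is created, the ingress controller configures the corresponding listener and forwarding policy on the load balancer, as described in the steps above.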
      -

      Services Supported by Ingresses

      Table 1 lists the services supported by ELB Ingresses. -
      Table 1 Services supported by ELB Ingresses

      Cluster Type

      +

      Services Supported by Ingresses

      Table 1 lists the services supported by LoadBalancer ingresses. +
diff --git a/docs/cce/umn/cce_10_0105.html b/docs/cce/umn/cce_10_0105.html index dd5c81f9..940e458a 100644 --- a/docs/cce/umn/cce_10_0105.html +++ b/docs/cce/umn/cce_10_0105.html @@ -132,7 +132,7 @@

@@ -73,29 +73,29 @@
 metadata:
 spec:
   containers:
   - name: liveness
-    image: nginx:alpine
+    image: <image_address>
     args:
     - /server
-    livenessProbe:
-      httpGet:
-        path: /healthz
-        port: 80
-      httpHeaders:
+    livenessProbe:             # Liveness probe
+      httpGet:                 # Checking an HTTP request is used as an example.
+        path: /healthz         # The HTTP check path is /healthz.
+        port: 80               # The check port number is 80.
+      httpHeaders:             # (Optional) The request header name is Custom-Header and the value is Awesome.
      - name: Custom-Header
        value: Awesome
      initialDelaySeconds: 3
      periodSeconds: 3
-    readinessProbe:
-      exec:
-        command:
+    readinessProbe:            # Readiness probe
+      exec:                    # Checking an execution command is used as an example.
+        command:               # Command to be executed
      - cat
      - /tmp/healthy
      initialDelaySeconds: 5
      periodSeconds: 5
-    startupProbe:
-      httpGet:
-        path: /healthz
-        port: 80
+    startupProbe:              # Startup probe
+      httpGet:                 # Checking an HTTP request is used as an example.
+        path: /healthz         # The HTTP check path is /healthz.
+        port: 80               # The check port number is 80.
     failureThreshold: 30
     periodSeconds: 10

diff --git a/docs/cce/umn/cce_10_0113.html b/docs/cce/umn/cce_10_0113.html index bdf02760..9e4e14fa 100644 --- a/docs/cce/umn/cce_10_0113.html +++ b/docs/cce/umn/cce_10_0113.html @@ -7,9 +7,9 @@

      Configurations must be imported to a container as arguments. Otherwise, configurations will be lost after the container restarts.

      Environment variables can be set in the following modes:

      -
      • Custom: Enter the environment variable name and parameter value.
      • Added from ConfigMap key: Import all keys in a ConfigMap as environment variables.
      • Added from ConfigMap: Import a key in a ConfigMap as the value of an environment variable. As shown in Figure 1, if you import configmap_value of configmap_key in a ConfigMap as the value of environment variable key1, an environment variable named key1 whose value is configmap_value exists in the container.
      • Added from secret: Import all keys in a secret as environment variables.
      • Added from secret key: Import the value of a key in a secret as the value of an environment variable. As shown in Figure 1, if you import secret_value of secret_key in secret secret-example as the value of environment variable key2, an environment variable named key2 whose value is secret_value exists in the container.
      • Variable value/reference: Use the field defined by a pod as the value of the environment variable. As shown in Figure 1, if the pod name is imported as the value of environment variable key3, an environment variable named key3 exists in the container and its value is the pod name.
      • Resource Reference: The value of Request or Limit defined by the container is used as the value of the environment variable. As shown in Figure 1, if you import the CPU limit of container-1 as the value of environment variable key4, an environment variable named key4 exists in the container and its value is the CPU limit of container-1.
      +
      • Custom: Enter the environment variable name and parameter value.
      • Added from ConfigMap key: Import all keys in a ConfigMap as environment variables.
      • Added from ConfigMap: Import a key in a ConfigMap as the value of an environment variable. As shown in Figure 1, if you import configmap_value of configmap_key in configmap-example as the value of environment variable key1, an environment variable named key1 whose value is configmap_value is available in the container.
      • Added from secret: Import all keys in a secret as environment variables.
      • Added from secret key: Import the value of a key in a secret as the value of an environment variable. As shown in Figure 1, if you import secret_value of secret_key in secret-example as the value of environment variable key2, an environment variable named key2 whose value is secret_value is available in the container.
      • Variable value/reference: Use the field defined by a pod as the value of the environment variable. As shown in Figure 1, if the pod name is imported as the value of environment variable key3, an environment variable named key3 whose value is the pod name is available in the container.
      • Resource Reference: The value of Request or Limit defined by the container is used as the value of the environment variable. As shown in Figure 1, if you import the CPU limit of container-1 as the value of environment variable key4, an environment variable named key4 whose value is the CPU limit of container-1 is available in the container.
      -

      Adding Environment Variables

      1. Log in to the CCE console.
      2. Click the cluster name to go to the cluster console, choose Workloads in the navigation pane, and click the Create Workload in the upper right corner.
      3. When creating a workload, modify the container information in Container Settings and click the Environment Variables tab.
      4. Configure environment variables.

        Figure 1 Configuring environment variables
        +

        Adding Environment Variables

        1. Log in to the CCE console.
2. Click the cluster name to go to the cluster console, choose Workloads in the navigation pane, and click Create Workload in the upper right corner.
        3. When creating a workload, modify the container information in Container Settings and click the Environment Variables tab.
        4. Configure environment variables.

          Figure 1 Configuring environment variables

        YAML Example

        apiVersion: apps/v1
        @@ -45,12 +45,12 @@ spec:
                       valueFrom:
                         configMapKeyRef:
                           name: configmap-example
        -                  key: key1
        +                  key: configmap_key
                     - name: key2                    # Added from secret key
                       valueFrom:
                         secretKeyRef:
                           name: secret-example
        -                  key: key2
        +                  key: secret_key
                     - name: key3                    # Variable reference, which uses the field defined by a pod as the value of the environment variable.
                       valueFrom:
                         fieldRef:
        diff --git a/docs/cce/umn/cce_10_0125.html b/docs/cce/umn/cce_10_0125.html
        index ad897941..fcc89d8c 100644
        --- a/docs/cce/umn/cce_10_0125.html
        +++ b/docs/cce/umn/cce_10_0125.html
        @@ -11,7 +11,7 @@
         
      5. - diff --git a/docs/cce/umn/cce_10_0127.html b/docs/cce/umn/cce_10_0127.html index e157e7df..6ecef4b7 100644 --- a/docs/cce/umn/cce_10_0127.html +++ b/docs/cce/umn/cce_10_0127.html @@ -1,6 +1,6 @@ -

        CCE Container Storage (FlexVolume, Discarded)

        +

        FlexVolume (Discarded)

        Introduction

        CCE Container Storage (FlexVolume), also called storage-driver, functions as a standard Kubernetes FlexVolume plugin to allow containers to use EVS, SFS, OBS, and SFS Turbo storage resources. By installing and upgrading storage-driver, you can quickly install and update cloud storage capabilities.

        FlexVolume is a system resource add-on. It is installed by default when a cluster of Kubernetes v1.13 or earlier is created.

        @@ -10,7 +10,7 @@

        Installing the Add-on

        This add-on has been installed by default. If it is uninstalled due to some reasons, you can reinstall it by performing the following steps:

        If storage-driver is not installed in a cluster, perform the following steps to install it:

        -
        1. Log in to the CCE console and click the cluster name to access the cluster console. Choose Add-ons in the navigation pane, locate CCE Container Storage (FlexVolume) on the right, and click Install.
        2. Click Install to install the add-on. Note that the storage-driver has no configurable parameters and can be directly installed.
        +
        1. Log in to the CCE console and click the cluster name to access the cluster console. Choose Add-ons in the navigation pane, locate CCE Container Storage (FlexVolume) on the right, and click Install.
        2. Click Install to install the add-on. Note that the storage-driver has no configurable parameters and can be directly installed.
      diff --git a/docs/cce/umn/cce_10_0129.html b/docs/cce/umn/cce_10_0129.html index 866e0c40..1d2e8bb0 100644 --- a/docs/cce/umn/cce_10_0129.html +++ b/docs/cce/umn/cce_10_0129.html @@ -294,12 +294,12 @@ $configBlock
      - - @@ -316,7 +316,7 @@ $configBlock

    • Click Install.
    • Components

      -
      Table 1 Services supported by LoadBalancer ingresses

      Cluster Type

      ELB Type

      CLI

      Set commands to be executed in the container for pre-stop processing. The command format is Command Args[1] Args[2].... Command is a system command or a user-defined executable program. If no path is specified, an executable program in the default path will be selected. If multiple commands need to be executed, write the commands into a script for execution.

      +

      Set commands to be executed in the container for pre-stop processing. The command format is Command Args[1] Args[2].... Command is a system command or a user-defined executable program. If no path is specified, an executable program in the default path will be selected. If multiple commands need to be executed, write the commands into a script for execution.

      Example command:

      exec: 
         command: 
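For reference, a complete pre-stop command in a workload manifest typically takes the following form; the wrapper shell and the 30-second wait are illustrative:

lifecycle:
  preStop:                    # Pre-stop processing
    exec:
      command:                # Command and arguments, one list item each
      - /bin/sh
      - -c
      - sleep 30              # Illustrative: give the container time to drain connections before it stops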
      diff --git a/docs/cce/umn/cce_10_0107.html b/docs/cce/umn/cce_10_0107.html
      index ed25c303..d114977f 100644
      --- a/docs/cce/umn/cce_10_0107.html
      +++ b/docs/cce/umn/cce_10_0107.html
      @@ -1,12 +1,12 @@
       
       
       

      Connecting to a Cluster Using kubectl

      -

      Scenario

      This section uses a CCE standard cluster as an example to describe how to connect to a CCE cluster using kubectl.

      +

      Scenario

      This section uses a CCE standard cluster as an example to describe how to access a CCE cluster using kubectl.

      -

      Permissions

      When you access a cluster using kubectl, CCE uses kubeconfig.json generated on the cluster for authentication. This file contains user information, based on which CCE determines which Kubernetes resources can be accessed by kubectl. The permissions recorded in a kubeconfig.json file vary from user to user.

      +

      Permissions

      When you access a cluster using kubectl, CCE uses kubeconfig generated on the cluster for authentication. This file contains user information, based on which CCE determines which Kubernetes resources can be accessed by kubectl. The permissions recorded in a kubeconfig file vary from user to user.

      For details about user permissions, see Cluster Permissions (IAM-based) and Namespace Permissions (Kubernetes RBAC-based).

      -

      Using kubectl

      To connect to a Kubernetes cluster from a PC, you can use kubectl, a Kubernetes command line tool. You can log in to the CCE console and click the name of the target cluster to access the cluster console. On the Overview page, view the access address and kubectl connection procedure.

      +

      Using kubectl

      To connect to a Kubernetes cluster from a PC, you can use kubectl, a Kubernetes command line tool. You can log in to the CCE console and click the name of the target cluster to access the cluster console. On the Overview page, view the access address and kubectl connection procedure.

      CCE allows you to access a cluster through a private network or a public network.
      • Intranet access: The client that accesses the cluster must be in the same VPC as the cluster.
      • Public access: The client that accesses the cluster must be able to access public networks and the cluster has been bound with a public network IP.

        To bind an EIP to the cluster, go to the Overview page and click Bind next to EIP in the Connection Information area. In a cluster with an EIP bound, kube-apiserver will be exposed to the Internet and may be attacked. To solve this problem, you can configure Advanced Anti-DDoS for the EIP of the node on which kube-apiserver runs.

      @@ -20,12 +20,12 @@ curl -LO https://dl.k8s.io/release/{v1.25.
    • Install kubectl.
      chmod +x kubectl
       mv -f kubectl /usr/local/bin
    • -

    • Obtain the kubectl configuration file (kubeconfig).

      On the Overview page, locate the Connection Info area, click Configure next to kubectl. On the window displayed, download the configuration file.

      -
      • The kubectl configuration file kubeconfig.json is used for cluster authentication. If the file is leaked, your clusters may be attacked.
      • The Kubernetes permissions assigned by the configuration file downloaded by IAM users are the same as those assigned to the IAM users on the CCE console.
      • If the KUBECONFIG environment variable is configured in the Linux OS, kubectl preferentially loads the KUBECONFIG environment variable instead of $home/.kube/config.
      +

    • Obtain the kubectl configuration file (kubeconfig).

      On the Overview page, locate the Connection Info area, click Configure next to kubectl. On the page displayed, download the configuration file.

      +
      • The kubectl configuration file kubeconfig is used for cluster authentication. If the file is leaked, your clusters may be attacked.
      • The Kubernetes permissions assigned by the configuration file downloaded by IAM users are the same as those assigned to the IAM users on the CCE console.
      • If the KUBECONFIG environment variable is configured in the Linux OS, kubectl preferentially loads the KUBECONFIG environment variable instead of $home/.kube/config.
      -

    • Configure kubectl.

      Configure kubectl (A Linux OS is used).
      1. Log in to your client and copy the kubeconfig.json configuration file downloaded in 2 to the /home directory on your client.
      2. Configure the kubectl authentication file.
        cd /home
        +

      3. Configure kubectl.

        Configure kubectl (A Linux OS is used).
        1. Log in to your client and copy the kubeconfig.yaml file downloaded in 2 to the /home directory on your client.
        2. Configure the kubectl authentication file.
          cd /home
           mkdir -p $HOME/.kube
          -mv -f kubeconfig.json $HOME/.kube/config
          +mv -f kubeconfig.yaml $HOME/.kube/config
    • Switch the kubectl access mode based on service scenarios.
      • Run this command to enable intra-VPC access:
        kubectl config use-context internal
      • Run this command to enable public access (EIP required):
        kubectl config use-context external
      • Run this command to enable public access and two-way authentication (EIP required):
        kubectl config use-context externalTLSVerify
        @@ -36,7 +36,7 @@ mv -f kubeconfig.json $HOME/.kube/config

      • Two-Way Authentication for Domain Names

        CCE supports two-way authentication for domain names.

        -
        • After an EIP is bound to an API Server, two-way domain name authentication will be disabled by default if kubectl is used to connect to the cluster. You can run kubectl config use-context externalTLSVerify to switch to the externalTLSVerify context to enable the two-way domain name authentication.
        • When an EIP is bound to or unbound from a cluster, or a custom domain name is configured or updated, the cluster server certificate will be added the latest cluster access address (including the EIP bound to the cluster and all custom domain names configured for the cluster).
        • Asynchronous cluster synchronization takes about 5 to 10 minutes. You can view the synchronization result in Synchronize Certificate in Operation Records.
        • For a cluster that has been bound to an EIP, if the authentication fails (x509: certificate is valid) when two-way authentication is used, bind the EIP again and download kubeconfig.json again.
        • If the domain name two-way authentication is not supported, kubeconfig.json contains the "insecure-skip-tls-verify": true field, as shown in Figure 1. To use two-way authentication, you can download the kubeconfig.json file again and enable two-way authentication for the domain names.
          Figure 1 Two-way authentication disabled for domain names
          +
          • After an EIP is bound to an API Server, two-way domain name authentication is disabled by default if kubectl is used to access the cluster. You can run kubectl config use-context externalTLSVerify to enable the two-way domain name authentication.
• When an EIP is bound to or unbound from a cluster, or a custom domain name is configured or updated, the latest cluster access address (including the EIP bound to the cluster and all custom domain names configured for the cluster) will be added to the cluster server certificate.
          • Asynchronous cluster synchronization takes about 5 to 10 minutes. You can view the synchronization result in Synchronize Certificate in Operation Records.
          • For a cluster that has been bound to an EIP, if the authentication fails (x509: certificate is valid) when two-way authentication is used, bind the EIP again and download kubeconfig.yaml again.
          • If the two-way domain name authentication is not supported, kubeconfig.yaml contains the "insecure-skip-tls-verify": true field, as shown in Figure 1. To use two-way authentication, download the kubeconfig.yaml file again and enable two-way authentication for the domain names.
            Figure 1 Two-way authentication disabled for domain names

        FAQs

        • Error from server Forbidden

          When you use kubectl to create or query Kubernetes resources, the following output is returned:

          diff --git a/docs/cce/umn/cce_10_0112.html b/docs/cce/umn/cce_10_0112.html index fafbcf71..cff0b184 100644 --- a/docs/cce/umn/cce_10_0112.html +++ b/docs/cce/umn/cce_10_0112.html @@ -3,7 +3,7 @@

          Configuring Container Health Check

          Scenario

          Health check regularly checks the health status of containers during container running. If the health check function is not configured, a pod cannot detect application exceptions or automatically restart the application to restore it. This will result in a situation where the pod status is normal but the application in the pod is abnormal.

          Kubernetes provides the following health check probes:

          -
          • Liveness probe (livenessProbe): checks whether a container is still alive. It is similar to the ps command that checks whether a process exists. If the liveness check of a container fails, the cluster restarts the container. If the liveness check is successful, no operation is executed.
          • Readiness probe (readinessProbe): checks whether a container is ready to process user requests. Upon that the container is detected unready, service traffic will not be directed to the container. It may take a long time for some applications to start up before they can provide services. This is because that they need to load disk data or rely on startup of an external module. In this case, the application process is running, but the application cannot provide services. To address this issue, this health check probe is used. If the container readiness check fails, the cluster masks all requests sent to the container. If the container readiness check is successful, the container can be accessed.
          • Startup probe (startupProbe): checks when a containerized application has started. If such a probe is configured, it disables liveness and readiness checks until it succeeds, ensuring that those probes do not interfere with the application startup. This can be used to adopt liveness checks on slow starting containers, avoiding them getting terminated by the kubelet before they are started.
          +
          • Liveness probe (livenessProbe): checks whether a container is still alive. It is similar to the ps command that checks whether a process exists. If the liveness check of a container fails, the cluster restarts the container. If the liveness check is successful, no operation is executed.
• Readiness probe (readinessProbe): checks whether a container is ready to process user requests. If a container is detected as not ready, service traffic will not be directed to it. It may take a long time for some applications to start up before they can provide services, because they need to load disk data or rely on the startup of an external module. In this case, although the application process has started, the application cannot provide services. To address this issue, this health check probe is used. If the container readiness check fails, the cluster masks all requests sent to the container. If the container readiness check is successful, the container can be accessed.
          • Startup probe (startupProbe): checks when a containerized application has started. If such a probe is configured, it disables liveness and readiness checks until it succeeds, ensuring that those probes do not interfere with the application startup. This can be used to adopt liveness checks on slow starting containers, avoiding them getting terminated by the kubelet before they are started.

          Check Method

          • HTTP request

            This health check mode applies to containers that provide HTTP/HTTPS services. The cluster periodically initiates an HTTP/HTTPS GET request to such containers. If the return code of the HTTP/HTTPS response is within 200–399, the probe is successful. Otherwise, the probe fails. In this health check mode, you must specify a container listening port and an HTTP/HTTPS request path.

            For example, for a container that provides HTTP services, the HTTP check path is /health-check, the port is 80, and the host address is optional (which defaults to the container IP address). Here, 172.16.0.186 is used as an example, and we can get such a request: GET http://172.16.0.186:80/health-check. The cluster periodically initiates this request to the container. You can also add one or more headers to an HTTP request. For example, set the request header name to Custom-Header and the corresponding value to example.

            @@ -13,7 +13,7 @@

            The CLI mode can be used to replace the HTTP request-based and TCP port-based health check.

            • For a TCP port, you can use a program script to connect to a container port. If the connection is successful, the script returns 0. Otherwise, the script returns –1.
            • For an HTTP request, you can use the script command to run the wget command to detect the container.

              wget http://127.0.0.1:80/health-check

              Check the return code of the response. If the return code is within 200–399, the script returns 0. Otherwise, the script returns –1.

              -
              • Put the program to be executed in the container image so that the program can be executed.
              • If the command to be executed is a shell script, do not directly specify the script as the command, but add a script parser. For example, if the script is /data/scripts/health_check.sh, you must specify sh/data/scripts/health_check.sh for command execution. The reason is that the cluster is not in the terminal environment when executing programs in a container.
              +
              • Put the program to be executed in the container image so that the program can be executed.
• If the command to be executed is a shell script, do not directly specify the script as the command, but add a script parser. For example, if the script is /data/scripts/health_check.sh, you must specify sh /data/scripts/health_check.sh for command execution.
          • gRPC Check
            gRPC checks can configure startup, liveness, and readiness probes for your gRPC application without exposing any HTTP endpoint, nor do you need an executable. Kubernetes can connect to your workload via gRPC and obtain its status.
            • The gRPC check is supported only in CCE clusters of v1.25 or later.
            • To use gRPC for check, your application must support the gRPC health checking protocol.
            • Similar to HTTP and TCP probes, if the port is incorrect or the application does not support the health checking protocol, the check fails.
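For example, a gRPC liveness check only needs the port on which the application serves the gRPC health checking protocol. A minimal sketch, with an illustrative port:

livenessProbe:
  grpc:
    port: 50051               # Port that serves the gRPC health checking protocol
  initialDelaySeconds: 10
  periodSeconds: 10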
            @@ -57,7 +57,7 @@
• Number of retries when the detection fails.

If a liveness probe gives up, the container is restarted. If a readiness probe gives up, the pod is marked Unready.

      -

      The default value is 3. The minimum value is 1.

      +

      The default value is 3, and the minimum value is 1.

      Multi AZ

      • Preferred: Deployment pods of the add-on will be preferentially scheduled to nodes in different AZs. If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ.
      • Equivalent mode: Deployment pods of the add-on are evenly scheduled to the nodes in the cluster in each AZ. If a new AZ is added, you are advised to increase add-on pods for cross-AZ HA deployment. With the Equivalent multi-AZ deployment, the difference between the number of add-on pods in different AZs will be less than or equal to 1. If resources in one of the AZs are insufficient, pods cannot be scheduled to that AZ.
      • Required: Deployment pods of the add-on will be forcibly scheduled to nodes in different AZs. If there are fewer AZs than pods, the extra pods will fail to run.
      +
      • Preferred: Deployment pods of the add-on will be preferentially scheduled to nodes in different AZs. If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ.
      • Equivalent mode: Deployment pods of the add-on are evenly scheduled to the nodes in the cluster in each AZ. If a new AZ is added, you are advised to increase add-on pods for cross-AZ HA deployment. With the Equivalent multi-AZ deployment, the difference between the number of add-on pods in different AZs will be less than or equal to 1. If resources in one of the AZs are insufficient, pods cannot be scheduled to that AZ.
      • Required: Deployment pods of the add-on will be forcibly scheduled to nodes in different AZs. If there are fewer AZs than pods, the extra pods will fail to run.

      Node Affinity

      • Incompatibility: Node affinity is disabled for the add-on.
      • Node Affinity: Specify the nodes where the add-on is deployed. If you do not specify the nodes, the add-on will be randomly scheduled based on the default cluster scheduling policy.
      • Specified Node Pool Scheduling: Specify the node pool where the add-on is deployed. If you do not specify the node pool, the add-on will be randomly scheduled based on the default cluster scheduling policy.
      • Custom Policies: Enter the labels of the nodes where the add-on is to be deployed for more flexible scheduling policies. If you do not specify node labels, the add-on will be randomly scheduled based on the default cluster scheduling policy.

        If multiple custom affinity policies are configured, ensure that there are nodes that meet all the affinity policies in the cluster. Otherwise, the add-on cannot run.

        +
      • Not configured: Node affinity is disabled for the add-on.
      • Node Affinity: Specify the nodes where the add-on is deployed. If you do not specify the nodes, the add-on will be randomly scheduled based on the default cluster scheduling policy.
      • Specified Node Pool Scheduling: Specify the node pool where the add-on is deployed. If you do not specify the node pool, the add-on will be randomly scheduled based on the default cluster scheduling policy.
      • Custom Policies: Enter the labels of the nodes where the add-on is to be deployed for more flexible scheduling policies. If you do not specify node labels, the add-on will be randomly scheduled based on the default cluster scheduling policy.

        If multiple custom affinity policies are configured, ensure that there are nodes that meet all the affinity policies in the cluster. Otherwise, the add-on cannot run.

      Table 6 CoreDNS components

      Component

      +
      @@ -345,7 +345,7 @@ $configBlock
      1. The query is first sent to the DNS caching layer in CoreDNS.
      2. From the caching layer, the suffix of the request is examined and then the request is forwarded to the corresponding DNS:
        • Names with the cluster suffix, for example, .cluster.local: The request is sent to CoreDNS.
        • Names with the stub domain suffix, for example, .acme.local: The request is sent to the configured custom DNS resolver that listens, for example, on 1.2.3.4.
        • Names that do not match the suffix (for example, widget.com): The request is forwarded to the upstream DNS.
      -
      Figure 1 Routing
      +
      Figure 1 Routing
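CCE manages the CoreDNS configuration through the add-on settings rather than through direct edits to the Corefile, but as an aid to understanding the routing above, the equivalent CoreDNS server blocks would look roughly like the following sketch. The stub domain and resolver address are the examples used above, and the ConfigMap form shown is how upstream Kubernetes stores this configuration:

apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns                  # Illustrative; in CCE, this is generated from the add-on configuration
  namespace: kube-system
data:
  Corefile: |
    cluster.local:53 {           # Names with the cluster suffix are answered by CoreDNS itself
        kubernetes cluster.local in-addr.arpa ip6.arpa
        cache 30
    }
    acme.local:53 {              # Stub domain: forwarded to the custom DNS resolver
        forward . 1.2.3.4
    }
    .:53 {                       # Everything else is forwarded to the upstream DNS
        forward . /etc/resolv.conf
        cache 30
    }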
      diff --git a/docs/cce/umn/cce_10_0132.html b/docs/cce/umn/cce_10_0132.html index 2455a7a4..428f5016 100644 --- a/docs/cce/umn/cce_10_0132.html +++ b/docs/cce/umn/cce_10_0132.html @@ -22,7 +22,7 @@
      - @@ -37,31 +37,31 @@

    • Configure the add-on parameters.

      Only v1.16.0 and later versions support the configurations.

      -
    • Table 6 Add-on components

      Component

      Description

      The specifications can be Custom.

      Instances

      +

      Pods

      If you select Custom, you can adjust the number of pods as required.

      Table 2 NPD parameters

      Parameter

      +
      - - - - - - - - - @@ -78,12 +78,12 @@ - - @@ -100,7 +100,7 @@

    • Click Install.
    • Components

      -
      Table 2 NPD parameters

      Parameter

      Description

      +

      Description

      common.image.pullPolicy

      +

      common.image.pullPolicy

      An image pulling policy. The default value is IfNotPresent.

      +

      An image pulling policy. The default value is IfNotPresent.

      feature_gates

      +

      feature_gates

      A feature gate

      +

      A feature gate

      npc.maxTaintedNode

      +

      npc.maxTaintedNode

      The maximum number of nodes that NPC can add taints to when a single fault occurs on multiple nodes for minimizing impact.

      +

      The maximum number of nodes that NPC can add taints to when a single fault occurs on multiple nodes for minimizing impact.

      The value can be in int or percentage format.

      npc.nodeAffinity

      +

      npc.nodeAffinity

      Node affinity of the controller

      +

      Node affinity of the controller

      Multi AZ

      • Preferred: Deployment pods of the add-on will be preferentially scheduled to nodes in different AZs. If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ.
      • Equivalent mode: Deployment pods of the add-on are evenly scheduled to the nodes in the cluster in each AZ. If a new AZ is added, you are advised to increase add-on pods for cross-AZ HA deployment. With the Equivalent multi-AZ deployment, the difference between the number of add-on pods in different AZs will be less than or equal to 1. If resources in one of the AZs are insufficient, pods cannot be scheduled to that AZ.
      • Required: Deployment pods of the add-on will be forcibly scheduled to nodes in different AZs. If there are fewer AZs than pods, the extra pods will fail to run.

      Node Affinity

      • Incompatibility: Node affinity is disabled for the add-on.
      • Node Affinity: Specify the nodes where the add-on is deployed. If you do not specify the nodes, the add-on will be randomly scheduled based on the default cluster scheduling policy.
      • Specified Node Pool Scheduling: Specify the node pool where the add-on is deployed. If you do not specify the node pool, the add-on will be randomly scheduled based on the default cluster scheduling policy.
      • Custom Policies: Enter the labels of the nodes where the add-on is to be deployed for more flexible scheduling policies. If you do not specify node labels, the add-on will be randomly scheduled based on the default cluster scheduling policy.

        If multiple custom affinity policies are configured, ensure that there are nodes that meet all the affinity policies in the cluster. Otherwise, the add-on cannot run.

        +
      • Not configured: Node affinity is disabled for the add-on.
      • Node Affinity: Specify the nodes where the add-on is deployed. If you do not specify the nodes, the add-on will be randomly scheduled based on the default cluster scheduling policy.
      • Specified Node Pool Scheduling: Specify the node pool where the add-on is deployed. If you do not specify the node pool, the add-on will be randomly scheduled based on the default cluster scheduling policy.
      • Custom Policies: Enter the labels of the nodes where the add-on is to be deployed for more flexible scheduling policies. If you do not specify node labels, the add-on will be randomly scheduled based on the default cluster scheduling policy.

        If multiple custom affinity policies are configured, ensure that there are nodes that meet all the affinity policies in the cluster. Otherwise, the add-on cannot run.

      Table 4 NPD components

      Component


      Typical scenario: Disk I/O suspension causes process suspension.

    • Status-related

      For status-related check items, when a problem occurs, NPD reports an event to the API server and changes the node status synchronously. This function can be used together with Node-problem-controller fault isolation to isolate nodes.

      If the check period is not specified in the following check items, the default period is 30 seconds.
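
      For illustration only, such a status change typically appears as a condition in the node object in addition to the reported event. The sketch below shows what a condition set for the FrequentKubeletRestart check item (described later in this section) might look like; all field values are placeholders.

      # Hypothetical excerpt of a node's status after a status-related check item is
      # triggered; the condition type matches the check item name and all values are
      # placeholders.
      status:
        conditions:
        - type: FrequentKubeletRestart
          status: "True"                 # "True" indicates the problem is currently present
          reason: FrequentKubeletRestart
          message: kubelet restarted 10 times within 10 minutes
          lastHeartbeatTime: "2024-01-01T00:00:00Z"
          lastTransitionTime: "2024-01-01T00:00:00Z"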

      -
    • Table 4 Add-on components

      Component

      Description

      Warning event

      Listening object: /dev/kmsg

      Matching rule: "task \\S+:\\w+ blocked for more than \\w+ seconds\\."

      Warning event

      Listening object: /dev/kmsg

      Matching rule: Remounting filesystem read-only

      Table 6 Checking system components

      Check Item

      Function

      Description

      Container network component error

      CNIProblem

      Check the status of the CNI components (container network components).

      None

      Container runtime component error

      CRIProblem

      Check the status of Docker and containerd of the CRI components (container runtime components).

      Check object: Docker or containerd

      Frequent restarts of Kubelet

      FrequentKubeletRestart

      Periodically backtrack system logs to check whether the key component Kubelet restarts frequently.

      • Default threshold: 10 restarts within 10 minutes

        If Kubelet restarts 10 times within 10 minutes, the system is considered to restart frequently and a fault alarm is generated.

      • Listening object: logs in the /run/log/journal directory

      Periodically backtrack system logs to check whether the container runtime containerd restarts frequently.

      kubelet error

      KubeletProblem

      Check the status of the key component Kubelet.

      None

      kube-proxy error

      KubeProxyProblem

      Check the status of the key component kube-proxy.

      None


      Typical scenario: When creating a node, a user configures two data disks as a persistent volume storage pool. Some data disks are deleted by mistake.

      for dir in `df -h | grep -v "Mounted on" | awk "{print \\$NF}"`;do cd $dir; done && echo "ok"
      Table 8 Checking the storage

      Check Item

      Function

      Description

      Disk read-only

      DiskReadonly

      Periodically perform write tests on the system disk and CCE data disks (including the CRI logical disk and Kubelet logical disk) of the node to check the availability of key disks.

      Detection paths:

      • /mnt/paas/kubernetes/kubelet/
      • /var/lib/docker/
      • /var/lib/containerd/
      • /var/paas/sys/log/cceaddon-npd/
      @@ -316,10 +316,10 @@

      Currently, additional data disks are not supported.

      emptyDir storage pool error

      EmptyDirVolumeGroupStatusError

      Check whether the ephemeral volume group on the node is normal.

      Impact: Pods that depend on the storage pool cannot write data to the temporary volume. The temporary volume is remounted as a read-only file system by the kernel due to an I/O error.

      Typical scenario: When creating a node, a user configures two data disks as a temporary volume storage pool. Some data disks are deleted by mistake. As a result, the storage pool becomes abnormal.

      Mount point error

      MountPointProblem

      Check the mount point on the node.

      Exception definition: The mount point cannot be accessed by running the cd command.

      Typical scenario: A network file system (NFS), for example, one mounted using obsfs or s3fs, is mounted to a node. When the connection becomes abnormal due to network issues or exceptions on the peer NFS server, all processes that access the mount point are suspended. For example, during a cluster upgrade, kubelet is restarted and scans all mount points. If an abnormal mount point is detected, the upgrade fails.

      Suspended disk I/O

      DiskHung

      Check whether I/O suspension occurs on all disks on the node, that is, whether I/O read and write operations receive no response.

      Definition of I/O suspension: The system does not respond to disk I/O requests, and some processes are in the D state.

      Typical scenario: Disks cannot respond due to abnormal OS hard disk drivers or severe faults on the underlying network.

      Slow disk I/O

      DiskSlow

      Check whether all disks on the node have slow I/O, that is, whether I/O requests are responded to slowly.

      Typical scenario: EVS disks have slow I/Os due to network fluctuation.

      • Check object: all data disks
      • Source:

        /proc/diskstat

      diff --git a/docs/cce/umn/cce_10_0141.html b/docs/cce/umn/cce_10_0141.html
      index 3371e20a..8357225e 100644
      --- a/docs/cce/umn/cce_10_0141.html
      +++ b/docs/cce/umn/cce_10_0141.html
      @@ -23,11 +23,11 @@ cd /usr/local/nvidia/bin && ./nvidia-smi
      • Container:
        cd /usr/local/nvidia/bin && ./nvidia-smi

      If GPU information is returned, the device is available and the add-on has been installed.

      Obtaining the Driver Link from Public Network

      1. Log in to the CCE console.
      2. Click Create Node and select the GPU node to be created in the Specifications area. The GPU card model of the node is displayed in the lower part of the page.
      1. Visit https://www.nvidia.com/Download/Find.aspx?lang=en.
      2. Select the driver information on the NVIDIA Driver Downloads page, as shown in Figure 1. Operating System must be Linux 64-bit.

        Figure 1 Setting parameters

      3. After confirming the driver information, click SEARCH. A page is displayed, showing the driver information, as shown in Figure 2. Click DOWNLOAD.

        Figure 2 Driver information

      4. Obtain the driver link in either of the following ways:

        • Method 1: As shown in Figure 3, find url=/tesla/470.103.01/NVIDIA-Linux-x86_64-470.103.01.run in the browser address box. Then, supplement it to obtain the driver link https://us.download.nvidia.com/tesla/470.103.01/NVIDIA-Linux-x86_64-470.103.01.run. By using this method, you must bind an EIP to each GPU node.
        • Method 2: As shown in Figure 3, click AGREE & DOWNLOAD to download the driver. Then, upload the driver to OBS and record the OBS URL. By using this method, you do not need to bind an EIP to GPU nodes.

          Figure 3 Obtaining the link
          @@ -36,7 +36,7 @@ cd /usr/local/nvidia/bin && ./nvidia-smi

        • In the bucket list, click a bucket name, and then the Overview page of the bucket is displayed.
        • In the navigation pane, choose Objects.
        • Select the name of the target object and copy the driver link on the object details page.

      Components


    • Click Install.
    • Components

      -
      Table 1 GPU component

      Component

      +
      diff --git a/docs/cce/umn/cce_10_0142.html b/docs/cce/umn/cce_10_0142.html
      index 20298410..b85c2884 100644
      --- a/docs/cce/umn/cce_10_0142.html
      +++ b/docs/cce/umn/cce_10_0142.html
      @@ -2,12 +2,12 @@

      NodePort

      Scenario

      A Service is exposed on each node's IP address at a static port (NodePort). When you create a NodePort Service, Kubernetes automatically allocates an internal IP address (ClusterIP) of the cluster. When clients outside the cluster access <NodeIP>:<NodePort>, the traffic will be forwarded to the target pod through the ClusterIP of the NodePort Service.

      Figure 1 NodePort access
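
      For reference, a NodePort Service like the one in this scenario can be declared with a manifest similar to the following minimal sketch; the Service name, the app: nginx selector, and the port numbers are example values.

      # Illustrative NodePort Service: exposes pods labeled app: nginx on a static
      # port of every node. All names and port numbers are example values.
      apiVersion: v1
      kind: Service
      metadata:
        name: nginx-nodeport
        namespace: default
      spec:
        type: NodePort
        externalTrafficPolicy: Cluster   # cluster-level affinity; Local for node-level
        selector:
          app: nginx
        ports:
        - protocol: TCP
          port: 80            # Service port
          targetPort: 80      # container port the workload listens on
          nodePort: 30080     # optional; omit it to let Kubernetes allocate 30000-32767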

      Constraints

      • By default, a NodePort Service is accessed within a VPC. To use an EIP to access a NodePort Service through public networks, bind an EIP to the node in the cluster in advance.
      • After a Service is created, if the affinity setting is switched from the cluster level to the node level, the connection tracing table will not be cleared. Do not modify the Service affinity setting after the Service is created. To modify it, create a Service again.
      • CCE Turbo clusters support only cluster-level service affinity.
      • In VPC network mode, when container A is published through a NodePort service and the service affinity is set to the node level (that is, externalTrafficPolicy is set to local), container B deployed on the same node cannot access container A through the node IP address and NodePort service.
      • When a NodePort service is created in a cluster of v1.21.7 or later, the port on the node is not displayed using netstat by default. If the cluster forwarding mode is iptables, run the iptables -t nat -L command to view the port. If the cluster forwarding mode is IPVS, run the ipvsadm -Ln command to view the port.
      -

      Creating a NodePort Service

      1. Log in to the CCE console and click the cluster name to access the cluster console.
      2. In the navigation pane, choose Services & Ingresses. In the upper right corner, click Create Service.
      3. Set intra-cluster access parameters.

        • Service Name: Specify a Service name, which can be the same as the workload name.
        • Service Type: Select NodePort.
        • Namespace: Namespace to which the workload belongs.
        • Service Affinity: For details, see externalTrafficPolicy (Service Affinity).
          • Cluster level: The IP addresses and access ports of all nodes in a cluster can access the workload associated with the Service. Service access will cause performance loss due to route redirection, and the source IP address of the client cannot be obtained.
          • Node level: Only the IP address and access port of the node where the workload is located can access the workload associated with the Service. Service access will not cause performance loss due to route redirection, and the source IP address of the client can be obtained.
          -
        • Selector: Add a label and click Confirm. A Service selects a pod based on the added label. You can also click Reference Workload Label to reference the label of an existing workload. In the dialog box that is displayed, select a workload and click OK.
        • Port Settings
          • Protocol: protocol used by the Service.
          • Service Port: port used by the Service. The port number ranges from 1 to 65535.
          • Container Port: port on which the workload listens. For example, Nginx uses port 80 by default.
          • Node Port: You are advised to select Auto. You can also specify a port. The default port ranges from 30000 to 32767.
          +

          Creating a NodePort Service

          1. Log in to the CCE console and click the cluster name to access the cluster console.
          2. In the navigation pane, choose Services & Ingresses. In the upper right corner, click Create Service.
          3. Configure intra-cluster access parameters.

            • Service Name: Specify a Service name, which can be the same as the workload name.
            • Service Type: Select NodePort.
            • Namespace: Namespace to which the workload belongs.
            • Service Affinity: For details, see externalTrafficPolicy (Service Affinity).
              • Cluster level: The IP addresses and access ports of all nodes in a cluster can access the workload associated with the Service. Service access will cause performance loss due to route redirection, and the source IP address of the client cannot be obtained.
              • Node level: Only the IP address and access port of the node where the workload is located can access the workload associated with the Service. Service access will not cause performance loss due to route redirection, and the source IP address of the client can be obtained.
              +
            • Selector: Add a label and click Confirm. A Service selects a pod based on the added label. You can also click Reference Workload Label to use the label of an existing workload. In the dialog box that is displayed, select a workload and click OK.
            • IPv6: This function is disabled by default. After this function is enabled, the cluster IP address of the Service changes to an IPv6 address. This parameter is available only in clusters of v1.15 or later with IPv6 enabled (set during cluster creation).
            • Port Settings
              • Protocol: protocol used by the Service.
              • Service Port: port used by the Service. The port number ranges from 1 to 65535.
              • Container Port: port on which the workload listens. For example, Nginx uses port 80 by default.
              • Node Port: You are advised to select Auto. You can also specify a port. The default port ranges from 30000 to 32767.

          4. Click OK.
          diff --git a/docs/cce/umn/cce_10_0144.html b/docs/cce/umn/cce_10_0144.html
          index 9212b416..2cd16288 100644
          --- a/docs/cce/umn/cce_10_0144.html
          +++ b/docs/cce/umn/cce_10_0144.html
          @@ -1,7 +1,7 @@

          Deploying an Application Through the Helm v3 Client

          -

          Prerequisites

          The Kubernetes cluster created on CCE has been connected to kubectl. For details, see Using kubectl.

          +

          Prerequisites

          • The Kubernetes cluster created on CCE has been connected to kubectl. For details, see Using kubectl.
          • To pull a public image when deploying Helm, ensure an EIP has been bound to the node.

          Installing Helm v3

          This section uses Helm v3.3.0 as an example.

          For other versions, visit https://github.com/helm/helm/releases.

          @@ -18,17 +18,15 @@ version.BuildInfo{Version:"v3.3.0", GitCommit:"e29ce2a54e96cd02ccfce88bee4f58bb6
          1. Search for a chart from the Artifact Hub repository recommended by Helm and configure the Helm repository.

            helm repo add {repo_name} {repo_addr}
            The following uses the WordPress chart as an example:
            helm repo add bitnami https://charts.bitnami.com/bitnami
            -

          2. Run the helm install command to install the chart.

            • Default installation: This is the simplest method, which requires only two parameters.
              helm install {release_name} {chart_name}
              -
              For example, to install WordPress, the WordPress chart added in step 1 is bitnami/wordpress, and the release name is my-wordpress.
              helm install my-wordpress bitnami/wordpress
              -
              -
            • Custom installation: The default installation uses the default settings in the chart. Use custom installation to customize the settings. Run the helm show values {chart_name} command to view the configurable options of the chart. For example, to view the configurable items of WordPress, run the following command:
              helm show values bitnami/wordpress
              -

              Overwrite specified parameters by running the following commands:

              -
              helm install my-wordpress bitnami/wordpress \
              +

            • Run the helm install command to install the chart.

              helm install {release_name} {chart_name} --set key1=val1
              +

              For example, to install WordPress, the WordPress chart added in step 1 is bitnami/wordpress, the release name is my-wordpress, and mandatory parameters have been configured.

              +
              helm install my-wordpress bitnami/wordpress \
                    --set mariadb.primary.persistence.enabled=true \
                    --set mariadb.primary.persistence.storageClass=csi-disk \
                    --set mariadb.primary.persistence.size=10Gi \
                    --set persistence.enabled=false
              -
            +
            Run the helm show values {chart_name} command to view the configurable options of the chart. For example, to view the configurable items of WordPress, run the following command:
            helm show values bitnami/wordpress
            +

          3. View the installed chart release.

            helm list

          diff --git a/docs/cce/umn/cce_10_0146.html b/docs/cce/umn/cce_10_0146.html
          index a913e7d0..73385ff3 100644
          --- a/docs/cce/umn/cce_10_0146.html
          +++ b/docs/cce/umn/cce_10_0146.html
          @@ -33,7 +33,7 @@
      @@ -101,7 +101,7 @@

    • Click Install.

      On the Releases tab page, you can view the installation status of the release.

    • -

      Upgrading a Chart-based Workload

      1. Log in to the CCE console and click the cluster name to access the cluster console. Choose App Templates in the navigation pane and click the Releases tab.
      2. Click Upgrade in the row where the desired workload resides and set the parameters for the workload.
      3. Select a chart version for Chart Version.
      4. Follow the prompts to modify the chart parameters. Click Upgrade, and then click Submit.
      5. Click Back to Release List. If the chart status changes to Upgrade successful, the workload is successfully upgraded.
      +

      Upgrading a Chart-based Workload

      1. Log in to the CCE console and click the cluster name to access the cluster console. Choose App Templates in the navigation pane and click the Releases tab.
      2. Click Upgrade in the row where the desired workload resides and set the parameters for the workload.
      3. Select a chart version for Chart Version.
      4. Follow the prompts to modify the chart parameters. Confirm the modification and click Upgrade.
      5. If the execution status is Upgraded, the workload has been upgraded.

      Rolling Back a Chart-based Workload

      1. Log in to the CCE console and click the cluster name to access the cluster console. Choose App Templates in the navigation pane and click the Releases tab.
      2. Click More > Roll Back for the workload to be rolled back, select the workload version, and click Roll back to this version.

        In the workload list, if the status is Rollback successful, the workload is rolled back successfully.

      diff --git a/docs/cce/umn/cce_10_0150.html b/docs/cce/umn/cce_10_0150.html
      index d9ebc9af..0b5ce823 100644
      --- a/docs/cce/umn/cce_10_0150.html
      +++ b/docs/cce/umn/cce_10_0150.html
      @@ -40,7 +40,7 @@
      - @@ -75,7 +75,7 @@
      • (Optional) Lifecycle: Configure operations to be performed in a specific phase of the container lifecycle, such as Startup Command, Post-Start, and Pre-Stop. For details, see Configuring Container Lifecycle Parameters.
      • (Optional) Environment Variables: Configure variables for the container running environment using key-value pairs. These variables transfer external information to containers running in pods and can be flexibly modified after application deployment. For details, see Configuring Environment Variables.
      • (Optional) Data Storage: Mount local storage or cloud storage to the container. The application scenarios and mounting modes vary with the storage type. For details, see Storage.

        If the workload contains more than one pod, EVS volumes cannot be mounted.

        -
      • (Optional) Logging: Report standard container output logs to AOM by default, without requiring manual settings. You can manually configure the log collection path. For details, see Connecting CCE to AOM.

        To disable the standard output of the current workload, add the annotation kubernetes.AOM.log.stdout: [] in Labels and Annotations. For details about how to use this annotation, see Table 1.

        +
      • (Optional) Logging: Report standard container output logs to AOM by default, without requiring manual settings. You can manually configure the log collection path. For details, see Collecting Container Logs Using ICAgent.

        To disable the standard output of the current workload, add the annotation kubernetes.AOM.log.stdout: [] in Labels and Annotations. For details about how to use this annotation, see Table 1. A minimal sketch of where this annotation sits in a manifest is shown after this list.

    • Image Access Credential: Select the credential used for accessing the image repository. The default value is default-secret. You can use default-secret to access images in SWR. For details about default-secret, see default-secret.
    • (Optional) GPU: All is selected by default. The workload instance will be scheduled to the node of the specified GPU type.
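
      As a reference for the logging option above, the sketch below shows roughly where the kubernetes.AOM.log.stdout annotation sits in a workload manifest; only the annotation key comes from this section, while the Deployment name, labels, and image are placeholders.

      # Illustrative placement of the stdout-collection annotation on the pod template.
      # The Deployment name, labels, and image are placeholders.
      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: example-workload
      spec:
        replicas: 1
        selector:
          matchLabels:
            app: example-workload
        template:
          metadata:
            labels:
              app: example-workload
            annotations:
              kubernetes.AOM.log.stdout: '[]'   # disables standard output collection for this workload
          spec:
            containers:
            - name: container-1
              image: nginx:latest               # placeholder image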
    • @@ -83,7 +83,7 @@
      (Optional) Advanced Settings
      • Labels and Annotations: Add labels or annotations for pods using key-value pairs. After entering the key and value, click Confirm. For details about how to use and configure labels and annotations, see Labels and Annotations.
      • Job Settings
        • Parallel Pods: Maximum number of pods that can run in parallel during job execution. The value cannot be greater than the total number of pods in the job.
        • Timeout (s): Once a job reaches this time, the job status becomes failed and all pods in this job will be deleted. If you leave this parameter blank, the job will never time out.
        • Completion Mode
          • Non-indexed: A job is considered complete when all of its pods are successfully executed. Each completed pod is equivalent to the others.
          • Indexed: Each pod gets an associated completion index from 0 to the number of pods minus 1. The job is considered complete when every pod allocated with an index is successfully executed. For an indexed job, pods are named in the format of $(job-name)-$(index). A sketch of an indexed job manifest is provided at the end of this procedure.
        • Suspend Job: By default, a job is executed immediately after being created. The job's execution will be suspended if you enable this option, and resumed after you disable it.
        -
      • Network Configuration
        • Pod ingress/egress bandwidth limitation: You can set ingress/egress bandwidth limitation for pods. For details, see Configuring QoS for a Pod.
        +
      • Network Configuration

    • Click Create Workload in the lower right corner.
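
      The following is a minimal sketch of an indexed job manifest, referenced from the Completion Mode bullets above; the name, image, and command are placeholders, and the mapping of the console's Timeout (s) setting to activeDeadlineSeconds is an assumption noted in the comments.

      # Illustrative Indexed Job: five completions, each pod receives an index from
      # 0 to 4 through the JOB_COMPLETION_INDEX environment variable and is named
      # $(job-name)-$(index). Name, image, and command are placeholders.
      apiVersion: batch/v1
      kind: Job
      metadata:
        name: indexed-job-example
      spec:
        completions: 5
        parallelism: 2               # at most two pods run in parallel
        completionMode: Indexed
        activeDeadlineSeconds: 600   # assumed equivalent of the console Timeout (s) setting
        template:
          spec:
            restartPolicy: Never
            containers:
            - name: worker
              image: busybox:latest
              command: ["sh", "-c", "echo processing index $JOB_COMPLETION_INDEX"]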
      diff --git a/docs/cce/umn/cce_10_0151.html b/docs/cce/umn/cce_10_0151.html
      index 9026bb54..366a17a4 100644
      --- a/docs/cce/umn/cce_10_0151.html
      +++ b/docs/cce/umn/cce_10_0151.html
      @@ -41,7 +41,7 @@
      @@ -86,7 +86,7 @@
    • Job Records: You can set how many successfully executed or failed jobs are retained. Setting a limit to 0 means that none of the jobs are kept after they finish.
    • (Optional) Advanced Settings
      • Labels and Annotations: Add labels or annotations for pods using key-value pairs. After entering the key and value, click Confirm. For details about how to use and configure labels and annotations, see Labels and Annotations.
      -
      • Network Configuration
        • Pod ingress/egress bandwidth limitation: You can set ingress/egress bandwidth limitation for pods. For details, see Configuring QoS for a Pod.
        +

    • Click Create Workload in the lower right corner.
      diff --git a/docs/cce/umn/cce_10_0152.html b/docs/cce/umn/cce_10_0152.html
      index 9209604b..98f69cc1 100644
      --- a/docs/cce/umn/cce_10_0152.html
      +++ b/docs/cce/umn/cce_10_0152.html
      @@ -33,12 +33,12 @@

      diff --git a/docs/cce/umn/cce_10_0153.html b/docs/cce/umn/cce_10_0153.html
      index 89ed39a7..e6a5e61d 100644
      --- a/docs/cce/umn/cce_10_0153.html
      +++ b/docs/cce/umn/cce_10_0153.html
      @@ -36,14 +36,14 @@
      @@ -67,7 +67,7 @@ data:
       kind: Secret
       metadata:
         name: mysecret           #Secret name
      -  namespace: default       #Namespace. The default value is default.
      +  namespace: default       #Namespace. The default value is default.
       data:
         .dockerconfigjson: eyJh*****    # Content encoded using Base64.
       type: kubernetes.io/dockerconfigjson
      @@ -86,7 +86,7 @@ data:
       apiVersion: v1
       metadata:
         name: mysecret           #Secret name
      -  namespace: default       #Namespace. The default value is default.
      +  namespace: default       #Namespace. The default value is default.
       data:
         tls.crt: LS0tLS1CRU*****FURS0tLS0t   # Certificate content, which must be encoded using Base64.
         tls.key: LS0tLS1CRU*****VZLS0tLS0=   # Private key content, which must be encoded using Base64.
      @@ -96,7 +96,7 @@ data:
       apiVersion: v1
       metadata:
         name: mysecret           #Secret name
      -  namespace: default       #Namespace. The default value is default.
      +  namespace: default       #Namespace. The default value is default.
       data:
         tls.crt: LS0tLS1CRU*****FURS0tLS0t   # Certificate content, which must be encoded using Base64.
         tls.key: LS0tLS1CRU*****VZLS0tLS0=   # Private key content, which must be encoded using Base64.

      diff --git a/docs/cce/umn/cce_10_0154.html b/docs/cce/umn/cce_10_0154.html
      index 4cdc2364..57a6a2bb 100644
      --- a/docs/cce/umn/cce_10_0154.html
      +++ b/docs/cce/umn/cce_10_0154.html
      @@ -151,12 +151,12 @@
      @@ -173,7 +173,7 @@

    • After the configuration is complete, click Install.
    • Components

      -
      Table 1 Add-on components

      Component

      Description

      Describes configuration parameters required by templates.

      NOTICE:

      Make sure that the image address set in the values.yaml file is the same as the image address in the container image repository. Otherwise, an exception occurs when you create a workload, and the system displays a message indicating that the image fails to be pulled.

      To obtain the image address, perform the following operations: Log in to the CCE console. In the navigation pane, choose Image Repository to access the SWR console. Choose My Images > Private Images and click the name of the uploaded image. On the Image Tags tab page, obtain the image address from the pull command. You can click to copy the command in the Image Pull Command column.

      CPU Quota

      • Request: minimum number of CPU cores required by a container. The default value is 0.25 cores.
      • Limit: maximum number of CPU cores available for a container. Do not leave Limit unspecified. Otherwise, intensive use of container resources will occur and your workload may exhibit unexpected behavior.
      +
      • Request: minimum number of CPU cores required by a container. The default value is 0.25 cores.
      • Limit: maximum number of CPU cores that can be used by a container. This prevents containers from using excessive resources.

      If Request and Limit are not specified, the quota is not limited. For more information and suggestions about Request and Limit, see Configuring Container Specifications.

      CPU Quota

      • Request: minimum number of CPU cores required by a container. The default value is 0.25 cores.
      • Limit: maximum number of CPU cores available for a container. Do not leave Limit unspecified. Otherwise, intensive use of container resources will occur and your workload may exhibit unexpected behavior.
      +
      • Request: minimum number of CPU cores required by a container. The default value is 0.25 cores.
      • Limit: maximum number of CPU cores that can be used by a container. This prevents containers from using excessive resources.

      If Request and Limit are not specified, the quota is not limited. For more information and suggestions about Request and Limit, see Configuring Container Specifications.
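
      For reference, the request and limit values described above correspond to the resources field of a container. The following is a minimal sketch with example numbers only.

      # Illustrative container resource quota: requests are the scheduling guarantee,
      # limits are the hard cap. All values are example numbers.
      apiVersion: v1
      kind: Pod
      metadata:
        name: quota-example
      spec:
        containers:
        - name: container-1
          image: nginx:latest      # placeholder image
          resources:
            requests:
              cpu: 250m            # 0.25 vCPU, the default request mentioned above
              memory: 512Mi
            limits:
              cpu: "1"             # maximum CPU the container may use
              memory: 1Gi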

      Data

      Data of a ConfigMap, in the key-value pair format.

      Click to add data. The value can be in string, JSON, or YAML format.

      Label

      Label of the ConfigMap. Enter a key-value pair and click Add.

      +

      Label of the ConfigMap. Enter a key-value pair and click Confirm.
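
      For reference, the Data and Label fields described above map to a ConfigMap manifest along the lines of the following minimal sketch; the name, keys, and values are placeholders.

      # Illustrative ConfigMap: data holds the key-value pairs, metadata.labels holds
      # the labels. Name, keys, and values are placeholders.
      apiVersion: v1
      kind: ConfigMap
      metadata:
        name: example-configmap
        namespace: default
        labels:
          app: example
      data:
        key1: value1
        # Values can also be JSON or YAML text, for example:
        key2: '{"enabled": true}'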

      Secret Data

      Workload secret data can be used in containers.

      • If Secret Type is Opaque, click . In the dialog box displayed, enter a key-value pair and select Auto Base64 Encoding.
      • If Secret Type is kubernetes.io/dockerconfigjson, enter the account and password for logging in to the private image repository.
      • If Secret Type is kubernetes.io/tls or IngressTLS, upload the certificate file and private key file.
        NOTE:
        • A certificate is a self-signed or CA-signed credential used for identity authentication.
        • A certificate request is a request for a signature with a private key.

      Secret Label

      Label of the secret. Enter a key-value pair and click Add.

      +

      Label of the secret. Enter a key-value pair and click Confirm.

      Multi AZ

      • Preferred: Deployment pods of the add-on will be preferentially scheduled to nodes in different AZs. If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ.
      • Equivalent mode: Deployment pods of the add-on are evenly scheduled to the nodes in the cluster in each AZ. If a new AZ is added, you are advised to increase add-on pods for cross-AZ HA deployment. With the Equivalent multi-AZ deployment, the difference between the number of add-on pods in different AZs will be less than or equal to 1. If resources in one of the AZs are insufficient, pods cannot be scheduled to that AZ.
      • Required: Deployment pods of the add-on will be forcibly scheduled to nodes in different AZs. If there are fewer AZs than pods, the extra pods will fail to run.

      Node Affinity

      • Incompatibility: Node affinity is disabled for the add-on.
      • Node Affinity: Specify the nodes where the add-on is deployed. If you do not specify the nodes, the add-on will be randomly scheduled based on the default cluster scheduling policy.
      • Specified Node Pool Scheduling: Specify the node pool where the add-on is deployed. If you do not specify the node pool, the add-on will be randomly scheduled based on the default cluster scheduling policy.
      • Custom Policies: Enter the labels of the nodes where the add-on is to be deployed for more flexible scheduling policies. If you do not specify node labels, the add-on will be randomly scheduled based on the default cluster scheduling policy.

        If multiple custom affinity policies are configured, ensure that there are nodes that meet all the affinity policies in the cluster. Otherwise, the add-on cannot run.

        +
      • Not configured: Node affinity is disabled for the add-on.
      • Node Affinity: Specify the nodes where the add-on is deployed. If you do not specify the nodes, the add-on will be randomly scheduled based on the default cluster scheduling policy.
      • Specified Node Pool Scheduling: Specify the node pool where the add-on is deployed. If you do not specify the node pool, the add-on will be randomly scheduled based on the default cluster scheduling policy.
      • Custom Policies: Enter the labels of the nodes where the add-on is to be deployed for more flexible scheduling policies. If you do not specify node labels, the add-on will be randomly scheduled based on the default cluster scheduling policy.

        If multiple custom affinity policies are configured, ensure that there are nodes that meet all the affinity policies in the cluster. Otherwise, the add-on cannot run.

      Table 5 Autoscaler

      Component

      +
      diff --git a/docs/cce/umn/cce_10_0163.html b/docs/cce/umn/cce_10_0163.html
      index 3a5c60fd..1a1d92ef 100644
      --- a/docs/cce/umn/cce_10_0163.html
      +++ b/docs/cce/umn/cce_10_0163.html
      @@ -55,7 +55,7 @@
      Table 5 Add-on components

      Component

      Description

      Recommended configuration

      Actual available memory of a node ≥ Sum of memory limits of all containers on the current node ≥ Sum of memory requests of all containers on the current node. You can view the actual available memory of a node on the CCE console (Resource Management > Nodes > Allocatable).

      The allocatable resources are calculated based on the resource request value (Request), which indicates the upper limit of resources that can be requested by pods on this node, but does not indicate the actual available resources of the node (for details, see Example of CPU and Memory Quota Usage). The calculation formula is as follows:

      • Allocatable CPU = Total CPU – Requested CPU of all pods – Reserved CPU for other resources
      • Allocatable memory = Total memory – Requested memory of all pods – Reserved memory for other resources
      @@ -105,6 +105,42 @@

      In this case, the remaining 1 core 5 GiB can be used by the next new pod.

      If pod 1 is under heavy load during peak hours, it will use more CPUs and memory within the limit. Therefore, the actual allocatable resources are fewer than 1 core 5 GiB.

      +

      Quotas of Other Resources

      Typically, nodes support local ephemeral storage, which is provided by locally mounted writable devices or RAM. Ephemeral storage does not ensure long-term data availability. Pods can use local ephemeral storage to buffer data and store logs, or mount emptyDir storage volumes to containers. For details, see Local ephemeral storage.

      +

      Kubernetes allows you to specify the requested value and limit value of ephemeral storage in container configurations to manage the local ephemeral storage. The following attributes can be configured for each container in a pod:

      +
      • spec.containers[].resources.limits.ephemeral-storage
      +
      • spec.containers[].resources.requests.ephemeral-storage
      +

      In the following example, a pod contains two containers. The requested value of each container for local ephemeral storage is 2 GiB, and the limit value is 4 GiB. Therefore, the requested value of the pod for local ephemeral storage is 4 GiB, the limit value is 8 GiB, and the emptyDir volume is limited to 500 MiB of the local ephemeral storage.

      +
      apiVersion: v1
      +kind: Pod
      +metadata:
      +  name: frontend
      +spec:
      +  containers:
      +  - name: container-1
      +    image: <example_app_image>
      +    resources:
      +      requests:
      +        ephemeral-storage: "2Gi"
      +      limits:
      +        ephemeral-storage: "4Gi"
      +    volumeMounts:
      +    - name: ephemeral
      +      mountPath: "/tmp"
      +  - name: container-2
      +    image: <example_log_aggregator_image>
      +    resources:
      +      requests:
      +        ephemeral-storage: "2Gi"
      +      limits:
      +        ephemeral-storage: "4Gi"
      +    volumeMounts:
      +    - name: ephemeral
      +      mountPath: "/tmp"
      +  volumes:
      +    - name: ephemeral
      +      emptyDir:
      +        sizeLimit: 500Mi
      +
      Precautions

      • After you create a cluster, CCE automatically assigns the cluster-admin permission to you, which means you have full control on all resources in all namespaces in the cluster. The ID of a federated user changes upon each login and logout. Therefore, the user with the permissions is displayed as deleted. In this case, do not delete the permissions. Otherwise, the authentication fails. You are advised to grant the cluster-admin permission to a user group on CCE and add federated users to the user group.
      • A user with the Security Administrator role has all IAM permissions except role switching. For example, an account in the admin user group has this role by default. Only these users can assign permissions on the Permissions page on the CCE console.

      Configuring Namespace Permissions (on the Console)

      You can regulate users' or user groups' access to Kubernetes resources in a single namespace based on their Kubernetes RBAC roles.

      1. Log in to the CCE console. In the navigation pane, choose Permissions.
      2. Select a cluster for which you want to add permissions from the drop-down list on the right.
      3. Click Add Permissions in the upper right corner.
      4. Confirm the cluster name and select the namespace to assign permissions for. For example, select All namespaces, the target user or user group, and select the permissions.

        If you do not have IAM permissions, you cannot select users or user groups when configuring permissions for other users or user groups. In this case, you can enter a user ID or user group ID.
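
      Behind the console, namespace permissions of this kind correspond to Kubernetes RBAC objects. The following minimal sketch grants get and list on pods in the default namespace, which matches the user-example case verified below; the Role and RoleBinding names are placeholders, and binding the IAM user ID as a User subject is an assumption for illustration.

      # Illustrative namespace-scoped RBAC: a Role allowing get/list on pods in the
      # default namespace, bound to one user. Object names are placeholders; using
      # the IAM user ID as the subject name is an assumption for illustration.
      apiVersion: rbac.authorization.k8s.io/v1
      kind: Role
      metadata:
        name: pod-reader
        namespace: default
      rules:
      - apiGroups: [""]
        resources: ["pods"]
        verbs: ["get", "list"]
      ---
      apiVersion: rbac.authorization.k8s.io/v1
      kind: RoleBinding
      metadata:
        name: pod-reader-binding
        namespace: default
      subjects:
      - kind: User
        name: 0c97ac3cb280f4d91fa7c0096739e1f8   # example user ID from the output below
        apiGroup: rbac.authorization.k8s.io
      roleRef:
        kind: Role
        name: pod-reader
        apiGroup: rbac.authorization.k8s.io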

        @@ -101,7 +101,7 @@ nginx-658dff48ff-njdhj   1/1     Running   0          4d9h
         # kubectl get pod nginx-658dff48ff-7rkph
         NAME                     READY   STATUS    RESTARTS   AGE
         nginx-658dff48ff-7rkph   1/1     Running   0          4d9h

        Try querying Deployments and Services in the namespace. The output shows that user-example does not have the required permissions. Try querying the pods in namespace kube-system. The output shows that user-example does not have the required permissions, either. This indicates that the IAM user user-example has only the GET and LIST Pod permissions in the default namespace, which is the same as expected.

        +

        Try querying Deployments and Services in the namespace. The output shows that user-example does not have the required permissions. Try querying the pods in namespace kube-system. The output shows that user-example does not have the required permissions. This indicates that the IAM user user-example has only the GET and LIST Pod permissions in the default namespace, which is the same as expected.

        # kubectl get deploy
         Error from server (Forbidden): deployments.apps is forbidden: User "0c97ac3cb280f4d91fa7c0096739e1f8" cannot list resource "deployments" in API group "apps" in the namespace "default"
         # kubectl get svc
        diff --git a/docs/cce/umn/cce_10_0190.html b/docs/cce/umn/cce_10_0190.html
        index 596811dd..24053dc7 100644
        --- a/docs/cce/umn/cce_10_0190.html
        +++ b/docs/cce/umn/cce_10_0190.html
        @@ -5,18 +5,15 @@
         
        • Dependency policies are assigned based on the CCE FullAccess or CCE ReadOnlyAccess policy you configure.
        • Only users and user groups with namespace permissions can gain the view access to resources in clusters.
          • If a user is granted the view access to all namespaces of a cluster, the user can view all namespace resources (except secrets) in the cluster. To view secrets in the cluster, the user must gain the admin or edit role in all namespaces of the cluster.
          • The view role within a single namespace allows users to view resources only in the specified namespace.

        Dependency Policy Configuration

        To grant an IAM user the permissions to view or use resources of other cloud services on the CCE console, you must first grant the CCE Administrator, CCE FullAccess, or CCE ReadOnlyAccess policy to the user group to which the user belongs and then grant the dependency policies listed in Table 1 to the user. These dependency policies will allow the IAM user to access resources of other cloud services.

        -

        Enterprise projects can group and manage resources across different projects of an enterprise. Resources are thereby isolated. IAM allows you to implement fine-grained authorization. It is strongly recommended that you use IAM for permissions management.

        -

        If you use an enterprise project to set permissions for IAM users, the following restrictions apply:

        -
        • On the CCE console, enterprise projects cannot call the API used to obtain AOM monitoring data for cluster monitoring. Therefore, IAM users in these enterprise projects cannot query monitoring data.
        • On the CCE console, enterprise projects cannot call the API to query the key pair created during node creation. Therefore, IAM users in these enterprise projects cannot use the key pair login mode. Only the password login mode is supported.
        • On the CCE console, enterprise projects are not supported during template creation. Therefore, enterprise project sub-users cannot use template management.
        • On the CCE console, the EVS disk query API does not support enterprise projects. Therefore, enterprise project IAM users cannot use existing EVS disks to create PVs. To use this function, add the fine-grained permissions such as evs:volumes:get to the IAM users.
        -

        CCE supports fine-grained permissions configuration, but has the following restrictions:

        • AOM does not support resource-level monitoring. After operation permissions on specific resources are configured using IAM's fine-grained cluster resource management function, IAM users can view cluster monitoring information on the Dashboard page of the CCE console, but cannot view the data on non-fine-grained metrics.

        Scalable File Service (SFS)

        @@ -71,9 +68,9 @@

        Scalable File Service (SFS)

        SFS Turbo

        Table 1 Dependency policies

        Console Function

        Dependent Services

        +

        Dependent Service

        Roles or Policies Required

        +

        Role or Policy Required

        Except in the following cases, the user does not require any additional role to create workloads.

        -
        • To create a Service using ELB, you must have the ELB FullAccess or ELB Administrator plus VPC Administrator permissions assigned.
        • To use a Java probe, you must have the AOM FullAccess and APM FullAccess permissions assigned.
        • To create a Service using NAT Gateway, you must have the NAT Gateway Administrator permission assigned.
        • To use OBS, you must have the OBS Administrator permission globally assigned.
          NOTE:

          Because of the cache, it takes about 13 minutes for the RBAC policy to take effect after being granted to users, enterprise projects, and user groups. After an OBS-related system policy is granted, it takes about 5 minutes for the policy to take effect.

          +
          • To create a Service using ELB, you must have the ELB FullAccess or ELB Administrator plus VPC Administrator permissions assigned.
          • To use a Java probe, you must have the AOM FullAccess and APM FullAccess permissions assigned.
          • To create a Service using NAT Gateway, you must have the NAT Gateway Administrator permission assigned.
          • To use OBS, you must have the OBS Administrator permission globally assigned.
            NOTE:

            Because of the cache, it takes about 13 minutes for the RBAC policy to take effect after being granted to users and user groups. After an OBS-related system policy is granted, it takes about 5 minutes for the policy to take effect.

          • To use SFS, you must have the SFS FullAccess permission assigned.
        • To use OBS, you must have the OBS Administrator permission globally assigned.
          NOTE:

          Because of the cache, it takes about 13 minutes for the RBAC policy to take effect after being granted to users, enterprise projects, and user groups. After an OBS-related system policy is granted, it takes about 5 minutes for the policy to take effect.

          +
        • To use OBS, you must have the OBS Administrator permission globally assigned.
          NOTE:

          Because of the cache, it takes about 13 minutes for the RBAC policy to take effect after being granted to users and user groups. After an OBS-related system policy is granted, it takes about 5 minutes for the policy to take effect.

          -
        • To use SFS, you must have the SFS FullAccess permission assigned.
        • Using SFS Turbo requires the SFS Turbo FullAccess role.
        +
      5. To use SFS, you must have the SFS FullAccess permission assigned.
      6. To use SFS Turbo, you must have the SFS Turbo FullAccess permission.
      7. The CCE Administrator role is required for importing storage devices.

        /

        • For cloud accounts, no additional policy/role is required.
        • IAM users with the CCE Administrator or global Security Administrator permission assigned can use this function.
        • IAM users with the CCE FullAccess or CCE ReadOnlyAccess permission can access the namespace. In addition, the IAM users must have the administrator permissions (cluster-admin) on the namespace.
        +
        • For cloud accounts, no additional policy/role is required.
        • IAM users with the CCE Administrator or global Security Administrator permission assigned can use this function.
        • IAM users with the CCE FullAccess or CCE ReadOnlyAccess permission can use this function. In addition, the IAM users must have the administrator permissions (cluster-admin) on the namespace.

        ConfigMaps and Secrets

        diff --git a/docs/cce/umn/cce_10_0191.html b/docs/cce/umn/cce_10_0191.html
        index bda7cb16..9b6c0584 100644
        --- a/docs/cce/umn/cce_10_0191.html
        +++ b/docs/cce/umn/cce_10_0191.html
        @@ -6,7 +6,7 @@

        The relationship between Helm and Kubernetes is as follows:

        • Helm <–> Kubernetes
        • Apt <–> Ubuntu
        • Yum <–> CentOS
        • Pip <–> Python

        The following figure shows the solution architecture:

        -

        +

        Helm helps with application orchestration for Kubernetes:

        • Manages, edits, and updates a large number of Kubernetes configuration files.
        • Deploys a complex Kubernetes application that contains a large number of configuration files.
        • Shares and reuses Kubernetes configurations and applications.
        • Supports multiple environments with parameter-based configuration templates.
        • Manages the release of applications, including rolling back the application, finding differences (using the diff command), and viewing the release history.
        • Controls phases in a deployment cycle.
        • Tests and verifies the released version.
        diff --git a/docs/cce/umn/cce_10_0193.html b/docs/cce/umn/cce_10_0193.html
        index 00abcc31..8dce80b2 100644
        --- a/docs/cce/umn/cce_10_0193.html
        +++ b/docs/cce/umn/cce_10_0193.html
        @@ -19,7 +19,7 @@

        Add-on Specifications

        Select Standalone, HA, or Custom for Add-on Specifications.

        +

        Select Standalone, Custom, or HA for Add-on Specifications.

        Pods

        @@ -32,7 +32,7 @@

        CPU and memory quotas of the container allowed for the selected add-on specifications.

        If you select Custom, the recommended values for volcano-controller and volcano-scheduler are as follows:

        -
        • If the number of nodes is less than 100, retain the default configuration. The requested vCPUs is 500m, and the limit is 2000m. The requested memory is 500 MiB, and the limit is 2000 MiB.
        • If the number of nodes is greater than 100, increase the requested vCPUs by 500m and the requested memory by 1000 MiB each time 100 nodes (10,000 pods) are added. Increase the vCPU limit by 1500m and the memory limit by 1000 MiB.
          NOTE:

          Recommended formula for calculating the requested value:

          +
          • If the number of nodes is less than 100, retain the default configuration. The requested vCPUs are 500m, and the limit is 2000m. The requested memory is 500 MiB, and the limit is 2000 MiB.
          • If the number of nodes is greater than 100, increase the requested vCPUs by 500m and the requested memory by 1000 MiB each time 100 nodes (10,000 pods) are added. Increase the vCPU limit by 1500m and the memory limit by 1000 MiB.
            NOTE:

            Recommended formula for calculating the requested value:

            • Requested vCPUs: Calculate the number of target nodes multiplied by the number of target pods, perform interpolation search based on the number of nodes in the cluster multiplied by the number of target pods in Table 2, and round up the request value and limit value that are closest to the specifications.

              For example, for 2000 nodes and 20,000 pods, Number of target nodes x Number of target pods = 40 million, which is close to the specification of 700/70,000 (Number of cluster nodes x Number of pods = 49 million). According to the following table, set the requested vCPUs to 4000m and the limit value to 5500m.

            • Requested memory: It is recommended that 2.4 GiB memory be allocated to every 1000 nodes and 1 GiB memory be allocated to every 10,000 pods. The requested memory is the sum of these two values. (The obtained value may be different from the recommended value in Table 2. You can use either of them.)

              Requested memory = Number of target nodes/1000 x 2.4 GiB + Number of target pods/10,000 x 1 GiB

              For example, for 2000 nodes and 20,000 pods, the requested memory is 6.8 GiB (2000/1000 x 2.4 GiB + 20,000/10,000 x 1 GiB).
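To repeat this arithmetic for other cluster sizes, the two recommendations can be checked with a small shell snippet. The node and pod counts below are simply the example values from this note:

  # Recompute the recommended volcano-scheduler requests for a given scale.
  nodes=2000
  pods=20000
  # Requested memory = Number of target nodes/1000 x 2.4 GiB + Number of target pods/10,000 x 1 GiB
  awk -v n="$nodes" -v p="$pods" 'BEGIN { printf "requested memory: %.1f GiB\n", n/1000*2.4 + p/10000*1 }'
  # Interpolation key for the vCPU request: Number of target nodes x Number of target pods
  # (for 2000 x 20,000 = 40,000,000, the closest specification in Table 2 is 700/70,000,
  #  which gives a 4000m request and a 5500m limit).
  awk -v n="$nodes" -v p="$pods" 'BEGIN { printf "nodes x pods: %d\n", n*p }'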

              @@ -148,9 +148,9 @@
        -

      8. Configure advanced add-on parameters.

        Configure parameters of the default Volcano scheduler. For details, see Table 4.
        colocation_enable: ''
        +

      9. Configure the add-on parameters.

        Configure parameters of the default Volcano scheduler. For details, see Table 4.
        colocation_enable: ''
         default_scheduler_conf:
        -  actions: 'allocate, backfill'
        +  actions: 'allocate, backfill, preempt'
           tiers:
             - plugins:
                 - name: 'priority'
        @@ -223,8 +223,8 @@ tolerations:
         

      The following options are supported:

      • enqueue: uses a series of filtering algorithms to filter out tasks to be scheduled and sends them to the queue to wait for scheduling. After this action, the task status changes from pending to inqueue.
      • allocate: selects the most suitable node based on a series of pre-selection and selection algorithms.
      • preempt: performs preemption scheduling for tasks with higher priorities in the same queue based on priority rules.
      • backfill: schedules pending tasks as much as possible to maximize the utilization of node resources.
      actions: 'allocate, backfill'
      -
      NOTE:
      • When configuring actions, use either preempt or enqueue.
      • actions is configured in the following sequence: enqueue, allocate, preempt, and backfill.
      +
      actions: 'allocate, backfill, preempt'
      +
      NOTE:

      When configuring actions, use either preempt or enqueue.

      The Dominant Resource Fairness (DRF) scheduling algorithm, which schedules jobs based on their dominant resource share. Jobs with a smaller resource share will be scheduled with a higher priority.

      enablePreemptable:

      -
      • true: Preemption enabled
      • false: Preemption not enabled
      -
      NOTE:

      DRF preemption is disabled by default. The scheduler performs preemption based on job priorities. Enable either DRF preemption or priority preemption.

      -
      +

      -

      - plugins:
         - name: 'drf'
      -    enablePreemptable: false
         - name: 'predicates'
         - name: 'nodeorder'

      Node Affinity

      • Incompatibility: Node affinity is disabled for the add-on.
      • Node Affinity: Specify the nodes where the add-on is deployed. If you do not specify the nodes, the add-on will be randomly scheduled based on the default cluster scheduling policy.
      • Specified Node Pool Scheduling: Specify the node pool where the add-on is deployed. If you do not specify the node pool, the add-on will be randomly scheduled based on the default cluster scheduling policy.
      • Custom Policies: Enter the labels of the nodes where the add-on is to be deployed for more flexible scheduling policies. If you do not specify node labels, the add-on will be randomly scheduled based on the default cluster scheduling policy.

        If multiple custom affinity policies are configured, ensure that there are nodes that meet all the affinity policies in the cluster. Otherwise, the add-on cannot run.

        +
      • Not configured: Node affinity is disabled for the add-on.
      • Node Affinity: Specify the nodes where the add-on is deployed. If you do not specify the nodes, the add-on will be randomly scheduled based on the default cluster scheduling policy.
      • Specified Node Pool Scheduling: Specify the node pool where the add-on is deployed. If you do not specify the node pool, the add-on will be randomly scheduled based on the default cluster scheduling policy.
      • Custom Policies: Enter the labels of the nodes where the add-on is to be deployed for more flexible scheduling policies. If you do not specify node labels, the add-on will be randomly scheduled based on the default cluster scheduling policy.

        If multiple custom affinity policies are configured, ensure that there are nodes that meet all the affinity policies in the cluster. Otherwise, the add-on cannot run.

      Table 6 Volcano components

      Component

      +
      @@ -620,13 +616,13 @@ tolerations:

      Modifying the volcano-scheduler Configurations Using the Console

      volcano-scheduler is the component responsible for pod scheduling. It consists of a series of actions and plugins. Actions should be executed in every step. Plugins provide the action algorithm details in different scenarios. volcano-scheduler is highly scalable. You can specify and implement actions and plugins based on your requirements.

      Volcano allows you to configure the scheduler during installation, upgrade, and editing. The configuration will be synchronized to volcano-scheduler-configmap.

      This section describes how to configure volcano-scheduler.

      -

      Only Volcano of v1.7.1 and later support this function. On the new plugin page, options such as plugins.eas_service and resource_exporter_enable are replaced by default_scheduler_conf.

      +

      Only Volcano of v1.7.1 and later support this function. On the new add-on page, options such as resource_exporter_enable are replaced by default_scheduler_conf.

      Log in to the CCE console and click the cluster name to access the cluster console. Choose Add-ons in the navigation pane. On the right of the page, locate Volcano Scheduler and click Install or Upgrade. In the Parameters area, configure the Volcano parameters.

      • Using resource_exporter:
        {
             "ca_cert": "",
             "default_scheduler_conf": {
        -        "actions": "allocate, backfill",
        +        "actions": "allocate, backfill, preempt",
                 "tiers": [
                     {
                         "plugins": [
        @@ -693,155 +689,6 @@ tolerations:
         }

        After this function is enabled, you can use the functions of both numa-aware and resource_exporter.
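Because the console settings are synchronized to volcano-scheduler-configmap, you can confirm the result after saving the parameters, for example:

  # Read-only check of the synchronized scheduler configuration.
  kubectl get configmap volcano-scheduler-configmap -n kube-system -o yaml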

      -
      • Using eas_service:
        {
        -    "ca_cert": "",
        -    "default_scheduler_conf": {
        -        "actions": "allocate, backfill",
        -        "tiers": [
        -            {
        -                "plugins": [
        -                    {
        -                        "name": "priority"
        -                    },
        -                    {
        -                        "name": "gang"
        -                    },
        -                    {
        -                        "name": "conformance"
        -                    }
        -                ]
        -            },
        -            {
        -                "plugins": [
        -                    {
        -                        "name": "drf"
        -                    },
        -                    {
        -                        "name": "predicates"
        -                    },
        -                    {
        -                        "name": "nodeorder"
        -                    }
        -                ]
        -            },
        -            {
        -                "plugins": [
        -                    {
        -                        "name": "cce-gpu-topology-predicate"
        -                    },
        -                    {
        -                        "name": "cce-gpu-topology-priority"
        -                    },
        -                    {
        -                        "name": "cce-gpu"
        -                    },
        -                    {
        -                        "name": "eas",
        -                        "custom": {
        -                            "availability_zone_id": "",
        -                            "driver_id": "",
        -                            "endpoint": "",
        -                            "flavor_id": "",
        -                            "network_type": "",
        -                            "network_virtual_subnet_id": "",
        -                            "pool_id": "",
        -                            "project_id": "",
        -                            "secret_name": "eas-service-secret"
        -                        }
        -                    }
        -                ]
        -            },
        -            {
        -                "plugins": [
        -                    {
        -                        "name": "nodelocalvolume"
        -                    },
        -                    {
        -                        "name": "nodeemptydirvolume"
        -                    },
        -                    {
        -                        "name": "nodeCSIscheduling"
        -                    },
        -                    {
        -                        "name": "networkresource"
        -                    }
        -                ]
        -            }
        -        ]
        -    },
        -    "server_cert": "",
        -    "server_key": ""
        -}
        -
      • Using ief:
        {
        -    "ca_cert": "",
        -    "default_scheduler_conf": {
        -        "actions": "allocate, backfill",
        -        "tiers": [
        -            {
        -                "plugins": [
        -                    {
        -                        "name": "priority"
        -                    },
        -                    {
        -                        "name": "gang"
        -                    },
        -                    {
        -                        "name": "conformance"
        -                    }
        -                ]
        -            },
        -            {
        -                "plugins": [
        -                    {
        -                        "name": "drf"
        -                    },
        -                    {
        -                        "name": "predicates"
        -                    },
        -                    {
        -                        "name": "nodeorder"
        -                    }
        -                ]
        -            },
        -            {
        -                "plugins": [
        -                    {
        -                        "name": "cce-gpu-topology-predicate"
        -                    },
        -                    {
        -                        "name": "cce-gpu-topology-priority"
        -                    },
        -                    {
        -                        "name": "cce-gpu"
        -                    },
        -                    {
        -                        "name": "ief",
        -                        "enableBestNode": true
        -                    }
        -                ]
        -            },
        -            {
        -                "plugins": [
        -                    {
        -                        "name": "nodelocalvolume"
        -                    },
        -                    {
        -                        "name": "nodeemptydirvolume"
        -                    },
        -                    {
        -                        "name": "nodeCSIscheduling"
        -                    },
        -                    {
        -                        "name": "networkresource"
        -                    }
        -                ]
        -            }
        -        ]
        -    },
        -    "server_cert": "",
        -    "server_key": ""
        -}
        -

      Retaining the Original volcano-scheduler-configmap Configurations

      If you want to use the original configuration after the plugin is upgraded, perform the following steps:

      1. Check and back up the original volcano-scheduler-configmap configuration.

        Example:
        # kubectl edit cm volcano-scheduler-configmap -n kube-system
        @@ -952,61 +799,187 @@ data:
         

      -
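If you prefer a file copy over reading the output of kubectl edit, a minimal backup sketch is as follows (the file name is arbitrary):

  # Save the current configuration to a local file before upgrading the add-on.
  kubectl get configmap volcano-scheduler-configmap -n kube-system -o yaml > volcano-scheduler-configmap-backup.yaml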

      Uninstalling the Volcano Add-on

      After the add-on is uninstalled, all custom Volcano resources (Table 7) will be deleted, including the created resources. Reinstalling the add-on will not inherit or restore the tasks before the uninstallation. It is a good practice to uninstall the Volcano add-on only when no custom Volcano resources are being used in the cluster.

      +

      Collecting Prometheus Metrics

      volcano-scheduler exposes Prometheus metrics through port 8080. You can build a Prometheus collector to identify and obtain volcano-scheduler scheduling metrics from http://{{volcano-schedulerPodIP}}:{{volcano-schedulerPodPort}}/metrics.

      +

      Prometheus metrics can be exposed only by the Volcano add-on of version 1.8.5 or later.
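For a quick manual check, you can read the endpoint directly. The label selector app=volcano-scheduler is an assumption and may differ in your cluster, and the curl command must be run from a node or pod that can reach the pod IP:

  # Look up the volcano-scheduler pod IP and read its Prometheus metrics on port 8080.
  POD_IP=$(kubectl get pod -n kube-system -l app=volcano-scheduler -o jsonpath='{.items[0].status.podIP}')
  curl -s "http://${POD_IP}:8080/metrics" | grep -E 'e2e_scheduling_latency|schedule_attempts_total'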

      +
      -
      Table 6 Add-on components

      Component

      Description

      Table 7 Custom Volcano resources

      Item

      +
Table 7 Key metrics

• e2e_scheduling_latency_milliseconds (Histogram): E2E scheduling latency (ms) (scheduling algorithm + binding). Label: None
• e2e_job_scheduling_latency_milliseconds (Histogram): E2E job scheduling latency (ms). Label: None
• e2e_job_scheduling_duration (Gauge): E2E job scheduling duration. Labels: ["job_name", "queue", "job_namespace"]
• plugin_scheduling_latency_microseconds (Histogram): Add-on scheduling latency (µs). Labels: ["plugin", "OnSession"]
• action_scheduling_latency_microseconds (Histogram): Action scheduling latency (µs). Labels: ["action"]
• task_scheduling_latency_milliseconds (Histogram): Task scheduling latency (ms). Label: None
• schedule_attempts_total (Counter): Number of pod scheduling attempts. unschedulable indicates that the pods cannot be scheduled, and error indicates that the internal scheduler is faulty. Labels: ["result"]
• pod_preemption_victims (Gauge): Number of selected preemption victims. Label: None
• total_preemption_attempts (Counter): Total number of preemption attempts in a cluster. Label: None
• unschedule_task_count (Gauge): Number of unschedulable tasks. Labels: ["job_id"]
• unschedule_job_count (Gauge): Number of unschedulable jobs. Label: None
• job_retry_counts (Counter): Number of job retries. Labels: ["job_id"]

      Uninstalling the Volcano Add-on

      After the add-on is uninstalled, all custom Volcano resources (Table 8) will be deleted, including the created resources. Reinstalling the add-on will not inherit or restore the tasks before the uninstallation. It is a good practice to uninstall the Volcano add-on only when no custom Volcano resources are being used in the cluster.

diff --git a/docs/cce/umn/cce_10_0196.html b/docs/cce/umn/cce_10_0196.html
index b967d9ce..083de630 100644
--- a/docs/cce/umn/cce_10_0196.html
+++ b/docs/cce/umn/cce_10_0196.html
@@ -1,9 +1,9 @@

      NetworkAttachmentDefinition

      -

      Scenario

      In a CCE Turbo cluster, you can configure subnets and security groups for containers by namespace or workload using NetworkAttachmentDefinition CRDs. If you want to configure a specified container subnet and security group for a specified namespace or workload, you can create a custom container NetworkAttachmentDefinition and associate the container network configuration with the corresponding namespace or workload. In this way, service subnets can be planned or services can be securely isolated.

      +

      Binding a Subnet and Security Group to a Namespace or Workload

      +

      Scenario

      In a CCE Turbo cluster, you can configure subnets and security groups for containers by namespace or workload using NetworkAttachmentDefinition CRDs. If you want to configure a specified container subnet and security group for a specified namespace or workload, create a container network configuration and associate it with the target namespace or workload. In this way, service subnets can be planned or services can be securely isolated.

      The following table lists the resources that a container network configuration can be associated with. -
Table 8 Custom Volcano resources

• Command: API group bus.volcano.sh, API version v1alpha1, resource level Namespaced
• Job: API group batch.volcano.sh, API version v1alpha1, resource level Namespaced
• Numatopology: API group nodeinfo.volcano.sh, API version v1alpha1, resource level Cluster
• PodGroup: API group scheduling.volcano.sh, API version v1beta1, resource level Namespaced
• Queue: API group scheduling.volcano.sh, API version v1beta1, resource level Cluster

      Table 1 Associated resources

      Aspect

      +
@@ -21,14 +21,14 @@
@@ -40,21 +40,21 @@

      Constraints

      • Only the default-network supports container ENI prebinding. The creation speed of pods using the custom container network is slower than that of pods using the default-network. Therefore, this function is not suitable for ultra-fast pod scaling scenarios.
      • The default container network configuration default-network cannot be deleted.
      • To delete a NetworkAttachmentDefinition, delete pods (with the cni.yangtse.io/network-status annotation) created using the configuration in the corresponding namespace first. For details, see Deleting a Network Configuration.
      +

      Constraints

      • Only the default container network configuration default-network supports container ENI prebinding. The speed of creating pods using a custom container network configuration is slower than that of creating pods using default-network. Therefore, this function is not suitable for ultra-fast pod scaling.
      • default-network cannot be deleted.
      • If a workload with a fixed IP address needs to be associated with a new container network configuration, the fixed IP address will be invalid when pods are rebuilt. In this case, delete the workload, release the fixed IP address, and create a workload again.
      • Before deleting a custom container network configuration, delete the pods (with the cni.yangtse.io/network-status annotation) created using the configuration in the target namespace. For details, see Deleting a Container Network Configuration.
      -

      Creating a NetworkAttachmentDefinition of the Namespace Type Using the CCE Console

      1. Log in to the CCE console.
      2. Click the cluster name to access the cluster console. Choose Settings in the navigation pane and click the Network tab.

        If default-network exists in the cluster, it will take effect for all pods for which no custom network is configured. Default container subnet in the network configuration area on the Overview page is the container subnet in default-network.

        +

        Using the CCE Console to Create a Container Network Configuration of the Namespace Type

        1. Log in to the CCE console.
        2. Click the cluster name to access the cluster console. Choose Settings in the navigation pane and click the Network tab.

          If default-network is available in the cluster, it takes effect on all pods where no custom container network configuration has been configured. The default container subnet in the network settings on the Overview page is the container subnet in default-network.

          -

        3. View Custom Container Network Settings and click Add. In the dialog box displayed, configure the container subnet and security group.

          • Name: Enter a name that contains a maximum of 253 characters. Do not use default-network, default, mgnt0, and mgnt1.
          • Associated Resource Type: resource type associated with the custom container network configuration. For details, see Table 1. To create a container network configuration of the namespace type, select Namespace.
          • Namespace: Select the namespace to be associated. Namespaces associated with different container network configurations must be unique. If no namespace is available, click Create Namespace to create one.
          • Pod Subnet: Select a subnet. If no subnet is available, click Create Subnet to create a subnet. After the subnet is created, click the refresh button. A maximum of 20 subnets can be selected.
          • Associate Security Group: The default value is the container ENI security group. You can also click Create Security Group to create one. After the security group is created, click the refresh button. A maximum of five security groups can be selected.
          -

        4. Click OK. After the creation is complete, you will be redirected to the network configuration list. You can see that the newly added subnet is in the list.
        +

      3. View the Container Network Security Policy Configuration (Namespace Level) and click Add. In the window that is displayed, configure parameters such as the pod subnet and security group.

        • Name: Enter a name that contains a maximum of 253 characters. Do not use default-network, default, mgnt0, or mgnt1.
        • Associated Resource Type: resource type associated with the custom container network configuration. For details, see Table 1. To create a container network configuration of the namespace type, select Namespace.
        • Namespace: Select the namespace to be associated. The namespaces associated with different container network configurations must be unique. If no namespace is available, click Create Namespace to create one.
        • Pod Subnet: Select a subnet. If no subnet is available, click Create Subnet to create one. After the subnet is created, click the refresh button. A maximum of 20 subnets can be selected.
        • Associate Security Group: The default value is the container ENI security group. You can also click Create Security Group to create one. After the security group is created, click the refresh button. A maximum of five security groups can be selected.
        +

      4. Click OK. After the creation, you will be redirected to the custom container network configuration list, where the new container network configuration is included.
      -

      Using the CCE Console to Create a Container Network Configuration of the Workload Type

      1. Log in to the CCE console.
      2. Click the cluster name to access the cluster console. Choose Settings in the navigation pane and click the Network tab.

        If default-network exists in the cluster, it will take effect for all pods for which no custom network is configured. Default container subnet in the network configuration area on the Overview page is the container subnet in default-network.

        +

        Using the CCE Console to Create a Container Network Configuration of the Workload Type

        1. Log in to the CCE console.
        2. Click the cluster name to access the cluster console. Choose Settings in the navigation pane and click the Network tab.

          If default-network is available in the cluster, it takes effect on all pods where no custom container network configuration has been configured. The default container subnet in the network settings on the Overview page is the container subnet in default-network.

          -

        3. View the Custom Container Network Settings and click Add. In the window that slides out from the right, configure parameters such as the pod subnet and security group.

          • Name: Enter a name that contains a maximum of 253 characters. Do not use default-network, default, mgnt0, and mgnt1.
          • Associated Resource Type: resource type associated with the custom container network configuration. For details, see Table 1. To create a container network configuration of the workload type, select Workloads.
          • Pod Subnet: Select a subnet. If no subnet is available, click Create Subnet to create a subnet. After the subnet is created, click the refresh button. A maximum of 20 subnets can be selected.
          • Associate Security Group: The default value is the container ENI security group. You can also click Create Security Group to create one. After the security group is created, click the refresh button. A maximum of five security groups can be selected.
          -

        4. Click OK. After the creation, you will be redirected to the custom container network configuration list, where the new container network configuration is included.
        5. When creating a workload, select a custom container network configuration.

          1. In the navigation pane, choose Workloads. In the right pane, click the Deployments tab.
          2. Click Create Workload in the upper right corner of the page. In the Advanced Settings area, choose Network Configuration and determine whether to enable a specified container network configuration.
          3. Select an existing container network configuration. If no configuration is available, click Add to create one.
          4. After the configuration, click Create Workload.

            Return to the Settings page. In the container network configuration list, the name of the resource associated with the created container network configuration is displayed.

            +

          5. View the Container Network Security Policy Configuration (Namespace Level) and click Add. In the window that is displayed, configure parameters such as the pod subnet and security group.

            • Name: Enter a name that contains a maximum of 253 characters. Do not use default-network, default, mgnt0, or mgnt1.
            • Associated Resource Type: resource type associated with the custom container network configuration. For details, see Table 1. To create a container network configuration of the workload type, select Workload.
            • Pod Subnet: Select a subnet. If no subnet is available, click Create Subnet to create one. After the subnet is created, click the refresh button. A maximum of 20 subnets can be selected.
            • Associate Security Group: The default value is the container ENI security group. You can also click Create Security Group to create one. After the security group is created, click the refresh button. A maximum of five security groups can be selected.
            +

          6. Click OK. After the creation, you will be redirected to the custom container network configuration list, where the new container network configuration is included.
          7. When creating a workload, you can select a custom container network configuration.

            1. In the navigation pane, choose Workloads. In the right pane, click the Deployments tab.
            2. Click Create Workload in the upper right corner of the page. In the Advanced Settings area, choose Network Configuration and determine whether to enable a specified container network configuration.
            3. Select an existing container network configuration. If no configuration is available, click Add to create one.
            4. After the configuration, click Create Workload.

              Return to the Settings page. In the container network configuration list, the name of the resource associated with the created container network configuration is displayed.

        -

        Creating a NetworkAttachmentDefinition of the Namespace Type Using Kubectl

        This section describes how to create a NetworkAttachmentDefinition of the namespace type using kubectl.

        +

        Using Kubectl to Create a Container Network Configuration of the Namespace Type

        This section describes how to use kubectl to create a container network configuration of the namespace type.

        1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
        2. Modify the networkattachment-test.yaml file.

          vi networkattachment-test.yaml

          apiVersion: k8s.cni.cncf.io/v1
           kind: NetworkAttachmentDefinition
          @@ -264,7 +264,7 @@ spec:
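For reference, a complete namespace-type configuration might look like the following sketch. All IDs are placeholders, and field names such as securityGroups are assumptions for illustration; check them against the parameter table later in this section before use:

  # Write a sample configuration file and apply it (all IDs are placeholders).
  cat > networkattachment-test.yaml <<'EOF'
  apiVersion: k8s.cni.cncf.io/v1
  kind: NetworkAttachmentDefinition
  metadata:
    name: example
    namespace: kube-system                  # fixed to kube-system
    annotations:
      yangtse.io/project-id: "<project-id>"
  spec:
    config: '{
      "type": "eni-neutron",
      "args": {
        "securityGroups": "<security-group-id>",
        "subnets": [
          {"subnetID": "<subnet-id-1>"},
          {"subnetID": "<subnet-id-2>"}
        ]
      }
    }'
  EOF
  kubectl apply -f networkattachment-test.yaml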
           

        Using Kubectl to Create a Container Network Configuration of the Workload Type

        This section describes how to use kubectl to create a container network configuration of the workload type.

        -
        1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
        2. Modify the networkattachment-test.yaml file.

          vi networkattachment-test.yaml

          +
          1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
          2. Modify the networkattachment-test.yaml file.

            vi networkattachment-test.yaml

            apiVersion: k8s.cni.cncf.io/v1
             kind: NetworkAttachmentDefinition
             metadata:
            @@ -301,7 +301,7 @@ spec:
             
             
@@ -439,9 +439,9 @@ spec:
   metadata:
     labels:
       app: nginx
-      yangtse.io/network: "example"    # Name of the custom container network configuration, which can be used to obtain all pods associated with the container network configuration by label
+      yangtse.io/network: "example"    # Name of the custom container network configuration, which can be used to obtain all pods associated with the container network configuration by label
     annotations:
-      yangtse.io/network: "example"    # Name of the custom container network configuration
+      yangtse.io/network: "example"    # Name of the custom container network configuration
 spec:
   containers:
     - name: container-0
@@ -455,12 +455,12 @@ spec:
       memory: 200Mi
   imagePullSecrets:
     - name: default-secret
      • yangtse.io/network: name of the specified custom container network configuration. Only a container network configuration that is not associated with any namespace can be specified. Add this parameter to the label so that you can use the label to obtain all pods associated with this container network configuration.
      +
      • yangtse.io/network: name of the specified custom container network configuration. Only a container network configuration that is not associated with any namespace can be specified. Add this parameter to the label so that you can use the label to obtain all pods associated with this container network configuration.

      -

      Deleting a Network Configuration

      You can delete the new network configuration or view its YAML file.

      -

      Before deleting a network configuration, delete the container corresponding to the configuration. Otherwise, the deletion fails.

      -
      1. Run the following command to filter the pod that uses the configuration in the cluster (example is an example configuration name and you should replace it):
        kubectl get po -A -o=jsonpath="{.items[?(@.metadata.annotations.cni\.yangtse\.io/network-status=='[{\"name\":\"example\"}]')]['metadata.namespace', 'metadata.name']}"
        +

        Deleting a Container Network Configuration

        You can delete the new container network configuration or view its YAML file.

        +

        Before deleting a container network configuration, delete all pods using the configuration. Otherwise, the deletion will fail.

        +
1. Run the following command to filter the pods that use the configuration in the cluster (example is the configuration name used here; replace it with your own):
          kubectl get po -A -o=jsonpath="{.items[?(@.metadata.annotations.cni\.yangtse\.io/network-status=='[{\"name\":\"example\"}]')]['metadata.namespace', 'metadata.name']}"

          The command output contains the pod name and namespace associated with the configuration.

2. Delete the owner of the pod. The owner may be a Deployment, StatefulSet, DaemonSet, or Job. A sample lookup is shown below.
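A minimal lookup sketch, assuming a Deployment-managed pod (the pod, namespace, and Deployment names are placeholders); note that such a pod's direct owner is a ReplicaSet, which in turn belongs to the Deployment:

  # Show the direct owner of a pod returned by the previous command.
  kubectl get pod <pod-name> -n <namespace> -o jsonpath='{range .metadata.ownerReferences[*]}{.kind}{"/"}{.name}{"\n"}{end}'
  # If the owner chain ends in a Deployment, deleting the Deployment removes its pods.
  kubectl delete deployment <deployment-name> -n <namespace>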
diff --git a/docs/cce/umn/cce_10_0197.html b/docs/cce/umn/cce_10_0197.html
index 447c3509..7bef9f3f 100644
--- a/docs/cce/umn/cce_10_0197.html
+++ b/docs/cce/umn/cce_10_0197.html
@@ -1,98 +1,148 @@

        Upgrade Overview

        -

        To enable interoperability from one Kubernetes installation to the next, you must upgrade your Kubernetes clusters before the maintenance period ends.

        -

        After the latest Kubernetes version is available in CCE, CCE will describe the changes in this version.

        -

        You can use the CCE console to upgrade the Kubernetes version of a cluster.

        -

        An upgrade tag will be displayed on the cluster card view if there is a new version for the cluster to upgrade.

        -

        How to check:

        -

Log in to the CCE console and check whether the message "New version available" is displayed for the cluster. If yes, the cluster can be upgraded. View the release notes for the latest version. For details, see Release Notes for CCE Cluster Versions. If no such message is displayed, the cluster is already of the latest version.

        -

        Cluster Upgrade Process

        The cluster upgrade process involves pre-upgrade check, backup, upgrade, and post-upgrade verification.

        -
        Figure 1 Process of upgrading a cluster
        -

        After determining the target version of the cluster, read the precautions carefully and prevent function incompatibility during the upgrade.

        -
        1. Pre-upgrade check

Before a cluster upgrade, CCE checks the compatibility of nodes, add-ons, and workloads in the cluster to minimize the probability of upgrade failures. If any exception is detected, rectify the fault as prompted on the console.

          -
        2. Backup

          Cluster data is backed up before an upgrade by default. You can also back up the entire master nodes as needed.

          -
        3. Upgrade

          During the upgrade, configure upgrade parameters, such as the step for add-on upgrade or node rolling upgrade. After the upgrade parameters are configured, the add-ons and nodes will be upgraded one by one.

          -
        4. Post-upgrade verification

          After the upgrade, manually check services and ensure that services are not interrupted by the upgrade.

          -
        -
        -

        Cluster Upgrade

        The following table describes the target version to which each cluster version can be upgraded and the supported upgrade modes.

        - -
      Table 1 Associated resources

      Category

      Resources a Container Network Configuration Can Associate with

      The workloads associated with the same container network configuration use the same subnet and security group configurations.

      Supported Cluster Version

      +

      Supported cluster versions

      Available only in CCE Turbo clusters of 1.23.8-r0, 1.25.3-r0, or later

      +

      Available only in CCE Turbo clusters of 1.23.8-r0, 1.25.3-r0, or later.

      Available only in CCE Turbo clusters of 1.23.11-r0, 1.25.6-r0, 1.27.3-r0, or later

      +

      Available only in CCE Turbo clusters of 1.23.11-r0, 1.25.6-r0, 1.27.3-r0, 1.28.1-r0, or later.

      Constraint

      +

      Constraints

      The namespaces associated with different container network configurations must be unique.

      String

      API version. The value is fixed at k8s.cni.cncf.io/v1.

      +

      API version. The value is fixed at k8s.cni.cncf.io/v1.

      kind

      @@ -310,7 +310,7 @@ spec:

      String

      Type of the object to be created. The value is fixed at NetworkAttachmentDefinition.

      +

      Type of the object to be created. The value is fixed at NetworkAttachmentDefinition.

      yangtse.io/project-id

      @@ -337,7 +337,7 @@ spec:

      String

      Namespace of the configuration resource. The value is fixed to kube-system.

      +

      Namespace of the configuration resource. The value is fixed to kube-system.

      config

      @@ -369,7 +369,7 @@ spec:

      String

      The value is fixed at eni-neutron.

      +

      The value is fixed at eni-neutron.

      args

      @@ -404,7 +404,7 @@ spec:

      Security group ID. If no security group is planned, select the same security group as that in default-network.

      How to obtain:

      -

      Log in to the VPC console. In the navigation pane, choose Access Control > Security Groups. Click the target security group name and copy the ID on the Summary tab page.

      +

      Log in to the VPC console. In the navigation pane, choose Access Control > Security Groups. Click the target security group name and copy the ID on the Summary tab page.

      subnets

      @@ -417,7 +417,7 @@ spec:
      [{"subnetID":"27d95**"},{"subnetID":"827bb**"},{"subnetID":"bdd6b**"}]

      Subnet ID not used by the cluster in the same VPC.

      How to obtain:

      -

      Log in to the VPC console. In the navigation pane, choose Virtual Private Cloud > Subnets. Click the target subnet name and copy the Subnet ID on the Summary tab page.

      +

      Log in to the VPC console. In the navigation pane, choose Virtual Private Cloud > Subnets. Click the target subnet name and copy the Subnet ID on the Summary tab page.

      Table 1 Cluster upgrade

      Source Version

      +

      CCE strictly complies with community consistency authentication. It releases three Kubernetes versions each year and offers a maintenance period of at least 24 months after each version is released. CCE ensures the stable running of Kubernetes versions during the maintenance period.

      +

      To ensure your service rights and benefits, upgrade your Kubernetes clusters before a maintenance period ends. You can check the Kubernetes version of your cluster on the cluster list page and check whether a new version is available. Proactive cluster upgrades help you:

      +
      1. Reduce security and stability risks: During the iteration of Kubernetes versions, known security and stability vulnerabilities are continuously fixed. Long-term use of EOS clusters will result in security and stability risks to services.
      2. Experience the latest functions: During the iteration of Kubernetes versions, new functions and optimizations are continuously released. For details about the features of the latest version, see Release Notes for CCE Cluster Versions.
      3. Minimize compatibility risks: During the iteration of Kubernetes versions, APIs are continuously modified and functions are deprecated. If a cluster has not been upgraded for a long time, more O&M assurance investment will be required when the cluster is upgraded. Periodic upgrades can effectively mitigate compatibility risks caused by accumulated version differences. It is a good practice to upgrade a patch version every quarter and upgrade a major version to the latest version every year.
4. Obtain more effective technical support: CCE does not provide security patches or issue fixes for Kubernetes cluster versions that have reached EOS, and does not guarantee technical support for those versions.
      +

      Cluster Upgrade Path

      CCE clusters evolve iteratively based on the community Kubernetes version. A CCE cluster version consists of the community Kubernetes version and the CCE patch version. Therefore, two cluster upgrade paths are provided.

      +
      • Upgrading a Kubernetes version +

        Source Kubernetes Version

        Target Version

        -

        Upgrade Mode

        +

        Target Kubernetes Version

        1.25

        +

        v1.13 or earlier

        v1.27

        -

        In-place upgrade

        +

        Not supported

        v1.23

        +

        v1.15

        v1.25

        -

        In-place upgrade

        +

        v1.19

        v1.21

        +

        v1.17

        v1.25

        -

        v1.23

        -

        In-place upgrade

        +

        v1.19

        v1.19

        +

        v1.19

        v1.23

        -

        v1.21

        -

        In-place upgrade

        +

        v1.21 or v1.23

        v1.17

        +

        v1.21

        v1.19

        -

        In-place upgrade

        +

        v1.23 or v1.25

        v1.15

        +

        v1.23

        v1.19

        +

        v1.25 or v1.27

        In-place upgrade

        +

        v1.25

        +

        v1.27

        +

        v1.27

        +

        v1.28

        +
• A version that has reached the end of maintenance cannot be directly upgraded to the latest version. You need to upgrade such a version multiple times, for example, from v1.15 to v1.19, then to v1.23, and finally to v1.27 or v1.28.
• A Kubernetes version can be upgraded only after its patch version has been upgraded to the latest one. CCE automatically generates an optimal upgrade path on the console based on the current cluster version.
        +
        +
      • Upgrading a patch version

Patch version management is available for CCE clusters of v1.19 or later to provide new features and fix bugs and vulnerabilities for in-maintenance clusters without requiring a major version upgrade.

        +

        After a new patch version is released, you can directly upgrade any patch version to the latest patch version. For details about the release history of patch versions, see Patch Version Release Notes.

        +
      -

      Upgrade Mode

      The following table lists the advantages and disadvantages.

      +

      Cluster Upgrade Process

      The cluster upgrade process involves pre-upgrade check, backup, upgrade, and post-upgrade verification.

      +
      Figure 1 Process of upgrading a cluster
      +

After determining the target version of the cluster, read the precautions carefully to prevent functional incompatibility during the upgrade.

      +
      1. Pre-upgrade check

        Before a cluster upgrade, CCE checks mandatory items such as the cluster status, add-ons, and nodes to ensure that the cluster meets the upgrade requirements. For more details, see Pre-upgrade Check. If any check item is abnormal, rectify the fault as prompted on the console.

        +
      2. Backup

        You can use disk snapshots to back up master node data, including CCE component images, component configurations, and etcd data. Back up data before an upgrade. If unexpected cases occur during an upgrade, you can use the backup to quickly restore the cluster.

        -
        Table 2 Advantages and disadvantages

        Upgrade Mode

        +

        Backup Type

        Method

        +

        Backup Object

        Advantage

        +

        Backup Mode

        Disadvantage

        +

        Backup Time

        +

        Rollback Time

        +

        Description

        In-place upgrade

        +

        etcd data backup

        Kubernetes components, network components, and CCE management components are upgraded on the node. During the upgrade, service pods and networks are not affected. The SchedulingDisabled label will be added to all existing nodes. After the upgrade is complete, you can properly use existing nodes.

        +

        etcd data

        You do not need to migrate services, ensuring service continuity.

        +

        Automatic backup during an upgrade

        In-place upgrade does not upgrade the OS of a node. If you want to upgrade the OS, clear the corresponding node data after the node upgrade is complete and reset the node to upgrade the OS to a new version.

        +

        1-5 minutes

        +

        2 hours

        +

        Mandatory. The data is automatically backed up during an upgrade.

        +

        CBR cloud server backup

        +

        Master node disks, including component images, configurations, logs, and etcd data

        +

        One-click backup on web pages (manually triggered)

        +

        20 minutes to 2 hours (based on the cloud backup tasks in the current region)

        +

        20 minutes

        +

This function is being gradually replaced by EVS snapshot backup.

        +
        +
        +
      3. Configuration and upgrade

        Configure parameters before an upgrade. CCE has provided default settings, which can be modified as needed. After the configuration, upgrade add-ons, master nodes, and worker nodes in sequence.

        +
        • Add-on Upgrade Configuration: Add-ons that have been installed in your cluster are listed. During the cluster upgrade, CCE automatically upgrades the selected add-ons to be compatible with the target cluster version. You can click Set to re-define the add-on parameters.

          If an add-on is marked with on its right side, the add-on cannot be compatible with both the source and target versions of the cluster upgrade. In this case, CCE will upgrade the add-on after the cluster upgrade. The add-on may be unavailable during the cluster upgrade.

          +
          +
        • Node Upgrade Configuration
          • Max. Nodes for Batch Upgrade: You can configure the maximum number of nodes to be upgraded in a batch.

Node pools will be upgraded in sequence. Nodes in a node pool will be upgraded in batches. One node is upgraded in the first batch, two nodes in the second batch, and the number of nodes in each subsequent batch increases by a power of 2 until the maximum number of nodes per batch is reached. The next batch is upgraded only after the previous batch is complete. By default, a maximum of 20 nodes are upgraded in a batch, and this can be increased to at most 60. For example, with the default maximum of 20, the batch sizes are 1, 2, 4, 8, 16, and then 20 nodes per batch.

            +
          • Node Priority: You can customize node upgrade priorities. If the priorities are not specified, CCE will perform the upgrade based on the priorities generated by the default policy.
• Add Upgrade Priority: You can customize the priorities for upgrading node pools. If the priorities are not specified, CCE will preferentially upgrade the node pool with the fewest nodes based on the default policy.
• Add Node Priority: You can customize the priorities for upgrading nodes in a node pool. If the priorities are not specified, CCE will preferentially upgrade the node with the lightest load (calculated based on the number of pods, resource request rate, and number of PVs) based on the default policy.
            +
          +
        +
      4. Post-upgrade verification

        After an upgrade, CCE will automatically check items including the cluster status and node status. You need to manually check services, new nodes, and new pods to ensure that the cluster functions properly after the upgrade. For details, see Performing Post-Upgrade Verification.

        +

        Upgrade Modes

        +
diff --git a/docs/cce/umn/cce_10_0198.html b/docs/cce/umn/cce_10_0198.html
index 2337d8b1..d330f269 100644
--- a/docs/cce/umn/cce_10_0198.html
+++ b/docs/cce/umn/cce_10_0198.html
@@ -1,18 +1,18 @@

        Adding Nodes for Management

        -

        Scenario

        In CCE, you can create a node (Creating a Node) or add existing nodes (ECSs or) to your cluster.

        +

        Accepting Nodes for Management

        +

        Scenario

        In CCE, you can create a node (Creating a Node) or add existing nodes (ECSs) to your cluster for management.

        • While an ECS is being accepted into a cluster, the operating system of the ECS will be reset to the standard OS image provided by CCE to ensure node stability. The CCE console prompts you to select the operating system and the login mode during the reset.
        • LVM information, including volume groups (VGs), logical volumes (LVs), and physical volumes (PVs), will be deleted from the system disks and data disks attached to the selected ECSs during management. Ensure that the information has been backed up.
        • While an ECS is being accepted into a cluster, do not perform any operation on the ECS through the ECS console.
        -

        Constraints

        • The cluster version must be 1.15 or later.
        • If a password or key has been set when the original VM node was created, reset the password or key during management. The original password or key will become invalid.
        • Nodes in a CCE Turbo cluster must support sub-ENIs or be bound to at least 16 ENIs. For details about the node specifications, see the nodes that can be selected on the console when you create a node.
• Data disks that have been partitioned will be ignored during node management. Ensure that at least one unpartitioned data disk that meets the specifications is attached to the node.
        +

        Constraints

        • The cluster version must be 1.15 or later.
        • If IPv6 is enabled for a cluster, only nodes in a subnet with IPv6 enabled can be accepted and managed. If IPv6 is not enabled for the cluster, only nodes in a subnet without IPv6 enabled can be accepted.
        • If a password or key has been set when the original VM node was created, reset the password or key during management. The original password or key will become invalid.
        • Nodes in a CCE Turbo cluster must support sub-ENIs or be bound to at least 16 ENIs. For details about the node flavors, see the node flavors that can be selected on the console when you create a node.
• Data disks that have been partitioned will be ignored during node management. Ensure that at least one unpartitioned data disk that meets the specifications is attached to the node.

        Prerequisites

        A cloud server that meets the following conditions can be accepted:

        • The node to be accepted must be in the Running state and not used by other clusters. In addition, the node to be accepted does not carry the CCE-Dynamic-Provisioning-Node tag.
        • The node to be accepted and the cluster must be in the same VPC. (If the cluster version is earlier than v1.13.10, the node to be accepted and the CCE cluster must be in the same subnet.)
        • Data disks must be attached to the nodes to be managed. A local disk (disk-intensive disk) or a data disk of at least 20 GiB can be attached to the node, and any data disks already attached cannot be smaller than 10 GiB.
        • The node to be accepted has 2-core or higher CPU, 4 GiB or larger memory, and only one NIC.
        • Only cloud servers with the same specifications, AZ, and data disk configuration can be added in batches.
        -

        Procedure

        1. Log in to the CCE console and go to the cluster where the node to be accepted resides.
        2. In the navigation pane, choose Nodes. On the displayed page, click the Nodes tab and then Accept Node in the upper right corner.
        3. Specify node parameters.

          Compute Settings

          +

          Procedure

          1. Log in to the CCE console and go to the cluster where the node to be accepted resides.
          2. In the navigation pane, choose Nodes. On the displayed page, click the Nodes tab and then Accept Node in the upper right corner.
          3. Specify node parameters.

            Configurations

            -
        Table 1 Upgrade modes

        Upgrade Mode

        +

        Description

        +

        Upgrade Scope

        +

        Advantage

        +

        Constraint

        +

        In-place upgrade

        +

        Kubernetes components, network components, and CCE management components are upgraded on nodes. During an upgrade, service pods and networks are not affected.

        +

        Nodes are upgraded in batches. Only the nodes that have been upgraded can be used to schedule services.

        +
        • Node OSs are not upgraded.
        • The add-ons that are incompatible with the target cluster version will be automatically upgraded.
        • Kubernetes components will be automatically upgraded.
        +

        The one-click upgrade does not need to migrate services, which ensures service continuity.

        +

        In-place upgrade is supported only in clusters of v1.15 or later.

        Table 1 Configuration parameters

        Parameter

        +
@@ -27,14 +27,13 @@
@@ -65,7 +64,7 @@
@@ -81,22 +80,22 @@

        Monitoring Custom Metrics on AOM

        CCE allows you to upload custom metrics to AOM. ICAgent on a node periodically calls the metric monitoring API configured on a workload to read monitoring data and then uploads the data to AOM.

        -
        Figure 1 Using ICAgent to collect monitoring metrics
        +
        Figure 1 Using ICAgent to collect monitoring metrics

        The custom metric API of a workload can be configured when the workload is created. The following procedure uses an Nginx application as an example to describe how to report custom metrics to AOM.

        1. Preparing an Application

          Prepare an application image. The application must provide a metric monitoring API for ICAgent to collect data, and the monitoring data must comply with the Prometheus specifications.

          -
        2. Deploying Applications and Converting Nginx Metrics

          Use the application image to deploy a workload in a cluster. Custom monitoring metrics are automatically reported.

          +
        3. Deploying Applications and Converting Nginx Metrics

          Use the application image to deploy a workload in a cluster. Custom metrics are automatically reported.

        4. Verification

          Go to AOM to check whether the custom metrics are successfully collected.

        Constraints

        • The ICAgent is compatible with the monitoring data specifications of Prometheus. The custom metrics provided by pods can be collected by the ICAgent only when they meet the monitoring data specifications of Prometheus. For details, see Prometheus Monitoring Data Collection.
        • The ICAgent supports only Gauge metrics.
        • The interval for the ICAgent to call the custom metric API is 1 minute, which cannot be changed.
@@ -58,7 +58,7 @@ ADD nginx.conf /etc/nginx/nginx.conf
 EXPOSE 80
 CMD ["nginx", "-g", "daemon off;"]
        -

      6. Use this Dockerfile to build an image and upload it to SWR. The image name is nginx:exporter.

1. In the navigation pane, choose My Images and then click Upload Through Client in the upper right corner. On the page displayed, click Generate a temporary login command and click to copy the command.
        2. Run the login command copied in the previous step on the node. If the login is successful, the message "Login Succeeded" is displayed.
        3. Run the following command to build an image named nginx. The image version is exporter.
          docker build -t nginx:exporter .
          +

        4. Use this Dockerfile to build an image and upload it to SWR. The image name is nginx:exporter.

          1. In the navigation pane, choose My Images. In the upper right corner, click Upload Through Client. On the displayed dialog box, click Generate a temporary login command and click to copy the command.
          2. Run the login command copied in the previous step on the node. If the login is successful, the message "Login Succeeded" is displayed.
          3. Run the following command to build an image named nginx. The image version is exporter.
            docker build -t nginx:exporter .
          4. Tag the image and upload it to the image repository. Change the image repository address and organization name based on your requirements.
            docker tag nginx:exporter {swr-address}/{group}/nginx:exporter
             docker push {swr-address}/{group}/nginx:exporter
          @@ -71,7 +71,7 @@
          Reading: 0 Writing: 1 Waiting: 2

      7. Deploying Applications and Converting Nginx Metrics

        The format of the monitoring data provided by nginx:exporter does not meet the requirements of Prometheus. Convert the data format to the format required by Prometheus. To convert the format of Nginx metrics, use nginx-prometheus-exporter, as shown in the following figure.

        Figure 2 Using exporter to convert the data format

        Deploy nginx:exporter and nginx-prometheus-exporter in the same pod.

        kind: Deployment
         apiVersion: apps/v1
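        The manifest is truncated in this diff. For reference, a minimal sketch of such a two-container Deployment is shown below; the SWR image path, the exporter image and its -nginx.scrape-uri flag, the stub_status port, and the metrics.alpha.kubernetes.io/custom-endpoints annotation are assumptions and should be adapted to the actual nginx.conf and image repository.

        kind: Deployment
        apiVersion: apps/v1
        metadata:
          name: nginx-exporter                # hypothetical workload name
        spec:
          replicas: 1
          selector:
            matchLabels:
              app: nginx-exporter
          template:
            metadata:
              labels:
                app: nginx-exporter
              annotations:
                # Assumed ICAgent annotation telling it to scrape Prometheus metrics from port 9113
                metrics.alpha.kubernetes.io/custom-endpoints: '[{"api":"prometheus","path":"/metrics","port":"9113","names":""}]'
            spec:
              containers:
                - name: nginx
                  image: "{swr-address}/{group}/nginx:exporter"   # image built and pushed in the previous step
                  ports:
                    - containerPort: 80
                - name: nginx-exporter
                  # Assumed community exporter image that converts stub_status output to Prometheus format
                  image: nginx/nginx-prometheus-exporter:0.9.0
                  args:
                    - "-nginx.scrape-uri=http://127.0.0.1:8080/stub_status"   # assumed stub_status address from nginx.conf
                  ports:
                    - containerPort: 9113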
        diff --git a/docs/cce/umn/cce_10_0205.html b/docs/cce/umn/cce_10_0205.html
        index d7028cc7..2ced25f5 100644
        --- a/docs/cce/umn/cce_10_0205.html
        +++ b/docs/cce/umn/cce_10_0205.html
        @@ -17,7 +17,7 @@
         
        @@ -65,7 +65,7 @@

      8. Click Install.
      9. Components

        -
        Table 1 Node configuration parameters

        Parameter

        Description

        Container Engine

        CCE clusters support Docker and containerd in some scenarios.
        • VPC network clusters of v1.23 and later versions support containerd. Tunnel network clusters of v1.23.2-r0 and later versions support containerd.
        • For a CCE Turbo cluster, both Docker and containerd are supported. For details, see Mapping between Node OSs and Container Engines.
        -
        +

        The container engines supported by CCE include Docker and containerd, which may vary depending on cluster types, cluster versions, and OSs. Select a container engine based on the information displayed on the CCE console. For details, see Mapping between Node OSs and Container Engines.

        OS

        Select an OS type. Different types of nodes support different OSs.
        • Public image: Select a public image for the node.
        • Private image: Select a private image for the node.
        -
        NOTE:
        • Service container runtimes share the kernel and underlying calls of nodes. To ensure compatibility, select a Linux distribution version that is the same as or close to that of the final service container image for the node OS.
        +
        NOTE:

        Service container runtimes share the kernel and underlying calls of nodes. To ensure compatibility, select a Linux distribution version that is the same as or close to that of the final service container image for the node OS.

        Data Disk

        At least one data disk is required for the container runtime and kubelet. The data disk cannot be deleted or uninstalled. Otherwise, the node will be unavailable.

        -

        Click Expand and select Allocate Disk Space to define the disk space occupied by the container runtime to store the working directories, container image data, and image metadata. For details about how to allocate data disk space, see Data Disk Space Allocation.

        +

        Click Expand to configure Data Disk Space Allocation, which is used to allocate space for container engines, images, and ephemeral storage for them to run properly. For details about how to allocate data disk space, see Data Disk Space Allocation.

        For other data disks, a raw disk is created without any processing by default. You can also click Expand and select Mount Disk to mount the data disk to a specified directory.

        Kubernetes Label

        -

        Click Add to set the key-value pair attached to the Kubernetes objects (such as pods). A maximum of 20 labels can be added.

        -

        Labels can be used to distinguish nodes. With workload affinity settings, container pods can be scheduled to a specified node. For more information, see Labels and Selectors.

        -

        Resource Tag

        +

        Resource Tag

        You can add resource tags to classify resources.

        -

        You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

        +

        You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

        CCE will automatically create the "CCE-Dynamic-Provisioning-Node=node id" tag.

        Kubernetes Label

        +

        Click Add Label to set the key-value pair attached to the Kubernetes objects (such as pods). A maximum of 20 labels can be added.

        +

        Labels can be used to distinguish nodes. With workload affinity settings, container pods can be scheduled to a specified node. For more information, see Labels and Selectors.

        +

        Taint

        This parameter is left blank by default. You can add taints to configure anti-affinity for the node. A maximum of 20 taints are allowed for each node. Each taint contains the following parameters:
        • Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
        • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
        • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
        +
        This parameter is left blank by default. You can add taints to configure node anti-affinity. A maximum of 20 taints are allowed for each node. Each taint contains the following parameters:
        • Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
        • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
        • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
        NOTICE:
        • If taints are used, you must configure tolerations in the YAML files of pods (see the toleration sketch after this list). Otherwise, scale-up may fail or pods cannot be scheduled onto the added nodes.
        • After a node pool is created, you can click Edit to modify its configuration. The modification will be synchronized to all nodes in the node pool.
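
        For reference, a pod that must still be scheduled onto nodes carrying such a taint needs a matching toleration. A minimal sketch, in which the pod name, image, and the taint key/value are placeholders:

        apiVersion: v1
        kind: Pod
        metadata:
          name: toleration-demo              # hypothetical pod name
        spec:
          containers:
            - name: app
              image: nginx:latest            # placeholder image
          tolerations:
            - key: "example-key"             # must match the taint key set on the node
              operator: "Equal"
              value: "example-value"         # must match the taint value
              effect: "NoSchedule"           # NoSchedule, PreferNoSchedule, or NoExecute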
        @@ -104,19 +103,19 @@

        Max. Pods

        Maximum number of pods that can run on the node, including the default system pods.

        +

        Maximum number of pods that can run on the node, including the default system pods.

        This limit prevents the node from being overloaded with pods.

        Pre-installation Command

        Enter commands. A maximum of 1000 characters are allowed.

        +

        Pre-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded.

        The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed.

        Post-installation Command

        Enter commands. A maximum of 1000 characters are allowed.

        +

        Post-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded.

        The script will be executed after Kubernetes software is installed, which does not affect the installation.

        Select Single, Custom, or HA for Add-on Specifications.

        Instances

        +

        Pods

        Number of pods that will be created to match the selected add-on specifications.

        If you select Custom, you can adjust the number of pods as required.

        @@ -43,12 +43,12 @@

        Multi AZ

        • Preferred: Deployment pods of the add-on will be preferentially scheduled to nodes in different AZs. If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ.
        • Equivalent mode: Deployment pods of the add-on are evenly scheduled to the nodes in the cluster in each AZ. If a new AZ is added, you are advised to increase add-on pods for cross-AZ HA deployment. With the Equivalent multi-AZ deployment, the difference between the number of add-on pods in different AZs will be less than or equal to 1. If resources in one of the AZs are insufficient, pods cannot be scheduled to that AZ.
        • Required: Deployment pods of the add-on will be forcibly scheduled to nodes in different AZs. If there are fewer AZs than pods, the extra pods will fail to run.
        +
        • Preferred: Deployment pods of the add-on will be preferentially scheduled to nodes in different AZs. If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ.
        • Equivalent mode: Deployment pods of the add-on are evenly scheduled to the nodes in the cluster in each AZ. If a new AZ is added, you are advised to increase add-on pods for cross-AZ HA deployment. With the Equivalent multi-AZ deployment, the difference between the number of add-on pods in different AZs will be less than or equal to 1. If resources in one of the AZs are insufficient, pods cannot be scheduled to that AZ.
        • Required: Deployment pods of the add-on will be forcibly scheduled to nodes in different AZs. If there are fewer AZs than pods, the extra pods will fail to run.

        Node Affinity

        • Incompatibility: Node affinity is disabled for the add-on.
        • Node Affinity: Specify the nodes where the add-on is deployed. If you do not specify the nodes, the add-on will be randomly scheduled based on the default cluster scheduling policy.
        • Specified Node Pool Scheduling: Specify the node pool where the add-on is deployed. If you do not specify the node pool, the add-on will be randomly scheduled based on the default cluster scheduling policy.
        • Custom Policies: Enter the labels of the nodes where the add-on is to be deployed for more flexible scheduling policies. If you do not specify node labels, the add-on will be randomly scheduled based on the default cluster scheduling policy.

          If multiple custom affinity policies are configured, ensure that there are nodes that meet all the affinity policies in the cluster. Otherwise, the add-on cannot run.

          +
        • Not configured: Node affinity is disabled for the add-on.
        • Node Affinity: Specify the nodes where the add-on is deployed. If you do not specify the nodes, the add-on will be randomly scheduled based on the default cluster scheduling policy.
        • Specified Node Pool Scheduling: Specify the node pool where the add-on is deployed. If you do not specify the node pool, the add-on will be randomly scheduled based on the default cluster scheduling policy.
        • Custom Policies: Enter the labels of the nodes where the add-on is to be deployed for more flexible scheduling policies. If you do not specify node labels, the add-on will be randomly scheduled based on the default cluster scheduling policy.

          If multiple custom affinity policies are configured, ensure that there are nodes that meet all the affinity policies in the cluster. Otherwise, the add-on cannot run.

        Table 3 metrics-server components

        Component

        +
        diff --git a/docs/cce/umn/cce_10_0208.html b/docs/cce/umn/cce_10_0208.html
        index e60c76a6..53500e99 100644
        --- a/docs/cce/umn/cce_10_0208.html
        +++ b/docs/cce/umn/cce_10_0208.html
        @@ -5,7 +5,7 @@

        Prerequisites

        To use HPA, install an add-on that provides metrics APIs. Select one of the following add-ons based on your cluster version and service requirements.
        • Kubernetes Metrics Server: provides basic resource usage metrics, such as container CPU and memory usage. It is supported by all cluster versions.
        -

        Constraints

        • HPA policies can be created only for clusters of v1.13 or later.
        • For clusters earlier than v1.19.10, if an HPA policy is used to scale out a workload with EVS disks attached, the existing pods cannot be read or written when a new pod is scheduled to another node.

          For clusters of v1.19.10 and later, if an HPA policy is used to scale out a workload with EVS disks attached, a new pod cannot be started because EVS disks cannot be attached.

          +

          Constraints

          • HPA policies can be created only for clusters of v1.13 or later.
          • For clusters earlier than v1.19.10, if an HPA policy is used to scale out a workload with EVS volumes mounted, the existing pods cannot be read or written when a new pod is scheduled to another node.

            For clusters of v1.19.10 and later, if an HPA policy is used to scale out a workload with EVS volume mounted, a new pod cannot be started because EVS disks cannot be attached.

          Creating an HPA Policy

          1. Log in to the CCE console and click the cluster name to access the cluster console.
          2. Choose Workloads in the navigation pane. Locate the target workload and choose More > Auto Scaling in the Operation column.
          3. Set Policy Type to HPA+CronHPA, enable the created HPA policy, and configure parameters.

            This section describes only HPA policies. To enable CronHPA, see CronHPA Policies.
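
            For reference, the console procedure above corresponds to a standard Kubernetes HorizontalPodAutoscaler object. A minimal sketch, assuming a Deployment named nginx and the autoscaling/v2 API (available in newer cluster versions); the names and thresholds are placeholders:

            apiVersion: autoscaling/v2
            kind: HorizontalPodAutoscaler
            metadata:
              name: nginx-hpa                    # hypothetical policy name
              namespace: default
            spec:
              scaleTargetRef:
                apiVersion: apps/v1
                kind: Deployment
                name: nginx                      # workload to be scaled
              minReplicas: 2
              maxReplicas: 10
              metrics:
                - type: Resource
                  resource:
                    name: cpu
                    target:
                      type: Utilization
                      averageUtilization: 70     # scale out when average CPU utilization exceeds 70%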

            diff --git a/docs/cce/umn/cce_10_0209.html b/docs/cce/umn/cce_10_0209.html
            index 2b9c4f33..1189313c 100644
            --- a/docs/cce/umn/cce_10_0209.html
            +++ b/docs/cce/umn/cce_10_0209.html
            @@ -6,81 +6,79 @@
            • If the scale-out succeeds for the unschedulable pods, the system skips the metric-based rule logic and enters the next loop.
            • If the scale-out fails for the unschedulable pods, the metric-based rule is executed.

            Prerequisites

            Before using the node scaling function, you must install the CCE Cluster Autoscaler add-on of v1.13.8 or later in the cluster.

            -

            Constraints

            • Auto scaling policies apply to node pools. When the number of nodes in a node pool is 0 and the scaling policy is based on CPU or memory usage, node scaling is not triggered.
            • Node scale-in will cause PVC/PV data loss for the local PVs associated with the node. These PVCs and PVs cannot be restored or used again. In a node scale-in, the pod that uses the local PV is evicted from the node. A new pod is created and stays in the pending state. This is because the PVC used by the pod has a node label, due to which the pod cannot be scheduled.
            • When Autoscaler is used, some taints or annotations may affect auto scaling. Therefore, do not use the following taints or annotations in clusters:
              • ignore-taint.cluster-autoscaler.kubernetes.io: The taint works on nodes. Kubernetes-native Autoscaler supports protection against abnormal scale outs and periodically evaluates the proportion of available nodes in the cluster. When the proportion of non-ready nodes exceeds 45%, protection will be triggered. In this case, all nodes with the ignore-taint.cluster-autoscaler.kubernetes.io taint in the cluster are filtered out from the Autoscaler template and recorded as non-ready nodes, which affects cluster scaling.
              • cluster-autoscaler.kubernetes.io/enable-ds-eviction: The annotation works on pods, which determines whether DaemonSet pods can be evicted by Autoscaler. For details, see Well-Known Labels, Annotations and Taints.
              +

              Constraints

              • If there are no nodes in a node pool, Autoscaler cannot obtain the CPU or memory data of the node, and the node scaling rule triggered using these metrics will not take effect.
              • If the driver of a GPU node is not installed, Autoscaler determines that the node is not fully available and the node scaling rules triggered using the CPU or memory metrics will not take effect.
              • Node scale-in will cause PVC/PV data loss for the local PVs associated with the node. These PVCs and PVs cannot be restored or used again. In a node scale-in, the pod that uses the local PV is evicted from the node. A new pod is created and stays in the pending state. This is because the PVC used by the pod has a node label, due to which the pod cannot be scheduled.
              • When Autoscaler is used, some taints or annotations may affect auto scaling. Therefore, do not use the following taints or annotations in clusters:
                • ignore-taint.cluster-autoscaler.kubernetes.io: The taint works on nodes. Kubernetes-native Autoscaler supports protection against abnormal scale outs and periodically evaluates the proportion of available nodes in the cluster. When the proportion of non-ready nodes exceeds 45%, protection will be triggered. In this case, all nodes with the ignore-taint.cluster-autoscaler.kubernetes.io taint in the cluster are filtered out from the Autoscaler template and recorded as non-ready nodes, which affects cluster scaling.
                • cluster-autoscaler.kubernetes.io/enable-ds-eviction: The annotation works on pods, which determines whether DaemonSet pods can be evicted by Autoscaler. For details, see Well-Known Labels, Annotations and Taints.
              -

              Procedure

              1. Log in to the CCE console and click the cluster name to access the cluster console.
              2. In the navigation pane, choose Nodes. On the Node Pools tab, locate the row containing the target node pool and click Auto Scaling.

                • If the auto scaling add-on has not been installed, configure add-on parameters based on service requirements, click Install, and wait until the add-on is installed. For details about add-on configurations, see CCE Cluster Autoscaler.
                • If the auto scaling add-on has been installed, directly configure auto scaling policies.
                -

              3. Configure auto scaling policies.

                AS Configuration

                -
                • Customize scale-out rules.: Click Add Rule. In the dialog box displayed, configure parameters. You can add multiple node scaling rules, a maximum of one CPU usage-based rule, and one memory usage-based rule. The total number of rules cannot exceed 10.
                  The following table lists custom rules. -
        Table 3 Add-on components

        Component

        Description

        Table 1 Custom rules

        Rule Type

        +

        Configuring Node Pool Scaling Policies

        1. Log in to the CCE console and click the cluster name to access the cluster console.
        2. In the navigation pane, choose Nodes. On the Node Pools tab, locate the row containing the target node pool and click Auto Scaling.

          • If the auto scaling add-on has not been installed, configure add-on parameters based on service requirements, click Install, and wait until the add-on is installed. For details about add-on configurations, see CCE Cluster Autoscaler.
          • If the auto scaling add-on has been installed, directly configure auto scaling policies.
          +

        3. Configure auto scaling policies.

          AS Configuration

          +
          • Customized Rule: Click Add Rule. In the dialog box displayed, configure parameters. You can add multiple node scaling policies, a maximum of one CPU usage-based rule, and one memory usage-based rule. The total number of rules cannot exceed 10.
            The following table lists custom rules. +
            Table 1 Custom rules

            Rule Type

            Configuration

            +

            Configuration

            Metric-based

            +

            Metric-based

            • Trigger: Select CPU allocation rate or Memory allocation rate and enter a value. The value must be greater than the scale-in percentage configured in the auto scaling add-on.
              NOTE:
              • Resource allocation (%) = Resources requested by pods in the node pool/Resources allocatable to pods in the node pool
              • If multiple rules meet the conditions, the rules are executed in either of the following modes:

                If rules based on the CPU allocation rate and memory allocation rate are configured and two or more rules meet the scale-out conditions, the rule that will add the most nodes will be executed.

                -

                If a rule based on the CPU allocation rate and a periodic rule are configured and they both meet the scale-out conditions, one of them will be executed randomly. The rule executed first (rule A) changes the node pool to the scaling state. As a result, the other rule (rule B) cannot be executed. After rule A is executed and the node pool status becomes normal, rule B will not be executed.

                -
              • If rules based on the CPU allocation rate and memory allocation rate are configured, the policy detection period varies with the processing logic of each loop of the Autoscaler add-on. A scale-out is triggered once the conditions are met, but it is constrained by other factors such as the cooldown period and node pool status.
              • When the number of nodes in the cluster reaches the upper limit, or the CPU or memory usage reaches the upper limit of the autoscaler add-on, node scale-out will not be triggered.
              +
            • Trigger: Select CPU allocation rate or Memory allocation rate and enter a value. The value must be greater than the scale-in percentage configured in the auto scaling add-on.
              NOTE:
              • Resource allocation (%) = Resources requested by pods in the node pool/Resources allocatable to pods in the node pool
              • If multiple rules meet the conditions, the rules are executed in either of the following modes:

                If rules based on the CPU allocation rate and memory allocation rate are configured and two or more rules meet the scale-out conditions, the rule that will add the most nodes will be executed.

                +

                If a rule based on the CPU allocation rate and a periodic rule are configured and they both meet the scale-out conditions, one of them will be executed randomly. The rule executed first (rule A) changes the node pool to the scaling state. As a result, the other rule (rule B) cannot be executed. After rule A is executed and the node pool status becomes normal, rule B will not be executed.

                +
              • If rules based on the CPU allocation rate and memory allocation rate are configured, the policy detection period varies with the processing logic of each loop of the Autoscaler add-on. A scale-out is triggered once the conditions are met, but it is constrained by other factors such as the cooldown period and node pool status.
              • When the number of nodes in the cluster reaches the upper limit, or the CPU or memory usage reaches the upper limit of the autoscaler add-on, node scale-out will not be triggered.
              -
            • Action: Configure an action to be performed when the triggering condition is met.
              • Custom: Add a specified number of nodes to a node pool.
              • Auto calculation: When the trigger condition is met, nodes are automatically added and the allocation rate is restored to a value lower than the threshold. The formula is as follows:

                Number of nodes to be added = [Resource request of pods in the node pool/(Available resources of a single node x Target allocation rate)] – Number of current nodes + 1

                +
              • Action: Configure an action to be performed when the triggering condition is met.
                • Custom: Add a specified number of nodes to a node pool.
                • Auto calculation: When the trigger condition is met, nodes are automatically added and the allocation rate is restored to a value lower than the threshold. The formula is as follows:

                  Number of nodes to be added = [Resource request of pods in the node pool/(Available resources of a single node x Target allocation rate)] – Number of current nodes + 1

            Periodic

            +

            Periodic

            • Trigger Time: You can select a specific time every day, every week, every month, or every year.
            • Action: specifies an action to be carried out when the trigger time is reached. A specified number of nodes will be added to the node pool.
            +
            • Trigger Time: You can select a specific time every day, every week, every month, or every year.
            • Action: specifies an action to be carried out when the trigger time is reached. A specified number of nodes will be added to the node pool.
            -
          • Nodes: The number of nodes in a node pool will always be within the range during auto scaling.
          • Cooldown Period: a period during which the nodes added in the current node pool cannot be scaled in.
          -

          AS Object

          -

          Specifications: Configure whether to enable auto scaling for node flavors in a node pool.

          -

        4. View cluster-level auto scaling configurations, which take effect for all node pools in the cluster. On this page, you can only view cluster-level auto scaling policies. To modify these policies, go to the Settings page. For details, see Configuring an Auto Scaling Policy for a Cluster.
        5. Click OK.
        +
      • Nodes: The number of nodes in a node pool will always be within the range during auto scaling.
      • Cooldown Period: a period during which the nodes added in the current node pool cannot be scaled in.
      +

        AS Object

        +

        Specification selection: Configure whether to enable auto scaling for node flavors in a node pool.

        +

      4. View cluster-level auto scaling configurations, which take effect for all node pools in the cluster. On this page, you can only view cluster-level auto scaling policies. To modify these policies, go to the Settings page. For details, see Configuring an Auto Scaling Policy for a Cluster.
      5. After the configuration is complete, click OK.
      -

        Configuring an Auto Scaling Policy for a Cluster

        An auto scaling policy takes effect on all node pools in a cluster. After the policy is modified, the Autoscaler add-on will be restarted.

        +

        Configuring an Auto Scaling Policy for a Cluster

        An auto scaling policy takes effect on all node pools in a cluster. After the policy is modified, the Autoscaler add-on will be restarted.

        -
        1. Log in to the CCE console and click the cluster name to access the details page.
        2. In the navigation pane, choose Settings and click the Auto Scaling tab.
        3. Configure for an elastic scale-out.

          • Auto Scale-out when the load cannot be scheduled: When workload pods in a cluster cannot be scheduled (pods remain in pending state), CCE automatically adds nodes to the slave node pool. If a node has been configured to be affinity for pods, no node will not be automatically added when pods cannot be scheduled. Such auto scaling typically works with an HPA policy. For details, see Using HPA and CA for Auto Scaling of Workloads and Nodes.

            If this function is not enabled, scaling can be performed only using custom scaling policies.

            -
          • Upper limit of resources to be expanded: Configure an upper limit for the total resources in the cluster. When the upper limit is reached, nodes will not be automatically added.

            When the total number of nodes, CPUs, and memory is collected, unavailable nodes in custom node pools are included but unavailable nodes in the default node pool are not included.

            +
            1. Log in to the CCE console and click the cluster name to access the cluster console.
            2. In the navigation pane, choose Settings and click the Auto Scaling tab.
            3. Configure for an elastic scale-out.

              • Auto Scale-out when the load cannot be scheduled: When workload pods in a cluster cannot be scheduled (pods remain in pending state), CCE automatically adds nodes to the slave node pool. If pods have been configured with affinity for specific nodes, no nodes will be automatically added when those pods cannot be scheduled. Such auto scaling typically works with an HPA policy. For details, see Using HPA and CA for Auto Scaling of Workloads and Nodes.

                If this function is not enabled, scaling can be performed only using custom scaling policies.

                +
              • Upper limit of resources to be expanded: Configure an upper limit for the total resources in the cluster. When the upper limit is reached, nodes will not be automatically added.

                When the total number of nodes, CPUs, and memory is collected, unavailable nodes in custom node pools are included but unavailable nodes in the default node pool are not included.

                -
              • Scale-Out Priority: You can drag and drop the node pools in a list to adjust their scale-out priorities.
              -

            4. Configure for an elastic scale-in. Elastic scale-in is disabled by default. After it is enabled, the following configurations are supported:

              Node Scale-In Conditions: Nodes in a cluster are automatically scaled in when the scale-in conditions are met.
              • Node Resource Condition: When the requested cluster node resources (both CPU and memory) are lower than a certain percentage (50% by default) for a period of time (10 minutes by default), a cluster scale-in is triggered.
              • Node Status Condition: If a node is unavailable for a specified period of time, the node will be automatically reclaimed. The default value is 20 minutes.
              • Scale-in Exception Scenarios: When a node meets the following exception scenarios, CCE will not scale in the node even if the node resources or status meets scale-in conditions:
                1. Resources on other nodes in the cluster are insufficient.
                2. Scale-in protection is enabled on the node. To enable or disable node scale-in protection, choose Nodes in the navigation pane and then click the Nodes tab. Locate the target node, choose More, and then enable or disable node scale-in protection in the Operation column.
                3. There is a pod with the non-scale-in label on the node.
                4. Policies such as reliability have been configured for some pods on the node.
                5. There is a non-DaemonSet pod in the kube-system namespace of the node.
                6. (Optional) There is a pod created using CRD on the node, and this pod is managed by a third-party Pod Controller.
                +
              • Scale-Out Priority: You can drag and drop the node pools in a list to adjust their scale-out priorities.
              +

            5. Configure for an elastic scale-in. Elastic scale-in is disabled by default. After it is enabled, the following configurations are supported:

              Node Scale-In Conditions: Nodes in a cluster are automatically scaled in when the scale-in conditions are met.
              • Node Resource Condition: When the requested cluster node resources (both CPU and memory) are lower than a certain percentage (50% by default) for a period of time (10 minutes by default), a cluster scale-in is triggered.
              • Node Status Condition: If a node is unavailable for a specified period of time, the node will be automatically reclaimed. The default value is 20 minutes.
              • Scale-in Exception Scenarios: When a node meets the following exception scenarios, CCE will not scale in the node even if the node resources or status meets scale-in conditions:
                1. Resources on other nodes in the cluster are insufficient.
                2. Scale-in protection is enabled on the node. To enable or disable node scale-in protection, choose Nodes in the navigation pane and then click the Nodes tab. Locate the target node, choose More, and then enable or disable node scale-in protection in the Operation column.
                3. There is a pod with the non-scale label on the node.
                4. Policies such as reliability have been configured on some containers on the node.
                5. There are non-DaemonSet containers in the kube-system namespace on the node.
                6. (Optional) A container managed by a third-party pod controller is running on a node. Third-party pod controllers are for custom workloads except Kubernetes-native workloads such as Deployments and StatefulSets. Such controllers can be created using CustomResourceDefinitions.
              -
              Node Scale-In Policy
              • Number of Concurrent Scale-In Requests: maximum number of idle nodes that can be concurrently deleted. Default value: 10.
                Only idle nodes can be concurrently scaled in. Nodes that are not idle can only be scaled in one by one.

                During a node scale-in, if the pods on the node do not need to be evicted (such as DaemonSet pods), the node is idle. Otherwise, the node is not idle.

                +
                Node Scale-in Policy
                • Number of Concurrent Scale-In Requests: maximum number of idle nodes that can be concurrently deleted. Default value: 10.
                  Only idle nodes can be concurrently scaled in. Nodes that are not idle can only be scaled in one by one.

                  During a node scale-in, if the pods on the node do not need to be evicted (such as DaemonSet pods), the node is idle. Otherwise, the node is not idle.

                  -
                • Node Recheck Timeout: interval for rechecking a node that could not be removed. Default value: 5 minutes.
                • Cooldown Time
                  • Scale-in Cooldown Time After Scale-out: Default value: 10 minutes.

                    If both auto scale-out and scale-in exist in a cluster, set Scale-in Cooldown Time After Scale-out to 0 minutes. This prevents the node scale-in from being blocked due to continuous scale-out of some node pools or retries upon a scale-out failure, which results in unexpected waste of node resources.

                    +
                  • Node Recheck Timeout: interval for rechecking a node that could not be removed. Default value: 5 minutes.
                  • Cooldown Time
                    • Scale-in Cooldown Time After Scale-out: Default value: 10 minutes.

                      If both auto scale-out and scale-in exist in a cluster, set Scale-in Cooldown Time After Scale-out to 0 minutes. This prevents the node scale-in from being blocked due to continuous scale-out of some node pools or retries upon a scale-out failure, which results in unexpected waste of node resources.

                      -
                    • Scale-in Cooldown Time After Node Deletion: Default value: 10 minutes.
                    • Scale-in Cooldown Time After Failure: Default value: 3 minutes. For details, see Cooldown Period.
                    +
                  • Scale-in Cooldown Time After Node Deletion: Default value: 10 minutes.
                  • Scale-in Cooldown Time After Failure: Default value: 3 minutes. For details, see Cooldown Period.
                -

              • Click Confirm configuration.
            +

          • Click Confirm configuration.

        Cooldown Period

        The impact and relationship between the two cooldown periods configured for a node pool are as follows:

        Cooldown Period During a Scale-out

        This interval indicates the period during which nodes added to the current node pool after a scale-out cannot be deleted. This setting takes effect in the entire node pool.

        Cooldown Period During a Scale-in

        -

        The interval after a scale-out indicates the period during which the entire cluster cannot be scaled in after the auto scaling add-on triggers a scale-out (due to the unschedulable pods, metrics, and scaling policies). This interval takes effect in the entire cluster.

        -

        The interval after a node is deleted indicates the period during which the cluster cannot be scaled in after the auto scaling add-on triggers a scale-in. This setting takes effect in the entire cluster.

        -

        The interval after a failed scale-in indicates the period during which the cluster cannot be scaled in after the auto scaling add-on triggers a scale-in. This setting takes effect in the entire cluster.

        +

        The interval after a scale-out indicates the period during which the entire cluster cannot be scaled in after the Autoscaler add-on triggers a scale-out (due to the unschedulable pods, metrics, and scaling policies). This interval takes effect in the entire cluster.

        +

        The interval after a node is deleted indicates the period during which the cluster cannot be scaled in after the Autoscaler add-on triggers a scale-in. This setting takes effect in the entire cluster.

        +

        The interval after a failed scale-in indicates the period during which the cluster cannot be scaled in after the Autoscaler add-on triggers a scale-in. This setting takes effect in the entire cluster.

        +
        +

        Period for Autoscaler to Retry a Scale-out

        If a node pool fails to scale out, for example, due to insufficient resources or quota, or because an error occurred during node installation, Autoscaler can retry the scale-out in the node pool or switch to another node pool. The retry period varies depending on the failure cause:

        +
        • When resources in a node pool are sold out or the user quota is insufficient, Autoscaler cools down the node pool for 5 minutes, 10 minutes, or 20 minutes. The maximum cooldown duration is 30 minutes. Then, Autoscaler switches to another node pool for a scale-out in the next 10 seconds until the expected node is added or all node pools are cooled down.
        • If an error occurred during node installation in a node pool, the node pool enters a 5-minute cooldown period. After the period expires, Autoscaler can trigger a node pool scale-out again. If the faulty node is automatically reclaimed, Cluster Autoscaler re-evaluates the cluster status within 1 minute and triggers a node pool scale-out as needed.
        • During a node pool scale-out, if a node remains in the installing state for a long time, Cluster Autoscaler tolerates the node for a maximum of 15 minutes. After the tolerance period expires, Cluster Autoscaler re-evaluates the cluster status and triggers a node pool scale-out as needed.

        Example YAML

        The following is a YAML example of a node scaling policy:

        apiVersion: autoscaling.cce.io/v1alpha1
         kind: HorizontalNodeAutoscaler
         metadata:
        -  creationTimestamp: "2020-02-13T12:47:49Z"
        -  generation: 1
           name: xxxx
           namespace: kube-system
        -  resourceVersion: "11433270"
        -  selfLink: /apis/autoscaling.cce.io/v1alpha1/namespaces/kube-system/horizontalnodeautoscalers/xxxx
        -  uid: c2bd1e1d-60aa-47b5-938c-6bf3fadbe91f
         spec:
           disable: false
           rules:
        @@ -109,131 +107,131 @@ spec:
           - 7d48eca7-3419-11ea-bc29-0255ac1001a8
        • 200 for clusters with 50 or 200 nodes
        • 500 for clusters with 1000 nodes
        • 1000 for clusters with 2000 nodes
        @@ -58,40 +58,38 @@
        • 400 for clusters with 50 or 200 nodes
        • 1000 for clusters with 1000 nodes
        • 2000 for clusters with 2000 nodes
        Table 2 Key parameters

        Parameter

        +
        diff --git a/docs/cce/umn/cce_10_0210.html b/docs/cce/umn/cce_10_0210.html
        index 5857f55f..18077a26 100644
        --- a/docs/cce/umn/cce_10_0210.html
        +++ b/docs/cce/umn/cce_10_0210.html
        @@ -40,7 +40,7 @@
        Table 2 Key parameters

        Parameter

        Type

        +

        Type

        Description

        +

        Description

        spec.disable

        +

        spec.disable

        Bool

        +

        Bool

        Whether to enable the scaling policy. This parameter takes effect for all rules in the policy.

        +

        Whether to enable the scaling policy. This parameter takes effect for all rules in the policy.

        spec.rules

        +

        spec.rules

        Array

        +

        Array

        All rules in a scaling policy.

        +

        All rules in a scaling policy.

        spec.rules[x].ruleName

        +

        spec.rules[x].ruleName

        String

        +

        String

        Rule name.

        +

        Rule name.

        spec.rules[x].type

        +

        spec.rules[x].type

        String

        +

        String

        Rule type. Currently, Cron and Metric are supported.

        +

        Rule type. Cron and Metric are supported.

        spec.rules[x].disable

        +

        spec.rules[x].disable

        Bool

        +

        Bool

        Rule switch. Currently, only false is supported.

        +

        Rule switch. Currently, only false is supported.

        spec.rules[x].action.type

        +

        spec.rules[x].action.type

        String

        +

        String

        Rule action type. Currently, only ScaleUp is supported.

        +

        Rule action type. Currently, only ScaleUp is supported.

        spec.rules[x].action.unit

        +

        spec.rules[x].action.unit

        String

        +

        String

        Rule action unit. Currently, only Node is supported.

        +

        Rule action unit. Currently, only Node is supported.

        spec.rules[x].action.value

        +

        spec.rules[x].action.value

        Integer

        +

        Integer

        Rule action value.

        +

        Rule action value.

        spec.rules[x].cronTrigger

        +

        spec.rules[x].cronTrigger

        /

        +

        N/A

        Optional. This parameter is valid only in periodic rules.

        +

        Optional. This parameter is valid only in periodic rules.

        spec.rules[x].cronTrigger.schedule

        +

        spec.rules[x].cronTrigger.schedule

        String

        +

        String

        Cron expression of a periodic rule.

        +

        Cron expression of a periodic rule.

        spec.rules[x].metricTrigger

        +

        spec.rules[x].metricTrigger

        /

        +

        N/A

        Optional. This parameter is valid only in metric-based rules.

        +

        Optional. This parameter is valid only in metric-based rules.

        spec.rules[x].metricTrigger.metricName

        +

        spec.rules[x].metricTrigger.metricName

        String

        +

        String

        Metric of a metric-based rule. Currently, Cpu and Memory are supported.

        +

        Metric of a metric-based rule. Currently, Cpu and Memory are supported.

        spec.rules[x].metricTrigger.metricOperation

        +

        spec.rules[x].metricTrigger.metricOperation

        String

        +

        String

        Comparison operator of a metric-based rule. Currently, only > is supported.

        +

        Comparison operator of a metric-based rule. Currently, only > is supported.

        spec.rules[x].metricTrigger.metricValue

        +

        spec.rules[x].metricTrigger.metricValue

        String

        +

        String

        Metric threshold of a metric-based rule. The value can be any integer from 1 to 100 and must be a character string.

        +

        Metric threshold of a metric-based rule. The value can be any integer from 1 to 100 and must be a character string.

        spec.rules[x].metricTrigger.Unit

        +

        spec.rules[x].metricTrigger.Unit

        String

        +

        String

        Unit of the metric-based rule threshold. Currently, only % is supported.

        +

        Unit of the metric-based rule threshold. Currently, only % is supported.

        spec.targetNodepoolIds

        +

        spec.targetNodepoolIds

        Array

        +

        Array

        All node pools associated with the scaling policy.

        +

        All node pools associated with the scaling policy.

        spec.targetNodepoolIds[x]

        +

        spec.targetNodepoolIds[x]

        String

        +

        String

        ID of the node pool associated with the scaling policy.

        +

        ID of the node pool associated with the scaling policy.
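
        Putting the fields in Table 2 together, a complete policy object might look like the following. This is only an illustrative sketch: the policy name, rule names, thresholds, cron expression, and node pool ID are placeholders, and the field names follow Table 2:

        apiVersion: autoscaling.cce.io/v1alpha1
        kind: HorizontalNodeAutoscaler
        metadata:
          name: example-policy                 # hypothetical policy name
          namespace: kube-system
        spec:
          disable: false                       # enable all rules in this policy
          rules:
            - ruleName: scale-up-every-morning # periodic rule (placeholder name)
              type: Cron
              disable: false
              action:
                type: ScaleUp
                unit: Node
                value: 2                       # add two nodes when triggered
              cronTrigger:
                schedule: "0 8 * * *"          # assumed standard cron syntax: every day at 08:00
            - ruleName: scale-up-on-cpu        # metric-based rule (placeholder name)
              type: Metric
              disable: false
              action:
                type: ScaleUp
                unit: Node
                value: 1
              metricTrigger:
                metricName: Cpu
                metricOperation: '>'
                metricValue: "70"              # threshold is a string between 1 and 100
                Unit: '%'                      # field name as listed in Table 2
          targetNodepoolIds:
            - <node-pool-id>                   # ID of the node pool this policy applies to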

        -

        Procedure

        1. Create a CCE cluster.

          Create a cluster with the same specifications and configurations as the cluster of the earlier version. For details, see Creating a CCE Cluster.

          +

          Procedure

          1. Create a CCE cluster.

            Create a cluster with the same specifications and configurations as the cluster of the earlier version. For details, see Creating a CCE Standard/Turbo Cluster.

          2. Add a node.

            Add a node with the same specifications and manual configuration items. For details, see Creating a Node.

          3. Create a storage volume in the new cluster.

            Use the existing storage to create a PVC in the new cluster. The PVC name remains unchanged. For details, see Using an Existing OBS Bucket Through a Static PV or Using an Existing SFS Turbo File System Through a Static PV.

            Storage switching supports only OBS buckets and SFS Turbo file systems. If non-shared storage is used, suspend the workloads in the old cluster to switch the storage resources. As a result, services will be unavailable.
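
            For reference, statically binding an existing volume in the new cluster generally means creating a PV that points at the existing storage and a PVC with the same name as in the old cluster that binds to it by volumeName. A minimal, generic sketch; the CSI driver name, volume handle, capacity, and access mode are placeholders that must be taken from the linked OBS/SFS Turbo guides:

            apiVersion: v1
            kind: PersistentVolume
            metadata:
              name: pv-existing-storage            # hypothetical PV name
            spec:
              capacity:
                storage: 1Gi                       # placeholder size
              accessModes:
                - ReadWriteMany
              persistentVolumeReclaimPolicy: Retain   # keep the underlying storage if the PV is deleted
              csi:
                driver: <csi-driver-name>          # e.g., the everest CSI driver used by CCE; see the linked guides
                volumeHandle: <existing-bucket-or-file-system-id>
            ---
            apiVersion: v1
            kind: PersistentVolumeClaim
            metadata:
              name: pvc-existing-storage           # keep the same PVC name as in the old cluster
              namespace: default
            spec:
              accessModes:
                - ReadWriteMany
              resources:
                requests:
                  storage: 1Gi
              volumeName: pv-existing-storage      # bind statically to the PV above
              storageClassName: ""                 # empty to avoid dynamic provisioning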

            diff --git a/docs/cce/umn/cce_10_0212.html b/docs/cce/umn/cce_10_0212.html
            index 1cc59aca..63406829 100644
            --- a/docs/cce/umn/cce_10_0212.html
            +++ b/docs/cce/umn/cce_10_0212.html
            @@ -1,7 +1,7 @@

            Deleting a Cluster

            -

            Precautions

            • Deleting a cluster will delete the nodes in the cluster (excluding accepted nodes), data disks attached to the nodes, workloads, and Services. Related services cannot be restored. Before performing this operation, ensure that data has been backed up or migrated. Deleted data cannot be restored.
              Resources that are not created in CCE will not be deleted:
              • Accepted nodes (only the nodes created in CCE are deleted);
              • ELB load balancers associated with Services and ingresses (only the automatically created load balancers are deleted);
              • Manually created cloud storage resources associated with PVs or imported cloud storage resources (only the cloud storage resources automatically created by PVCs are deleted)
              +

              Precautions

              • Deleting a cluster will delete the nodes in the cluster (excluding accepted nodes), data disks attached to the nodes, workloads, and Services. Related services cannot be restored. Before performing this operation, ensure that data has been backed up or migrated. Deleted data cannot be restored.
                Resources that are not created in CCE will not be deleted:
                • Accepted nodes (only the nodes created in CCE are deleted)
                • ELB load balancers associated with Services and ingresses (only the automatically created load balancers are deleted)
                • Manually created cloud storage resources associated with PVs or imported cloud storage resources (only the cloud storage resources automatically created by PVCs are deleted)
              • If you delete a cluster that is not running (for example, unavailable), associated resources, such as storage and networking resources, will remain.
              @@ -11,7 +11,7 @@
              • The underlying storage resources are deleted according to the reclamation policy you defined for the storage volumes. For example, if the reclamation policy of storage volumes is Retain, the underlying storage resources will be retained after the cluster is deleted.
              • If there are more than 1000 files in the OBS bucket, manually clear the files and then delete the cluster.
            • Delete network resources such as load balancers in a cluster. (Only automatically created load balancers will be deleted).
            -

          4. Click Yes to start deleting the cluster.

            The delete operation takes 1 to 3 minutes to complete.

            +

          5. Enter DELETE and click Yes to start deleting the cluster.

            The delete operation takes 1 to 3 minutes to complete.

        diff --git a/docs/cce/umn/cce_10_0213.html b/docs/cce/umn/cce_10_0213.html
        index d61afdd5..5cb7ac74 100644
        --- a/docs/cce/umn/cce_10_0213.html
        +++ b/docs/cce/umn/cce_10_0213.html
        @@ -1,7 +1,7 @@

        Cluster Configuration Management

        -

        Scenario

        CCE allows you to manage cluster parameters, through which you can let core components work under your very requirements.

        +

        Scenario

        CCE allows you to manage cluster parameters, through which you can let core components work under your requirements.

        Constraints

        This function is supported only in clusters of v1.15 and later. It is not displayed for versions earlier than v1.15.

        @@ -16,7 +16,7 @@

        Tolerance time of pods for an unavailable node

        +

        Toleration time for nodes in NotReady state

        default-not-ready-toleration-seconds

        Default: 300s

        Tolerance time of pods for an inaccessible node

        +

        Toleration time for nodes in unreachable state

        default-unreachable-toleration-seconds

        Default: 300s

        Maximum number of concurrent modification API requests

        +

        Maximum Number of Concurrent Modification API Calls

        max-mutating-requests-inflight

        Maximum number of concurrent non-modification API requests

        +

        Maximum Number of Concurrent Non-Modification API Calls

        max-requests-inflight

        Ports used by NodePort services

        +

        NodePort port range

        service-node-port-range

        NodePort port range. After changing the value, go to the security group page and change the TCP/UDP port range of node security groups 30000 to 32767. Otherwise, ports other than the default port cannot be accessed externally.

        If the port number is smaller than 20106, a conflict may occur between the port and the CCE health check port, which may further lead to an unavailable cluster. If the port number is greater than 32767, a conflict may occur between the port and the ports in net.ipv4.ip_local_port_range, which may further affect the network performance.

        Default:

        -

        From 30000 to 32767

        +

        Default: 30000 to 32767

        Value range:

        Min > 20105

        Max < 32768

        Request timeout

        +

        Request Timeout

        request-timeout

        Default request timeout interval of kube-apiserver. Exercise caution when changing the value of this parameter. Ensure that the changed value is proper to prevent frequent API timeout or other errors.

        -

        This parameter is supported only by clusters of v1.19.16-r30, v1.21.10-r10, v1.23.8-r10, v1.25.3-r10, and later versions.

        +

        This parameter is available only in clusters of v1.19.16-r30, v1.21.10-r10, v1.23.8-r10, v1.25.3-r10, or later versions.

        Default:

        -

        1m0s

        +

        Default: 1m0s

        Value range:

        Min ≥ 1s

        Max ≤ 1 hour

        Overload control

        +

        Overload Control

        support-overload

        Cluster overload control. If enabled, concurrent requests are dynamically controlled based on the resource pressure of master nodes to keep them and the cluster available.

        -

        This parameter is supported only by clusters of v1.23 or later.

        +

        This parameter is available only in clusters of v1.23 or later.

        • false: Overload control is disabled.
        • true: Overload control is enabled.

        Query per second (QPS) for the scheduler to access kube-apiserver

        +

        Qps for communicating with kube-apiserver

        kube-api-qps

        • If the number of nodes in a cluster is less than 1000, the default value is 100.
        • If a cluster contains 1000 or more nodes, the default value is 200.

        Burst for the scheduler to access kube-apiserver

        +

        Burst for communicating with kube-apiserver

        kube-api-burst

        Burst to use while talking with kube-apiserver.

        +

        Burst for communicating with kube-apiserver.

        • If the number of nodes in a cluster is less than 1000, the default value is 100.
        • If a cluster contains 1000 or more nodes, the default value is 200.

        GPU sharing

        +

        Whether to enable GPU sharing

        enable-gpu-share

        Whether to enable GPU sharing. This parameter is supported only by clusters of v1.23.7-r10, v1.25.3-r0, and later.

        • When disabled, ensure that pods in the cluster do not use the shared GPU (that is, the annotation of cce.io/gpu-decision does not exist in pods).
        • When enabled, ensure that the annotation of cce.io/gpu-decision exists in pods that use GPU resources in the cluster.

        Default value: true

        +

        Default: true

        Deployment

        +

        Number of concurrent processing of deployment

        concurrent-deployment-syncs

        Default: 5

        Endpoint

        +

        Concurrent processing number of endpoint

        concurrent-endpoint-syncs

        Default: 5

        Garbage collector

        +

        Concurrent number of garbage collector

        concurrent-gc-syncs

        Default: 20

        Job

        +

        Number of job objects allowed to sync simultaneously

        concurrent-job-syncs

        Default: 5

        Namespace

        +

        Number of CronJob objects allowed to sync simultaneously

        +

        concurrent-cron-job-syncs

        +

        Number of scheduled jobs that can be synchronized concurrently.

        +

        Default: 5

        +

        Number of concurrent processing of namespace

        concurrent-namespace-syncs

        Default: 10

        ReplicaSet

        +

        Concurrent processing number of replicaset

        concurrent-replicaset-syncs

        Default: 5

        RsourceQuota

        +

        ResourceQuota

        concurrent-resource-quota-syncs

        Default: 5

        Servicepace

        +

        Concurrent processing number of service

        concurrent-service-syncs

        Default: 10

        ServiceAccountToken

        +

        Concurrent processing number of serviceaccount-token

        concurrent-serviceaccount-token-syncs

        Default: 5

        TTLAfterFinished

        +

        Concurrent processing of ttl-after-finished

        concurrent-ttl-after-finished-syncs

        Number of ttl-after-finished-controller workers that are allowed to sync concurrently

        +

        Number of ttl-after-finished-controller workers that are allowed to sync concurrently

        Default: 5

        Default: 5

        Period for syncing the number of pods in horizontal pod autoscaler

        +

        HPA

        +

        concurrent-horizontal-pod-autoscaler-syncs

        +

        Number of HPA auto scaling requests that can be concurrently processed.

        +

Default: 1 for clusters earlier than v1.27 and 5 for clusters of v1.27 or later

        +

        Value range: 1 to 50

        +

        Cluster elastic computing period

        horizontal-pod-autoscaler-sync-period

        Default: 15 seconds

        QPS for the controller to access kube-apiserver

        +

QPS for communicating with kube-apiserver

        kube-api-qps

        QPS to use while talking with kube-apiserver

        +

        QPS for communicating with kube-apiserver

        • If the number of nodes in a cluster is less than 1000, the default value is 100.
        • If a cluster contains 1000 or more nodes, the default value is 200.

        Burst for the controller to communicate with kube-apiserver

        +

        Burst for communicating with kube-apiserver

        kube-api-burst

        Burst to use while talking with kube-apiserver.

        +

        Burst for communicating with kube-apiserver.

        • If the number of nodes in a cluster is less than 1000, the default value is 100.
        • If a cluster contains 1000 or more nodes, the default value is 200.

        Threshold for triggering garbage collection of terminated pods

        +

        The maximum number of terminated pods that can be kept before the Pod GC deletes the terminated pod

        terminated-pod-gc-threshold

Number of terminated pods that can be retained in a cluster. If the number of terminated pods in the cluster exceeds this value, the excess terminated pods will be deleted.

        +
        NOTE:

        If this parameter is set to 0, all pods in the terminated state are retained.

        +

        Default: 1000

        Value range: 10 to 12500

        -

        HPA

        -

        concurrent-horizontal-pod-autoscaler-syncs

        -

        Number of HPA auto scaling requests that can be concurrently processed. This parameter is available only in clusters of v1.27 or later.

        -

        Default: 5

        -

        Value range: 1 to 50

        +

If the cluster version is v1.21.11-r40, v1.23.8-r0, v1.25.6-r0, v1.27.3-r0, or later, the value range is changed to 0 to 100000.

        -
        Table 4 Network component configurations (supported only by CCE Turbo clusters)

        Item

        +
        @@ -314,28 +324,28 @@ - - - - @@ -345,7 +355,7 @@ - @@ -380,12 +390,12 @@ - diff --git a/docs/cce/umn/cce_10_0214.html b/docs/cce/umn/cce_10_0214.html index d7c9e5d1..0467ce9d 100644 --- a/docs/cce/umn/cce_10_0214.html +++ b/docs/cce/umn/cce_10_0214.html @@ -5,7 +5,7 @@

        After a cluster is hibernated, resources such as workloads cannot be created or managed in the cluster.

        A hibernated cluster can be quickly woken up and used properly.

        -

        Constraints

        • A cluster may fail to be woken up if the master nodes cannot start due to insufficient resources. Wait for a while and wake up the cluster again.
        • After a cluster is hibernated, it takes 3 to 5 minutes to initialize data. Services can be delivered only after the cluster runs properly.
        +

        Constraints

        • During cluster wakeup, the master node may fail to start due to insufficient resources, which leads to a cluster wakeup failure. In this case, wait for a while and try again.
• After a cluster is woken up, it takes 3 to 5 minutes to initialize data. Deliver services only after the cluster is running properly.

        Hibernating a Cluster

        1. Log in to the CCE console. In the navigation pane, choose Clusters.
        2. Locate the cluster to be hibernated, click ... to view more operations on the cluster, and choose Hibernate.
        3. In the dialog box displayed, check the precautions and click Yes. Wait until the cluster is hibernated.
        diff --git a/docs/cce/umn/cce_10_0215.html b/docs/cce/umn/cce_10_0215.html index 241f4ee1..cd915fbc 100644 --- a/docs/cce/umn/cce_10_0215.html +++ b/docs/cce/umn/cce_10_0215.html @@ -8,8 +8,6 @@ -
        -

        Prerequisites

        Before creating a DaemonSet, you must have an available cluster. For details on how to create a cluster, see Creating a CCE Cluster.

        +

        Prerequisites

        Before creating a DaemonSet, you must have an available cluster. For details on how to create a cluster, see Creating a CCE Standard/Turbo Cluster.

        Using the CCE Console

        1. Log in to the CCE console.
2. Click the cluster name to go to the cluster console, choose Workloads in the navigation pane, and click Create Workload in the upper right corner.
        3. Set basic information about the workload.

          Basic Info
          • Workload Type: Select DaemonSet. For details about workload types, see Overview.
          • Workload Name: Enter the name of the workload. Enter 1 to 63 characters starting with a lowercase letter and ending with a lowercase letter or digit. Only lowercase letters, digits, and hyphens (-) are allowed.
          • Namespace: Select the namespace of the workload. The default value is default. You can also click Create Namespace to create one. For details, see Creating a Namespace.
          • Container Runtime: A CCE standard cluster uses runC by default, whereas a CCE Turbo cluster supports both runC and Kata. For details about the differences, see Kata Runtime and Common Runtime.
          • Time Zone Synchronization: Specify whether to enable time zone synchronization. After time zone synchronization is enabled, the container and node use the same time zone. The time zone synchronization function depends on the local disk mounted to the container. Do not modify or delete the time zone. For details, see Configuring Time Zone Synchronization.
          @@ -41,7 +41,7 @@
        - @@ -74,7 +74,7 @@
        Table 4 Networking component configurations (supported only by CCE Turbo clusters)

        Item

        Parameter

        Minimum number of ENIs bound to a node at the cluster level

        +

        The minimum number of network cards bound to the container at the cluster level

        nic-minimum-target

        Minimum number of container ENIs bound to a node

        -

        The parameter value must be a positive integer. The value 10 indicates that there are at least 10 container ENIs bound to a node. If the number you entered exceeds the container ENI quota of the node, the ENI quota will be used.

        +

        The parameter value must be a positive integer. The value 10 indicates that at least 10 container ENIs must be bound to a node. If the number you specified exceeds the container ENI quota of the node, the ENI quota will be used.

        Default: 10

        Maximum number of ENIs pre-bound to a node at the cluster level

        +

        Cluster-level node preheating container NIC upper limit check value

        nic-maximum-target

        If the number of ENIs bound to a node exceeds the value of nic-maximum-target, the system does not proactively pre-bind ENIs.

        -

        Checking the upper limit of pre-bound container ENIs is enabled only when the value of this parameter is at least equal to the minimum number of container ENIs (nic-minimum-target) bound to a node.

        -

        The parameter value must be a positive integer. The value 0 indicates that the check on the upper limit of pre-bound container ENIs is disabled. If the number you entered exceeds the container ENI quota of the node, the ENI quota will be used.

        +

        After the number of ENIs bound to a node exceeds the nic-maximum-target value, CCE will not proactively pre-bind ENIs.

        +

        Checking the upper limit of pre-bound container ENIs is enabled only when the value of this parameter is greater than or equal to the minimum number of container ENIs (nic-minimum-target) bound to a node.

        +

        The parameter value must be a positive integer. The value 0 indicates that checking the upper limit of pre-bound container ENIs is disabled. If the number you specified exceeds the container ENI quota of the node, the ENI quota will be used.

        Default: 0

        Number of ENIs pre-bound to a node at the cluster level

        +

        Number of NICs for dynamically warming up containers at the cluster level

        nic-warm-target

        Default: 2

        Reclaim number of ENIs pre-bound to a node at the cluster level

        +

        Cluster-level node warm-up container NIC recycling threshold

        nic-max-above-warm-target

        Resource quota management

        +

        Enable resource quota management

        enable-resource-quota

        Indicates whether to automatically create a ResourceQuota when creating a namespace. With quota management, you can control the number of workloads of each type and the upper limits of resources in a namespace or related dimensions.

        -
        • false: no auto creation
        • true: auto creation enabled. For details about the resource quota defaults, see Configuring Resource Quotas.
          NOTE:

          In high-concurrency scenarios (for example, creating pods in batches), the resource quota management may cause some requests to fail due to conflicts. Do not enable this function unless necessary. To enable this function, ensure that there is a retry mechanism in the request client.

          +
          • false: Auto creation is disabled.
          • true: Auto creation is enabled. For details about the resource quota defaults, see Configuring Resource Quotas.
            NOTE:

            In high-concurrency scenarios (for example, creating pods in batches), the resource quota management may cause some requests to fail due to conflicts. Do not enable this function unless necessary. To enable this function, ensure that there is a retry mechanism in the request client.

        CPU Quota

        • Request: minimum number of CPU cores required by a container. The default value is 0.25 cores.
        • Limit: maximum number of CPU cores available for a container. Do not leave Limit unspecified. Otherwise, intensive use of container resources will occur and your workload may exhibit unexpected behavior.
        +
        • Request: minimum number of CPU cores required by a container. The default value is 0.25 cores.
        • Limit: maximum number of CPU cores that can be used by a container. This prevents containers from using excessive resources.

        If Request and Limit are not specified, the quota is not limited. For more information and suggestions about Request and Limit, see Configuring Container Specifications.

        -
        • (Optional) Lifecycle: Configure operations to be performed in a specific phase of the container lifecycle, such as Startup Command, Post-Start, and Pre-Stop. For details, see Configuring Container Lifecycle Parameters.
        • (Optional) Health Check: Set the liveness probe, ready probe, and startup probe as required. For details, see Configuring Container Health Check.
        • (Optional) Environment Variables: Configure variables for the container running environment using key-value pairs. These variables transfer external information to containers running in pods and can be flexibly modified after application deployment. For details, see Configuring Environment Variables.
        • (Optional) Data Storage: Mount local storage or cloud storage to the container. The application scenarios and mounting modes vary with the storage type. For details, see Storage.
        • (Optional) Security Context: Assign container permissions to protect the system and other containers from being affected. Enter the user ID to assign container permissions and prevent systems and other containers from being affected.
        • (Optional) Logging: Report standard container output logs to AOM by default, without requiring manual settings. You can manually configure the log collection path. For details, see Connecting CCE to AOM.

          To disable the standard output of the current workload, add the annotation kubernetes.AOM.log.stdout: [] in Labels and Annotations. For details about how to use this annotation, see Table 1.

          +
          • (Optional) Lifecycle: Configure operations to be performed in a specific phase of the container lifecycle, such as Startup Command, Post-Start, and Pre-Stop. For details, see Configuring Container Lifecycle Parameters.
          • (Optional) Health Check: Set the liveness probe, ready probe, and startup probe as required. For details, see Configuring Container Health Check.
          • (Optional) Environment Variables: Configure variables for the container running environment using key-value pairs. These variables transfer external information to containers running in pods and can be flexibly modified after application deployment. For details, see Configuring Environment Variables.
          • (Optional) Data Storage: Mount local storage or cloud storage to the container. The application scenarios and mounting modes vary with the storage type. For details, see Storage.
          • (Optional) Security Context: Assign container permissions to protect the system and other containers from being affected. Enter the user ID to assign container permissions and prevent systems and other containers from being affected.
          • (Optional) Logging: Report standard container output logs to AOM by default, without requiring manual settings. You can manually configure the log collection path. For details, see Collecting Container Logs Using ICAgent.

            To disable the standard output of the current workload, add the annotation kubernetes.AOM.log.stdout: [] in Labels and Annotations. For details about how to use this annotation, see Table 1.

        • Image Access Credential: Select the credential used for accessing the image repository. The default value is default-secret. You can use default-secret to access images in SWR. For details about default-secret, see default-secret.
        • (Optional) GPU: All is selected by default. The workload instance will be scheduled to the node of the specified GPU type.
        @@ -83,10 +83,10 @@

        A Service provides external access for pods. With a static IP address, a Service forwards access traffic to pods and automatically balances load for these pods.

        You can also create a Service after creating a workload. For details about Services of different types, see Overview.

        (Optional) Advanced Settings
        • Upgrade: Specify the upgrade mode and parameters of the workload. Rolling upgrade and Replace upgrade are available. For details, see Workload Upgrade Policies.
        -
        • Scheduling: Configure affinity and anti-affinity policies for flexible workload scheduling. Node affinity is provided.
          • Node Affinity: Common load affinity policies are offered for quick load affinity deployment.
            • Node Affinity: Workload pods can be deployed on specified nodes through node affinity (nodeAffinity). If no node is specified, the pods will be randomly scheduled based on the default scheduling policy of the cluster.
            • Specified node pool scheduling: Workload pods can be deployed in a specified node pool through node affinity (nodeAffinity). If no node pool is specified, the pods will be randomly scheduled based on the default scheduling policy of the cluster.
            • Custom policies: Affinity and anti-affinity policies can be customized as needed. For details, see Scheduling Policies (Affinity/Anti-affinity).
            +
            • Scheduling: Configure affinity and anti-affinity policies for flexible workload scheduling. Node affinity is provided.
              • Node Affinity: Common load affinity policies are offered for quick load affinity deployment.
                • Specified node scheduling: Workload pods can be deployed on specified nodes through node affinity (nodeAffinity). If no node is specified, the pods will be randomly scheduled based on the default scheduling policy of the cluster.
                • Specified node pool scheduling: Workload pods can be deployed in a specified node pool through node affinity (nodeAffinity). If no node pool is specified, the pods will be randomly scheduled based on the default scheduling policy of the cluster.
                • Custom policies: Affinity and anti-affinity policies can be customized as needed. For details, see Scheduling Policies (Affinity/Anti-affinity).
            -
            • Toleration: Using both taints and tolerations allows (not forcibly) the pod to be scheduled to a node with the matching taints, and controls the pod eviction policies after the node where the pod is located is tainted. For details, see Taints and Tolerations.
            • Labels and Annotations: Add labels or annotations for pods using key-value pairs. After entering the key and value, click Confirm. For details about how to use and configure labels and annotations, see Labels and Annotations.
            • DNS: Configure a separate DNS policy for the workload. For details, see DNS Configuration.
            • Network Configuration
              • Pod ingress/egress bandwidth limitation: You can set ingress/egress bandwidth limitation for pods. For details, see Configuring QoS for a Pod.
              +
              • Toleration: Using both taints and tolerations allows (not forcibly) the pod to be scheduled to a node with the matching taints, and controls the pod eviction policies after the node where the pod is located is tainted. For details, see Taints and Tolerations.
              • Labels and Annotations: Add labels or annotations for pods using key-value pairs. After entering the key and value, click Confirm. For details about how to use and configure labels and annotations, see Labels and Annotations.
              • DNS: Configure a separate DNS policy for the workload. For details, see DNS Configuration.
              • Network Configuration

      16. Click Create Workload in the lower right corner.
      17. @@ -124,7 +124,7 @@ spec: imagePullSecrets: - name: default-secret

The replicas parameter used in defining a Deployment or StatefulSet does not exist in the above configuration for a DaemonSet, because a DaemonSet runs exactly one replica on each node. The number is fixed.

        -

        DaemonSet only creates pods on nodes with specific labels. In the preceding pod template, nodeSelector specifies that a pod is created only on nodes with the daemon: need label. If you want to create a pod on each node, delete the label.

        +

The nodeSelector in the preceding pod template specifies that a pod is created only on the nodes that have the daemon=need label. If you want to create a pod on each node, delete this nodeSelector setting. A short labeling example is provided after this procedure.

      18. Create a DaemonSet.

        kubectl create -f nginx-daemonset.yaml

        If the following information is displayed, the DaemonSet is being created.

        daemonset.apps/nginx-daemonset created
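As a quick check of the nodeSelector behavior described in the note above, the following hedged commands label a node so that it matches daemon=need and verify where the DaemonSet pods run. The node name is a placeholder, and the grep pattern simply matches the pod name prefix from this example.

# Add the daemon=need label to a node; the DaemonSet then creates a pod on it.
kubectl label node <node-name> daemon=need

# Check which nodes are running the DaemonSet pods.
kubectl get pods -o wide | grep nginx-daemonset

# Remove the label; the DaemonSet controller deletes the pod from that node.
kubectl label node <node-name> daemon-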
        diff --git a/docs/cce/umn/cce_10_0232.html b/docs/cce/umn/cce_10_0232.html index 1e685028..7f03e012 100644 --- a/docs/cce/umn/cce_10_0232.html +++ b/docs/cce/umn/cce_10_0232.html @@ -8,7 +8,7 @@
      19. You can create the preceding affinity policies on the console. For details, see Configuring Load Affinity on the Console and Configuring Node Affinity on the Console.

        -

        Configuring Load Affinity on the Console

        1. When creating a workload, click Scheduling in the Advanced Settings area. For details about how to create a workload, see Creating a Workload.
        2. Select a load affinity scheduling policy.

          • Incompatibility: No load affinity policy is configured.
          • Multi-AZ deployment preferred: Workload pods are preferentially scheduled to different AZs and forcibly scheduled to different nodes through pod anti-affinity. If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ but different nodes for high availability. When this scheduling policy is used, if there are fewer nodes than pods or node resources are insufficient, the extra pods will fail to run.
          • Forcible multi-AZ deployment: Workload pods are forcibly scheduled to different AZs and different nodes through pod anti-affinity. When this scheduling policy is used, if there are fewer nodes than pods or node resources are insufficient, the extra pods will fail to run.
          • Custom policies: allow flexible scheduling of workload pods based on pod labels. For details about the supported scheduling policies, see Table 1. Select a proper policy type and click to add a policy. For details about the parameters, see Table 2. +

            Configuring Load Affinity on the Console

            1. When creating a workload, click Scheduling in the Advanced Settings area. For details about how to create a workload, see Creating a Workload.
            2. Select a load affinity scheduling policy.

              • Not configured: No load affinity policy is configured.
              • Multi-AZ deployment preferred: Workload pods are preferentially scheduled to nodes in different AZs through pod anti-affinity.
              • Forcible multi-AZ deployment: Workload pods are forcibly scheduled to different AZs and different nodes through pod anti-affinity. When this scheduling policy is used, if there are fewer nodes than pods or node resources are insufficient, the extra pods will fail to run.
              • Custom policies: allow flexible scheduling of workload pods based on pod labels. For details about the supported scheduling policies, see Table 1. Select a proper policy type and click to add a policy. For details about the parameters, see Table 2.
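For reference, the multi-AZ options above are implemented through pod anti-affinity. The following is a rough, hypothetical sketch of the preferred AZ spreading that Multi-AZ deployment preferred corresponds to in a pod template; the app: nginx label is illustrative, and the exact rules generated by the console may differ.

      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:   # prefer spreading pods across AZs
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchLabels:
                  app: nginx                                 # illustrative workload label
              topologyKey: topology.kubernetes.io/zone       # AZ topology domain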
                @@ -31,7 +31,7 @@ @@ -41,7 +41,7 @@ @@ -49,7 +49,7 @@ @@ -102,7 +102,7 @@

              • After the scheduling policy is added, click Create Workload.
              • -

                Configuring Node Affinity on the Console

                1. When creating a workload, click Scheduling in the Advanced Settings area. For details about how to create a workload, see Creating a Workload.
                2. Select a node affinity scheduling policy.

                  • Incompatibility: No node affinity policy is configured.
                  • Node Affinity: Specify the nodes where workload pods are to be deployed. If no nodes are specified, the pods will be randomly scheduled based on the default cluster scheduling policy.
                  • Specified Node Pool Scheduling: Specify the node pools where workload pods are to be deployed. If no node pools are specified, the pods will be randomly scheduled based on the default cluster scheduling policy.
                  • Custom policies: allow flexible scheduling of workload pods based on node labels. For details about the supported scheduling policies, see Table 3. Select a proper policy type and click to add a policy. For details about the parameters, see Table 4. You can also click Specify Node or Specify AZ to quickly select a node or AZ on the console for scheduling.

                    Specifying a node or AZ is also implemented through labels. The console frees you from manually entering node labels. The kubernetes.io/hostname label is used when you specify a node, and the failure-domain.beta.kubernetes.io/zone label is used when you specify an AZ.

                    +

                    Configuring Node Affinity on the Console

                    1. When creating a workload, click Scheduling in the Advanced Settings area. For details about how to create a workload, see Creating a Workload.
                    2. Select a node affinity scheduling policy.

                      • Not configured: No node affinity policy is configured.
                      • Node Affinity: Specify the nodes where workload pods are to be deployed. If no nodes are specified, the pods will be randomly scheduled based on the default cluster scheduling policy.
                      • Specified Node Pool Scheduling: Specify the node pools where workload pods are to be deployed. If no node pools are specified, the pods will be randomly scheduled based on the default cluster scheduling policy.
                      • Custom policies: allow flexible scheduling of workload pods based on node labels. For details about the supported scheduling policies, see Table 3. Select a proper policy type and click to add a policy. For details about the parameters, see Table 4. You can also click Specify Node or Specify AZ to quickly select a node or AZ on the console for scheduling.

                        Specifying a node or AZ is also implemented through labels. The console frees you from manually entering node labels. The kubernetes.io/hostname label is used when you specify a node, and the failure-domain.beta.kubernetes.io/zone label is used when you specify an AZ.
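For reference, the following is a hedged sketch of what Specify Node roughly corresponds to in a pod template. The kubernetes.io/hostname key comes from the note above; the node name 192.168.0.212 is only an illustrative value reused from the examples later in this section.

      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/hostname      # label used when a node is specified on the console
                operator: In
                values:
                - 192.168.0.212                  # illustrative node name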

                Table 1 Load affinity policies

                Policy

                Type

                @@ -23,7 +23,7 @@

                Hard constraint, which is used to configure the conditions that must be met and corresponds to the requiredDuringSchedulingIgnoredDuringExecution field in YAML.

                Select pods that require affinity by label. If such pods have been running on a node in the topology domain, the scheduler will forcibly schedule the created pods to that topology domain.

                -
                NOTE:

                When multiple affinity policies are added, multiple labels are configured for filtering. A pod meets the filtering conditions only when it has all the specified labels.

                +
                NOTE:

If multiple affinity rules are configured, multiple labels are used to filter the pods that require affinity, and the newly created pods must be affinity with every pod that matches these labels. As a result, the new pods can only be scheduled to the topology domain where all of the matching pods are located.

                Soft constraint, which is used to configure the conditions that preferentially to be met and corresponds to the preferredDuringSchedulingIgnoredDuringExecution field in YAML.

                Select pods that require affinity by label. If such pods have been running on a node in the topology domain, the scheduler will preferentially schedule the created pods to that topology domain.

                -
                NOTE:

                When multiple affinity policies are added, multiple labels are configured for filtering. A pod meets the filtering conditions only when it has all the specified labels.

                +
                NOTE:

If multiple affinity rules are configured, multiple labels are used to filter the pods that require affinity, and the newly created pods are preferentially scheduled to the topology domain where the matching pods are located. However, even if no pod matches the label filters, a topology domain will still be selected for scheduling.

                Hard constraint, which corresponds to requiredDuringSchedulingIgnoredDuringExecution in YAML for specifying the conditions that must be met.

                Select one or more pods that require anti-affinity by label. If such pods have been running on a node in the topology domain, the scheduler will not schedule the created pods to that topology domain.

                -
                NOTE:

                When multiple anti-affinity policies are added, multiple labels are configured for filtering. A pod meets the filtering conditions if it has one of the specified labels.

                +
                NOTE:

If multiple anti-affinity rules are configured, multiple labels are used to filter the pods that require anti-affinity, and the newly created pods must be anti-affinity with every pod that matches these labels. As a result, none of the topology domains where the matching pods are located will be selected for scheduling.

                Soft constraint, which corresponds to preferredDuringSchedulingIgnoredDuringExecution in YAML for specifying the conditions that are preferentially met.

                Select one or more pods that require anti-affinity by label. If such pods have been running on a node in the topology domain, the scheduler will preferentially schedule the created pods to other topology domains.

                -
                NOTE:

                When multiple anti-affinity policies are added, multiple labels are configured for filtering. A pod meets the filtering conditions if it has one of the specified labels.

                +
                NOTE:

If multiple anti-affinity rules are configured, multiple labels are used to filter the pods that require anti-affinity, and the newly created pods are preferentially kept away from the topology domains where the matching pods are located. However, even if every topology domain contains pods that require anti-affinity, a topology domain will still be selected for scheduling.

                @@ -141,7 +141,7 @@ - - @@ -70,7 +70,7 @@

              • Click Install.
              • Components

                -
                Table 3 Node affinity settings

                Parameter

                Operator

                The following operators are supported:

                -
                • In: The label of the affinity or anti-affinity object is in the label value list (values field).
                • NotIn: The label of the affinity or anti-affinity object is not in the label value list (values field).
                • Exists: The affinity or anti-affinity object has a specified label name.
                • DoesNotExist: The affinity or anti-affinity object does not have the specified label name.
                • Gt: (available only for node affinity) The label value of the scheduled node is greater than the list value (string comparison).
                • Lt: (available only for node affinity) The label value of the scheduled node is less than the list value (string comparison).
                +
                • In: The label of the affinity or anti-affinity object is in the label value list (values field).
                • NotIn: The label of the affinity or anti-affinity object is not in the label value list (values field).
                • Exists: The affinity or anti-affinity object has a specified label name.
                • DoesNotExist: The affinity or anti-affinity object does not have the specified label name.
                • Gt: (available only for node affinity) The label value of the scheduled node is greater than the list value (string comparison).
                • Lt: (available only for node affinity) The label value of the scheduled node is less than the list value (string comparison).

                Label Value

                @@ -185,7 +185,7 @@ spec: gpu: true ...
                Node affinity rules can achieve the same results. Compared with nodeSelector, node affinity rules seem more complex, but with a more expressive syntax. You can use the spec.affinity.nodeAffinity field to set node affinity. There are two types of node affinity:
                • requiredDuringSchedulingIgnoredDuringExecution: Kubernetes cannot schedule the pod unless the rule is met.
• preferredDuringSchedulingIgnoredDuringExecution: Kubernetes tries to find a node that meets the rule. If a matching node is not available, Kubernetes still schedules the pod.
                -

                In these two types of node affinity, requiredDuringScheduling or preferredDuringScheduling indicates that the pod can be scheduled to a node only when all the defined rules are met (required). IgnoredDuringExecution indicates that if the node label changes after Kubernetes schedules the pod, the pod continues to run and will not be rescheduled. However, if kubelet on the node is restarted, kubelet will verify the node affinity rule again, and the pod will still be scheduled to another node.

                +

                In these two types of node affinity, requiredDuringScheduling or preferredDuringScheduling indicates that the pod can be scheduled to a node only when all the defined rules are met (required). IgnoredDuringExecution indicates that if the node label changes after Kubernetes schedules the pod, the pod continues to run and will not be rescheduled. However, if kubelet on the node is restarted, kubelet will recheck the node affinity rule, and the pod will still be scheduled to another node.

                The following is an example of setting node affinity:
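The example itself is unchanged in this diff and therefore not included here. A minimal sketch of such a configuration, assuming a Deployment whose pods must run on nodes that carry the gpu=true label (consistent with the output below; the nginx image is illustrative), could look like the following.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: gpu
  labels:
    app: gpu
spec:
  replicas: 3
  selector:
    matchLabels:
      app: gpu
  template:
    metadata:
      labels:
        app: gpu
    spec:
      containers:
      - name: container-0
        image: nginx:alpine          # illustrative image
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:   # hard rule: only nodes with gpu=true
            nodeSelectorTerms:
            - matchExpressions:
              - key: gpu
                operator: In
                values:
                - "true"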

                @@ -325,7 +325,7 @@ gpu-585455d466-t56cm 1/1 Running 0 2m29s 172.16.0.64 192.16 gpu-585455d466-t5w5x 1/1 Running 0 2m29s 172.16.0.41 192.168.0.212

In the preceding example, the node scheduling priority is as follows. Nodes with both SSD and gpu=true labels have the highest priority. Nodes with the SSD label but no gpu=true label have the second priority (weight: 80). Nodes with the gpu=true label but no SSD label have the third priority. Nodes with neither of these two labels have the lowest priority.

                -
                Figure 1 Scheduling priority
                +
                Figure 1 Scheduling priority

                Workload Affinity (podAffinity)

                Node affinity rules affect only the affinity between pods and nodes. Kubernetes also supports configuring inter-pod affinity rules. For example, the frontend and backend of an application can be deployed together on one node to reduce access latency. There are also two types of inter-pod affinity rules: requiredDuringSchedulingIgnoredDuringExecution and preferredDuringSchedulingIgnoredDuringExecution.

                For workload affinity, topologyKey cannot be left blank when requiredDuringSchedulingIgnoredDuringExecution and preferredDuringSchedulingIgnoredDuringExecution are used.
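As a reference for these two rule types, the following is a minimal hedged sketch of a required workload affinity rule in a pod template, consistent with the backend/frontend example discussed below; the app=backend label and the prefer topology key are taken from that example.

      affinity:
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - backend
            topologyKey: prefer      # nodes are grouped into topology domains by this node label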

                @@ -411,7 +411,7 @@ NAME STATUS ROLES AGE VERSION PREFE operator: In values: - backend -
                Figure 2 Topology domains
                +
                Figure 2 Topology domains

                During scheduling, node topology domains are divided based on the prefer label. In this example, 192.168.0.97 and 192.168.0.94 are divided into the same topology domain. If a pod with the app=backend label runs in the topology domain, even if not all nodes in the topology domain run the pod with the app=backend label (in this example, only the 192.168.0.97 node has such a pod), frontend is also deployed in this topology domain (192.168.0.97 or 192.168.0.94).

                $ kubectl create -f affinity3.yaml 
                 deployment.apps/frontend created
                @@ -479,7 +479,7 @@ frontend-6f686d8d87-q7cfq   1/1     Running   0          18s   172.16.0.47   192
                 frontend-6f686d8d87-xl8hx   1/1     Running   0          18s   172.16.0.23   192.168.0.94 

                Operator Values

                You can use the operator field to set the logical relationship of the usage rule. The value of operator can be:

                -
                • In: The label of the affinity or anti-affinity object is in the label value list (values field).
                • NotIn: The label of the affinity or anti-affinity object is not in the label value list (values field).
                • Exists: The affinity or anti-affinity object has a specified label name.
                • DoesNotExist: The affinity or anti-affinity object does not have the specified label name.
                • Gt: (available only for node affinity) The label value of the scheduled node is greater than the list value (string comparison).
                • Lt: (available only for node affinity) The label value of the scheduled node is less than the list value (string comparison).
                +
                • In: The label of the affinity or anti-affinity object is in the label value list (values field).
                • NotIn: The label of the affinity or anti-affinity object is not in the label value list (values field).
                • Exists: The affinity or anti-affinity object has a specified label name.
                • DoesNotExist: The affinity or anti-affinity object does not have the specified label name.
                • Gt: (available only for node affinity) The label value of the scheduled node is greater than the list value (string comparison).
                • Lt: (available only for node affinity) The label value of the scheduled node is less than the list value (string comparison).
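To illustrate operators other than In, the following hedged node affinity fragment combines Exists and Gt in one matchExpressions list; all expressions in the same list must be met. The gpu and cpu-cores label keys and the value 8 are purely illustrative.

      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: gpu               # the node must have a gpu label, regardless of its value
                operator: Exists
              - key: cpu-cores         # the node label value must be greater than the listed value
                operator: Gt
                values:
                - "8"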
                diff --git a/docs/cce/umn/cce_10_0240.html b/docs/cce/umn/cce_10_0240.html index ee231ea6..4ac572fa 100644 --- a/docs/cce/umn/cce_10_0240.html +++ b/docs/cce/umn/cce_10_0240.html @@ -1,7 +1,7 @@

                CCE Advanced HPA

                -

                CCE Advanced HPA (cce-hpa-controller) is a CCE-developed add-on, which can be used to flexibly scale in or out Deployments based on metrics such as CPU usage and memory usage.

                +

                cce-hpa-controller is a CCE-developed add-on, which can be used to flexibly scale in or out Deployments based on metrics such as CPU usage and memory usage.

                Main Functions

                • Scaling can be performed based on the percentage of the current number of pods.
                • The minimum scaling step can be set.
                • Different scaling operations can be performed based on the actual metric values.

                Constraints

                • This add-on can be installed only in clusters of v1.15 or later.
• If the add-on version is 1.2.11 or later, an add-on that can provide the metrics API must be installed.
                  • Kubernetes Metrics Server: provides basic resource usage metrics, such as container CPU and memory usage. It is supported by all cluster versions.
                  @@ -48,12 +48,12 @@

                Multi AZ

                • Preferred: Deployment pods of the add-on will be preferentially scheduled to nodes in different AZs. If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ.
                • Equivalent mode: Deployment pods of the add-on are evenly scheduled to the nodes in the cluster in each AZ. If a new AZ is added, you are advised to increase add-on pods for cross-AZ HA deployment. With the Equivalent multi-AZ deployment, the difference between the number of add-on pods in different AZs will be less than or equal to 1. If resources in one of the AZs are insufficient, pods cannot be scheduled to that AZ.
                • Required: Deployment pods of the add-on will be forcibly scheduled to nodes in different AZs. If there are fewer AZs than pods, the extra pods will fail to run.
                +
                • Preferred: Deployment pods of the add-on will be preferentially scheduled to nodes in different AZs. If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ.
                • Equivalent mode: Deployment pods of the add-on are evenly scheduled to the nodes in the cluster in each AZ. If a new AZ is added, you are advised to increase add-on pods for cross-AZ HA deployment. With the Equivalent multi-AZ deployment, the difference between the number of add-on pods in different AZs will be less than or equal to 1. If resources in one of the AZs are insufficient, pods cannot be scheduled to that AZ.
                • Required: Deployment pods of the add-on will be forcibly scheduled to nodes in different AZs. If there are fewer AZs than pods, the extra pods will fail to run.

                Node Affinity

                • Incompatibility: Node affinity is disabled for the add-on.
                • Node Affinity: Specify the nodes where the add-on is deployed. If you do not specify the nodes, the add-on will be randomly scheduled based on the default cluster scheduling policy.
                • Specified Node Pool Scheduling: Specify the node pool where the add-on is deployed. If you do not specify the node pool, the add-on will be randomly scheduled based on the default cluster scheduling policy.
                • Custom Policies: Enter the labels of the nodes where the add-on is to be deployed for more flexible scheduling policies. If you do not specify node labels, the add-on will be randomly scheduled based on the default cluster scheduling policy.

                  If multiple custom affinity policies are configured, ensure that there are nodes that meet all the affinity policies in the cluster. Otherwise, the add-on cannot run.

                  +
                • Not configured: Node affinity is disabled for the add-on.
                • Node Affinity: Specify the nodes where the add-on is deployed. If you do not specify the nodes, the add-on will be randomly scheduled based on the default cluster scheduling policy.
                • Specified Node Pool Scheduling: Specify the node pool where the add-on is deployed. If you do not specify the node pool, the add-on will be randomly scheduled based on the default cluster scheduling policy.
                • Custom Policies: Enter the labels of the nodes where the add-on is to be deployed for more flexible scheduling policies. If you do not specify node labels, the add-on will be randomly scheduled based on the default cluster scheduling policy.

                  If multiple custom affinity policies are configured, ensure that there are nodes that meet all the affinity policies in the cluster. Otherwise, the add-on cannot run.

                Table 3 cce-hpa-controller components

                Component

                +
                diff --git a/docs/cce/umn/cce_10_0245.html b/docs/cce/umn/cce_10_0245.html index 41b5e56f..b23b42f3 100644 --- a/docs/cce/umn/cce_10_0245.html +++ b/docs/cce/umn/cce_10_0245.html @@ -9,7 +9,7 @@

                Permission Design

                The following uses company X as an example.

                Generally, a company has multiple departments or projects, and each department has multiple members. Design how permissions are to be assigned to different groups and projects, and set a user name for each member to facilitate subsequent user group and permissions configuration.

                The following figure shows the organizational structure of a department in a company and the permissions to be assigned to each member:

                -

                +

                Director: David

                David is a department director of company X. To assign him all CCE permissions (both cluster and namespace permissions), create the cce-admin user group for David on the IAM console and assign the CCE Administrator role.

                CCE Administrator: This role has all CCE permissions. You do not need to assign other permissions.

                diff --git a/docs/cce/umn/cce_10_0247.html b/docs/cce/umn/cce_10_0247.html index f7885601..ce64c7d7 100644 --- a/docs/cce/umn/cce_10_0247.html +++ b/docs/cce/umn/cce_10_0247.html @@ -16,7 +16,7 @@ - diff --git a/docs/cce/umn/cce_10_0248.html b/docs/cce/umn/cce_10_0248.html index 19ef9ed8..22cca7e1 100644 --- a/docs/cce/umn/cce_10_0248.html +++ b/docs/cce/umn/cce_10_0248.html @@ -6,7 +6,7 @@ diff --git a/docs/cce/umn/cce_10_0249.html b/docs/cce/umn/cce_10_0249.html index 6a73ce2f..d997dfab 100644 --- a/docs/cce/umn/cce_10_0249.html +++ b/docs/cce/umn/cce_10_0249.html @@ -105,7 +105,7 @@ curl: (7) Failed to connect to 192.168.10.36 port 900: Connection refused
                # kubectl get svc nginx
                 NAME    TYPE           CLUSTER-IP      EXTERNAL-IP                   PORT(S)        AGE
                 nginx   LoadBalancer   10.247.76.156   123.**.**.**,192.168.0.133   80:32146/TCP   37s
                -
                When the value of externalTrafficPolicy is Local, the access failures in different container network models and service forwarding modes are as follows:
                • For a multi-pod workload, ensure that all pods are accessible. Otherwise, there is a possibility that the access to the workload fails.
                • CCE Turbo clusters using Cloud Native 2.0 networking do not support node-level service affinity.
                • The table lists only the scenarios where the access may fail. Other scenarios that are not listed in the table indicate that the access is normal.
                +
                When the value of externalTrafficPolicy is Local, the access failures in different container network models and service forwarding modes are as follows:
                • For a multi-pod workload, ensure that all pods are accessible. Otherwise, there is a possibility that the access to the workload fails.
                • In a CCE Turbo cluster that utilizes a Cloud Native 2.0 network model, node-level affinity is supported only when the Service backend is connected to a HostNetwork pod.
                • The table lists only the scenarios where the access may fail. Other scenarios that are not listed in the table indicate that the access is normal.
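For reference, the node-level (Local) service affinity discussed above is configured through the externalTrafficPolicy field of the Service. The following is a minimal hedged sketch; the selector, ports, and any CCE-specific ELB annotations (omitted here) depend on the actual workload.

apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  selector:
    app: nginx              # illustrative workload label
  ports:
  - name: service0
    port: 80                # port exposed through the load balancer
    targetPort: 80          # container port of the backend pods
  type: LoadBalancer
  externalTrafficPolicy: Local    # node-level affinity: traffic goes only to pods on the receiving node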
                Table 3 Add-on components

                Component

                Description

                Service Type Released on the Server

                @@ -234,7 +234,7 @@ spec: selector: app: nginx type: LoadBalancer -
              • Leveraging the pass-through feature of the Service, kube-proxy is bypassed when the ELB address is used for access. The ELB load balancer is accessed first, and then the workload. For details, see Enabling Passthrough Networking for LoadBalancer Services.
                • After passthrough networking is configured for a dedicated load balancer, containers on the node where the workload runs cannot be accessed through the Service.
                • Passthrough networking is not supported for clusters of v1.15 or earlier.
                • In IPVS network mode, the pass-through settings of Service connected to the same ELB must be the same.
                • If node-level (local) service affinity is used, kubernetes.io/elb.pass-through is automatically set to onlyLocal to enable pass-through.
                +
              • Leveraging the pass-through feature of the Service, kube-proxy is bypassed when the ELB address is used for access. The ELB load balancer is accessed first, and then the workload. For details, see Enabling Passthrough Networking for LoadBalancer Services.
• After passthrough networking is configured for a dedicated load balancer, in a CCE standard cluster, pods that run on the same node as the workload cannot access the workload through the LoadBalancer Service.
                • Passthrough networking is not supported for clusters of v1.15 or earlier.
                • In IPVS network mode, the pass-through settings of Service connected to the same ELB must be the same.
                • If node-level (local) service affinity is used, kubernetes.io/elb.pass-through is automatically set to onlyLocal to enable pass-through.
                apiVersion: v1 
                 kind: Service 
                diff --git a/docs/cce/umn/cce_10_0251.html b/docs/cce/umn/cce_10_0251.html
                index 9fc96a73..c4ecc832 100644
                --- a/docs/cce/umn/cce_10_0251.html
                +++ b/docs/cce/umn/cce_10_0251.html
                @@ -1,35 +1,54 @@
                 
                 
                -

                Creating an ELB Ingress on the Console

                -

                Prerequisites

                +

                Creating a LoadBalancer Ingress on the Console

                +

                Prerequisites

                -

                Constraints

                • It is recommended that other resources not use the load balancer automatically created by an ingress. Otherwise, the load balancer will be occupied when the ingress is deleted, resulting in residual resources.
                • After an ingress is created, upgrade and maintain the configuration of the selected load balancers on the CCE console. Do not modify the configuration on the ELB console. Otherwise, the ingress service may be abnormal.
                • The URL registered in an ingress forwarding policy must be the same as the URL used to access the backend Service. Otherwise, a 404 error will be returned.
                • In a cluster using the IPVS proxy mode, if the ingress and Service use the same ELB load balancer, the ingress cannot be accessed from the nodes and containers in the cluster because kube-proxy mounts the LoadBalancer Service address to the ipvs-0 bridge. This bridge intercepts the traffic of the load balancer connected to the ingress. Use different ELB load balancers for the ingress and Service.
                • Dedicated load balancers must be the application type (HTTP/HTTPS) supporting private networks (with a private IP).
                • If multiple ingresses are used to connect to the same ELB port in the same cluster, the listener configuration items (such as the certificate associated with the listener and the HTTP2 attribute of the listener) are subject to the configuration of the first ingress.
                +

                Constraints

                • It is recommended that other resources not use the load balancer automatically created by an ingress. Otherwise, the load balancer will be occupied when the ingress is deleted, resulting in residual resources.
                • After an ingress is created, upgrade and maintain the configuration of the selected load balancers on the CCE console. Do not modify the configuration on the ELB console. Otherwise, the ingress service may be abnormal.
                • The URL registered in an ingress forwarding policy must be the same as the URL used to access the backend Service. Otherwise, a 404 error will be returned.
                • In a cluster using the IPVS proxy mode, if the ingress and Service use the same ELB load balancer, the ingress cannot be accessed from the nodes and containers in the cluster because kube-proxy mounts the LoadBalancer Service address to the ipvs-0 bridge. This bridge intercepts the traffic of the load balancer connected to the ingress. Use different load balancers for the ingress and Service.
• A dedicated load balancer must be of the application (HTTP/HTTPS) type and support private networks (with a private IP).
                • If multiple ingresses access the same ELB port in a cluster, the listener configuration items (such as the certificate associated with the listener and the HTTP2 attribute of the listener) are subject to the configuration of the first ingress.
                -

                Adding an ELB Ingress

                This section uses an Nginx workload as an example to describe how to add an ELB ingress.

                -
                1. Log in to the CCE console and click the cluster name to access the cluster console.
                2. Choose Services & Ingresses in the navigation pane, click the Ingresses tab, and click Create Ingress in the upper right corner.
                3. Configure ingress parameters.

                  • Name: specifies a name of an ingress, for example, ingress-demo.
                  • Load Balancer

                    Select the load balancer to interconnect. Only load balancers in the same VPC as the cluster are supported. If no load balancer is available, click Create Load Balancer to create one on the ELB console.

                    -

                    Dedicated load balancers must support HTTP or HTTPS and the network type must support private networks.

                    -
                    The CCE console supports automatic creation of load balancers. Select Auto create from the drop-down list box and configure the following parameters:
                    • Instance Name: Enter a load balancer name.
                    • Public Access: If enabled, an EIP with 5 Mbit/s bandwidth will be created.
                    • Subnet, AZ, and Specifications (available only for dedicated load balancers): Configure the subnet, AZ, and specifications. Only dedicated load balancers of the application type (HTTP/HTTPS) can be automatically created.
                    +

                    Adding a LoadBalancer Ingress

                    This section uses an Nginx workload as an example to describe how to add a LoadBalancer ingress.

                    +
                    1. Log in to the CCE console and click the cluster name to access the cluster console.
                    2. Choose Services & Ingresses in the navigation pane, click the Ingresses tab, and click Create Ingress in the upper right corner.
                    3. Configure ingress parameters.

                      • Name: specifies a name of an ingress, for example, ingress-demo.
                      • Load Balancer: Select a load balancer type and creation mode.

                        A load balancer can be dedicated or shared. A dedicated load balancer must be of the application (HTTP/HTTPS) type and support private networks.

                        +
                        You can select Use existing or Auto create to obtain a load balancer. For details about the configuration of different creation modes, see Table 1. +
                        + + + + + + + + + + +
                        Table 1 Load balancer configurations

                        How to Create

                        +

                        Configuration

                        +

                        Use existing

                        +

                        Only the load balancers in the same VPC as the cluster can be selected. If no load balancer is available, click Create Load Balancer to create one on the ELB console.

                        +

                        Auto create

                        +
                        • Instance Name: Enter a load balancer name.
                        • Public Access: If enabled, an EIP with 5 Mbit/s bandwidth will be created.
• AZ: available only to dedicated load balancers. You can deploy a load balancer in multiple AZs to improve service availability.
                        • Specifications (available only to dedicated load balancers)
                          • Fixed: applies to stable traffic, billed based on specifications.
                          +
                        +
                        -
• Listener: An ingress configures a listener for the load balancer, which listens to requests from the load balancer and distributes traffic. After the configuration is complete, a listener is created on the load balancer. The default listener name is k8s__<Protocol type>_<Port number>, for example, k8s_HTTP_80.
  • External Protocol: HTTP or HTTPS
  • External Port: port number that is open to the ELB service address. Any port number can be used.
  • Certificate Source: TLS secret and ELB server certificate are supported.
  • Server Certificate: When an HTTPS listener is created for a load balancer, bind a certificate to the load balancer to support encrypted authentication for HTTPS data transmission.
    • TLS secret: For details about how to create a secret certificate, see Creating a Secret. (A sample TLS secret manifest is provided at the end of this section.)
    • ELB server certificate: Use the certificate created in the ELB service.

    If there is already an HTTPS ingress for the chosen port on the load balancer, the certificate of the new HTTPS ingress must be the same as the certificate of the existing ingress. This means that a listener has only one certificate. If two certificates, each with a different ingress, are added to the same listener of the same load balancer, only the certificate added earliest takes effect on the load balancer.

  • SNI: Server Name Indication (SNI) is an extended protocol of TLS. It allows multiple TLS-based access domain names to be provided for external systems using the same IP address and port. Different domain names can use different security certificates. After SNI is enabled, the client is allowed to submit the requested domain name when initiating a TLS handshake request. After receiving the TLS request, the load balancer searches for the certificate based on the domain name in the request. If the certificate corresponding to the domain name is found, the load balancer returns the certificate for authorization. Otherwise, the default certificate (server certificate) is returned for authorization.
    • The SNI option is available only when HTTPS is used.
    • This function is supported only in clusters of v1.15.11 and later.
    • Only one domain name can be specified for each SNI certificate. Wildcard-domain certificates are supported.
    • For ingresses connected to the same ELB port, do not configure SNIs with the same domain name but different certificates. Otherwise, the SNIs will be overwritten.
                    • Security Policy: combinations of different TLS versions and supported cipher suites available to HTTPS listeners.

                      For details about security policies, see ELB User Guide.

                      • Security Policy is available only when HTTPS is selected.
                      • This function is supported only in clusters of v1.17.9 and later.
• Backend Protocol:

  When the listener uses HTTP, only HTTP can be selected.

  If it is an HTTPS listener, this parameter can be set to HTTP or HTTPS.
• Forwarding Policy: When the access address of a request matches the forwarding policy (a forwarding policy consists of a domain name and URL, for example, 10.117.117.117:80/helloworld), the request is forwarded to the corresponding target Service for processing. You can add multiple forwarding policies.
  • Domain Name: actual domain name. Ensure that the domain name has been registered and archived. Once a domain name rule is configured, you must use the domain name for access.
  • URL Matching Rule
    • Prefix match: If the URL is set to /healthz, any URL that starts with this prefix can be accessed, for example, /healthz/v1 and /healthz/v2.
    • Exact match: The URL can be accessed only when it is fully matched. For example, if the URL is set to /healthz, only /healthz can be accessed.
    • RegEX match: The URL is matched based on the regular expression. For example, if the regular expression is /[A-Za-z0-9_.-]+/test, all URLs that comply with this rule can be accessed, for example, /abcA9/test and /v1-Ab/test. Two regular expression standards are supported: POSIX and Perl.
                      • URL: access path to be registered, for example, /healthz.

                        The access path added here must exist in the backend application. Otherwise, the forwarding fails.

For example, the default Nginx application serves content from /usr/share/nginx/html. If you add /test to the ingress forwarding policy, ensure that /usr/share/nginx/html/test exists in your Nginx application. Otherwise, error 404 will be returned.

                      • Destination Service: Select an existing Service or create a Service. Services that do not meet search criteria are automatically filtered out.
                      • Destination Service Port: Select the access port of the destination Service.
                      • Set ELB:
• Algorithm: Three algorithms are available: weighted round robin, weighted least connections, and source IP hash.
                          • Weighted round robin: Requests are forwarded to different servers based on their weights, which indicate server processing performance. Backend servers with higher weights receive proportionately more requests, whereas equal-weighted servers receive the same number of requests. This algorithm is often used for short connections, such as HTTP services.
                          • Weighted least connections: In addition to the weight assigned to each server, the number of connections processed by each backend server is considered. Requests are forwarded to the server with the lowest connections-to-weight ratio. Building on least connections, the weighted least connections algorithm assigns a weight to each server based on their processing capability. This algorithm is often used for persistent connections, such as database connections.
                          • Source IP hash: The source IP address of each request is calculated using the hash algorithm to obtain a unique hash key, and all backend servers are numbered. The generated key allocates the client to a particular server. This enables requests from different clients to be distributed in load balancing mode and ensures that requests from the same client are forwarded to the same server. This algorithm applies to TCP connections without cookies.
• Sticky Session: This function is disabled by default. Options are as follows:
  • Load balancer cookie: Enter the Stickiness Duration, which ranges from 1 to 1,440 minutes.

  • When the distribution policy uses the source IP hash, sticky sessions cannot be set.
  • Dedicated load balancers in clusters of a version earlier than v1.21 do not support sticky sessions. If sticky sessions are required, use shared load balancers.
                        • Health Check: Set the health check configuration of the load balancer. If this function is enabled, the following configurations are supported:
  • Protocol: When the protocol of the target Service port is TCP, more protocols, including HTTP, are supported.
  • Check Path (supported only by HTTP for health check): specifies the health check URL. The check path must start with a slash (/) and contain 1 to 80 characters.
  • Port: port used for health checks.
                        • Operation: Click Delete to delete the configuration.
• Annotation: Ingresses provide some advanced CCE functions, which are implemented by annotations. When you use kubectl to create an ingress, annotations will be used. For details, see Creating an Ingress - Automatically Creating a Load Balancer or Creating an Ingress - Interconnecting with an Existing Load Balancer.

4. After the configuration is complete, click OK. After the ingress is created, it is displayed in the ingress list.

  On the ELB console, you can view the load balancer automatically created through CCE. The default name is cce-lb-ingress.UID. Click the ELB name to access its details page. On the Listeners tab page, view the route settings of the ingress, including the URL, listener port, and backend server group port.

  After an ingress is created, upgrade and maintain the selected load balancer on the CCE console. Do not modify the configuration on the ELB console. Otherwise, the ingress service may be abnormal.

5. Access the /healthz interface of the workload, for example, workload defaultbackend.

  1. Obtain the access address of the /healthz interface of the workload. The access address consists of the load balancer IP address, external port, and mapping URL, for example, 10.**.**.**:80/healthz.
  2. Enter the URL of the /healthz interface, for example, http://10.**.**.**:80/healthz, in the address box of the browser to access the workload, as shown in Figure 1.

    Figure 1 Accessing the /healthz interface of defaultbackend
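For the TLS secret certificate source mentioned in the listener settings above, the following is a minimal sketch of a secret that can be bound to an HTTPS listener. The secret name and namespace are assumptions, and the certificate data is only a placeholder; replace them with your own Base64-encoded certificate and private key.

apiVersion: v1
kind: Secret
metadata:
  name: ingress-tls-secret        # assumed name; use your own
  namespace: default              # assumed namespace
type: kubernetes.io/tls           # standard Kubernetes TLS secret type
data:
  tls.crt: <Base64-encoded certificate>
  tls.key: <Base64-encoded private key>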

diff --git a/docs/cce/umn/cce_10_0252.html b/docs/cce/umn/cce_10_0252.html

Using kubectl to Create a LoadBalancer Ingress

Scenario

This section uses an Nginx workload as an example to describe how to create a LoadBalancer ingress using kubectl.

Prerequisites

                          Ingress Description of networking.k8s.io/v1

                          In CCE clusters of v1.23 or later, the ingress version is switched to networking.k8s.io/v1.

                          Compared with v1beta1, v1 has the following differences in parameters:

                          • The ingress type is changed from kubernetes.io/ingress.class in annotations to spec.ingressClassName.
                          • The format of backend is changed.
                          • The pathType parameter must be specified for each path. The options are as follows:
                            • ImplementationSpecific: The matching method depends on Ingress Controller. The matching method defined by ingress.beta.kubernetes.io/url-match-mode is used in CCE, which is the same as v1beta1.
                            • Exact: exact matching of the URL, which is case-sensitive.
                            • Prefix: matching based on the URL prefix separated by a slash (/). The match is case-sensitive, and elements in the path are matched one by one. A path element refers to a list of labels in the path separated by a slash (/).
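As an illustration of the changed backend format, compare the same rule in both API versions. This is a minimal sketch assuming a Service named <your_service_name> on port 8080 and the CCE url-match-mode property used in the examples below.

# networking.k8s.io/v1beta1 (clusters earlier than v1.23)
spec:
  rules:
  - http:
      paths:
      - path: '/'
        backend:
          serviceName: <your_service_name>
          servicePort: 8080
        property:
          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH

# networking.k8s.io/v1 (clusters of v1.23 or later)
spec:
  rules:
  - http:
      paths:
      - path: '/'
        backend:
          service:
            name: <your_service_name>
            port:
              number: 8080
        property:
          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
        pathType: ImplementationSpecific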

                          Creating an Ingress - Automatically Creating a Load Balancer

                          The following describes how to run the kubectl command to automatically create a load balancer when creating an ingress.

                          1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
                          2. Create a YAML file named ingress-test.yaml. The file name can be customized.

                            vi ingress-test.yaml

For clusters of v1.23 or later, the example uses the networking.k8s.io/v1 API and sets ingressClassName: cce (a LoadBalancer ingress is used). Each path carries the property ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH and pathType: ImplementationSpecific, and the kubernetes.io/elb.autocreate annotation can also include "vip_subnet_cidr_id" and "vip_address" in addition to the bandwidth settings ("bandwidth_chargemode":"traffic", "bandwidth_size":5, "bandwidth_sharetype":"PER", "eip_type":"5_bgp").
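The complete manifest is only partially visible here, so the following is a minimal sketch assembled from those fields. <your_service_name> and port 8080 are placeholders for your target Service, and the bandwidth and EIP values mirror the shared-load-balancer example below; adjust them to your environment.

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-test
  annotations:
    kubernetes.io/elb.class: union        # a shared load balancer is automatically created
    kubernetes.io/elb.port: '80'
    kubernetes.io/elb.autocreate: 
      '{
          "type":"public",
          "bandwidth_name":"cce-bandwidth-******",
          "bandwidth_chargemode":"traffic",
          "bandwidth_size":5,
          "bandwidth_sharetype":"PER",
          "eip_type":"5_bgp"
      }'
spec:
  rules:
  - http:
      paths:
      - path: '/'
        backend:
          service:
            name: <your_service_name>     # Replace it with the name of your target Service.
            port:
              number: 8080                # Replace 8080 with the port number of your target Service.
        property:
          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
        pathType: ImplementationSpecific
  ingressClassName: cce                   # A LoadBalancer ingress is used.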
                          Example of a shared load balancer (public network access) for clusters of v1.21 or earlier:
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: ingress-test
  annotations:
    kubernetes.io/elb.class: union
    kubernetes.io/ingress.class: cce    # A LoadBalancer ingress is used.
    kubernetes.io/elb.port: '80'
    kubernetes.io/elb.autocreate: 
      '{
          "type":"public",
          "bandwidth_name":"cce-bandwidth-******",
          "bandwidth_chargemode":"traffic",
          "bandwidth_size":5,
          "bandwidth_sharetype":"PER",
          "eip_type":"5_bgp"
      }'

For an automatically created dedicated load balancer, set kubernetes.io/elb.class to performance and add fields such as "vip_subnet_cidr_id", "vip_address", "elb_virsubnet_ids" (for example, ["*****"]), and "available_zone" (for example, "eu-de-01") to the kubernetes.io/elb.autocreate annotation.
                           

kubernetes.io/elb.class

  Type: String

  Select a proper load balancer type.

  • union: shared load balancer
  • performance: dedicated load balancer

kubernetes.io/ingress.class

  Type: String

  cce: A proprietary LoadBalancer ingress is used.

  This parameter is mandatory when an ingress is created by calling the API.

kubernetes.io/elb.port

  Mandatory: Yes

  Type: String

  This parameter indicates the external port registered with the address of the LoadBalancer Service.

  Supported range: 1 to 65535
bandwidth_chargemode

  Bandwidth mode.

  • traffic: billed by traffic

  Default: traffic

bandwidth_size

eip_type

  The specific type varies with regions. For details, see the EIP console.

vip_subnet_cidr_id

  Mandatory: No

  Type: String

  Subnet where a load balancer is located. The subnet must belong to the VPC where the cluster resides.

  If this parameter is not specified, the ELB load balancer and the cluster are in the same subnet.

  This field can be specified only for clusters of v1.21 or later.

vip_address

  Mandatory: No

  Type: String

  Private IP address of the load balancer. Only IPv4 addresses are supported.

  The IP address must be in the ELB CIDR block. If this parameter is not specified, an IP address will be automatically assigned from the ELB CIDR block.

  This parameter is available only in clusters of v1.23.11-r0, v1.25.6-r0, v1.27.3-r0, or later versions.

                          available_zone

  Type: Array of strings

  AZ where the load balancer is located. You can obtain all supported AZs by getting the AZ list.

  This parameter is available only for dedicated load balancers.

l4_flavor_name

  Type: String

  Flavor name of the layer-4 load balancer. You can obtain all supported types by getting the flavor list.

  This parameter is available only for dedicated load balancers.

l7_flavor_name

  Type: String

  Flavor name of the layer-7 load balancer. You can obtain all supported types by getting the flavor list.

  This parameter is available only for dedicated load balancers. The value of this parameter must be the same as that of l4_flavor_name, that is, both are elastic specifications or fixed specifications.

ipv6_vip_virsubnet_id

  Mandatory: No

  Type: String

  ID of the IPv6 subnet where the load balancer resides. IPv6 must be enabled for the corresponding subnet. This parameter is mandatory only when dual-stack clusters are used.

  This parameter is available only for dedicated load balancers.
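Putting the dedicated-load-balancer fields above together, the annotations of an automatically created dedicated load balancer might look like the sketch below. The subnet ID, AZ, and flavor names are placeholders; obtain the real values from your VPC, the AZ list, and the flavor list, and optionally add "vip_subnet_cidr_id" and "vip_address" in clusters that support them.

kubernetes.io/elb.class: performance          # a dedicated load balancer is automatically created
kubernetes.io/elb.autocreate: 
  '{
      "type": "public",
      "bandwidth_name": "cce-bandwidth-******",
      "bandwidth_chargemode": "traffic",
      "bandwidth_size": 5,
      "bandwidth_sharetype": "PER",
      "eip_type": "5_bgp",
      "elb_virsubnet_ids": ["*****"],
      "available_zone": ["eu-de-01"],
      "l4_flavor_name": "L4_flavor.elb.s1.small",
      "l7_flavor_name": "L7_flavor.elb.s1.small"
  }'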
3. Create the ingress.

  kubectl create -f ingress-test.yaml

                          If information similar to the following is displayed, the ingress has been created.

                          ingress/ingress-test created

                          kubectl get ingress


                          If information similar to the following is displayed, the ingress has been created and the workload is accessible.

                          NAME             HOSTS     ADDRESS          PORTS   AGE
                           ingress-test     *         121.**.**.**     80      10s

                        • Enter http://121.**.**.**:80 in the address box of the browser to access the workload (for example, Nginx workload).

                          121.**.**.** indicates the IP address of the unified load balancer.

In the ingress spec of these examples, each path references the backend Service through service.name: <your_service_name> (replace it with the name of your target Service) and service.port.number: 8080 (replace 8080 with the port number of your target Service), and sets the property ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH together with pathType: ImplementationSpecific.
Table 3 Key parameters

kubernetes.io/elb.id

  Mandatory: Yes

  Type: String

  ID of a load balancer. The value can contain 1 to 100 characters.

  How to obtain: On the management console, click Service List, and choose Networking > Elastic Load Balance. Click the name of the target load balancer. On the Summary tab page, find and copy the ID.

kubernetes.io/elb.ip

  Mandatory: No

  Type: String

  Service address of a load balancer. The value can be the public IP address of a public network load balancer or the private IP address of a private network load balancer.

kubernetes.io/elb.class

  Mandatory: Yes

  Type: String

  Load balancer type.

  • union: shared load balancer
  • performance: dedicated load balancer, which can be used only in clusters of v1.17 and later.

  NOTE:
  If a LoadBalancer ingress accesses an existing dedicated load balancer, the dedicated load balancer must be of the application load balancing (HTTP/HTTPS) type.
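Based on the key parameters in Table 3, a minimal sketch of an ingress that interconnects with an existing load balancer is shown below. The load balancer ID and IP address are placeholders to be copied from the ELB console, and <your_service_name>/8080 again stand for your target Service.

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-test
  annotations:
    kubernetes.io/elb.id: <your_elb_id>        # ID of the existing load balancer
    kubernetes.io/elb.ip: <your_elb_ip>        # service address of the existing load balancer
    kubernetes.io/elb.class: performance       # use union for a shared load balancer
    kubernetes.io/elb.port: '80'
spec:
  rules:
  - http:
      paths:
      - path: '/'
        backend:
          service:
            name: <your_service_name>          # Replace it with the name of your target Service.
            port:
              number: 8080                     # Replace 8080 with the port number of your target Service.
        property:
          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
        pathType: ImplementationSpecific
  ingressClassName: cce                        # A LoadBalancer ingress is used.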
diff --git a/docs/cce/umn/cce_10_0276.html b/docs/cce/umn/cce_10_0276.html

                Performing Rolling Upgrade for Nodes

                Scenario

                In a rolling upgrade, a new node is created, existing workloads are migrated to the new node, and then the old node is deleted. Figure 1 shows the migration process.

Figure 1 Workload migration

                Constraints

                • The original node and the target node to which the workload is to be migrated must be in the same cluster.
                • The cluster must be of v1.13.10 or later.
                • The default node pool DefaultPool does not support this configuration.
diff --git a/docs/cce/umn/cce_10_0277.html b/docs/cce/umn/cce_10_0277.html

                CCE provides multiple types of add-ons to extend cluster functions and meet feature requirements. You can install add-ons as required.

                CCE uses Helm charts to deploy add-ons. To modify or upgrade an add-on, perform operations on the Add-ons page or use open add-on management APIs. Do not directly modify resources related to add-ons in the background. Otherwise, add-on exceptions or other unexpected problems may occur.


Scheduling and Elasticity Add-ons

• Volcano Scheduler: This add-on is a scheduler for general-purpose, high-performance computing such as job scheduling, heterogeneous chip management, and job running management, serving end users through computing frameworks for different industries such as AI, big data, gene sequencing, and rendering.
• CCE Cluster Autoscaler: This add-on resizes a cluster based on pod scheduling status and resource usage.
• CCE Advanced HPA: This add-on is developed by CCE. It can be used to flexibly scale in or out Deployments based on metrics such as CPU usage and memory usage.

Cloud Native Observability Add-ons

• CCE Node Problem Detector: This add-on monitors abnormal events of cluster nodes and connects to a third-party monitoring platform. It is a daemon running on each node. It collects node issues from different daemons and reports them to the API server. It can run as a DaemonSet or a daemon.
• Kubernetes Metrics Server: This add-on is an aggregator for monitoring data of core cluster resources.

Cloud Native Heterogeneous Computing Add-ons

• CCE AI Suite (NVIDIA GPU): NVIDIA GPU is a device management add-on that supports GPUs in containers. It supports only NVIDIA drivers.

Container Network Add-ons

• CoreDNS: CoreDNS is a DNS server that provides domain name resolution for Kubernetes clusters through a chain add-on.

Container Storage Add-on

• CCE Container Storage (Everest): This add-on is a cloud native container storage system, which enables clusters of Kubernetes v1.15.6 or later to use cloud storage through the Container Storage Interface (CSI).

                Add-on Lifecycle

                An add-on lifecycle involves all the statuses of the add-on from installation to uninstallation.


                If the add-on status is unknown and the returned status.Reason is "don't install the addon in this cluster", the secret associated with the Helm release of the add-on in the cluster is typically deleted by mistake. In this case, uninstall the add-on and reinstall it with the same configurations.

Table 1 Add-on statuses

• Running (stable state): The add-on is running properly, all add-on instances are deployed properly, and the add-on can be used properly.
• Partially ready (stable state): The add-on is running properly, but some add-on instances are not properly deployed. In this state, the add-on functions may be unavailable.
• Unavailable (stable state): The add-on malfunctions, and all add-on instances are not properly deployed.
• Installing (intermediate state): The add-on is being deployed. If all instances cannot be scheduled due to incorrect add-on configuration or insufficient resources, the system sets the add-on status to Unavailable 10 minutes later.
• Installation failed (stable state): Install add-on failed. Uninstall it and try again.
• Upgrading (intermediate state): The add-on is being upgraded.
• Upgrade failed (stable state): Upgrade add-on failed. Upgrade it again, or uninstall it and try again.
• Rolling back (intermediate state): The add-on is rolling back.
• Rollback failed (stable state): The add-on rollback failed. Retry the rollback, or uninstall it and try again.
• Deleting (intermediate state): The add-on is being deleted. If this state stays for a long time, an exception occurred.
• Deletion failed (stable state): Delete add-on failed. Try again.
• Unknown (stable state): No add-on chart found.

Related Operations

You can perform the operations listed in Table 2 on the Add-ons page.
Table 2 Related operations

• Install: Install a specified add-on.
  1. Log in to the CCE console and click the cluster name to access the cluster console. In the navigation pane, choose Add-ons.
  2. Click Install under the target add-on.
     Each add-on has different configuration parameters. For details, see the corresponding chapter.
  3. Click OK.

• Upgrade: Upgrade an add-on to the new version.
  1. Log in to the CCE console and click the cluster name to access the cluster console. In the navigation pane, choose Add-ons.
  2. If an add-on can be upgraded, the Upgrade button is displayed under it. Click Upgrade.
     Each add-on has different configuration parameters. For details, see the corresponding chapter.
  3. Click OK.

• Edit: Edit add-on parameters.
  1. Log in to the CCE console and click the cluster name to access the cluster console. In the navigation pane, choose Add-ons.
  2. Click Edit under the target add-on.
     Each add-on has different configuration parameters. For details, see the corresponding chapter.
  3. Click OK.

• Uninstall: Uninstall an add-on from the cluster.
  1. Log in to the CCE console and click the cluster name to access the cluster console. In the navigation pane, choose Add-ons.
  2. Click Uninstall under the target add-on.
  3. In the displayed dialog box, click Yes.
     This operation cannot be undone.

• Roll back: Roll back an add-on to the source version.
  NOTE:
  • This function is used to roll back an upgraded add-on to the source version, not to undo the editing of add-on parameters.
  • An add-on cannot be rolled back repeatedly.
  1. Log in to the CCE console and click the cluster name to access the cluster console. In the navigation pane, choose Add-ons.
  2. If an add-on can be rolled back, the Roll Back button is displayed under it. Click Roll Back.
  3. In the displayed dialog box, click Yes.
diff --git a/docs/cce/umn/cce_10_0279.html b/docs/cce/umn/cce_10_0279.html

Node scaling components are described as follows:

Table 2 Node scaling components

• CCE Cluster Autoscaler: An open source Kubernetes component for horizontal scaling of nodes, which is optimized by CCE in scheduling, auto scaling, and costs.
  Application scenario: Online services, deep learning, and large-scale computing with limited resource budgets
  Reference: Creating a Node Scaling Policy

diff --git a/docs/cce/umn/cce_10_0282.html b/docs/cce/umn/cce_10_0282.html

                Container Tunnel Network

Container Tunnel Network Model

The container tunnel network is constructed on but independent of the node network through tunnel encapsulation. This network model uses VXLAN to encapsulate Ethernet packets into UDP packets and transmits them in tunnels. Open vSwitch serves as the backend virtual switch. Though at some costs of performance, packet encapsulation and tunnel transmission enable higher interoperability and compatibility with advanced features (such as network policy-based isolation) for most common scenarios.

Figure 1 Container tunnel network

                Pod-to-pod communication

                • On the same node: Packets are directly forwarded via the OVS bridge on the node.
                • Across nodes: Packets are encapsulated in the OVS bridge and then forwarded to the peer node.

                Container IP Address Management

                The container tunnel network allocates container IP addresses according to the following rules:

                • The container CIDR block is allocated separately, which is irrelevant to the node CIDR block.
                • IP addresses are allocated by node. One or more CIDR blocks with a fixed size (16 by default) are allocated to each node in a cluster from the container CIDR block.
                • When the IP addresses on a node are used up, you can apply for a new CIDR block.
                • The container CIDR block cyclically allocates CIDR blocks to new nodes or existing nodes in sequence.
                • Pods scheduled to a node are cyclically allocated IP addresses from one or more CIDR blocks allocated to the node.
Figure 2 IP address allocation of the container tunnel network

                Maximum number of nodes that can be created in the cluster using the container tunnel network = Number of IP addresses in the container CIDR block / Size of the IP CIDR block allocated to the node by the container CIDR block at a time (16 by default)

For example, if the container CIDR block is 172.16.0.0/16, the number of IP addresses is 65536. If 16 IP addresses are allocated to a node at a time, a maximum of 4096 (65536/16) nodes can be created in the cluster. This is an extreme case. If 4096 nodes are created, a maximum of 16 pods can be created for each node because only 16 IP addresses are allocated to each node. In addition, the number of nodes that can be created in a cluster also depends on the node network and cluster scale.

diff --git a/docs/cce/umn/cce_10_0283.html b/docs/cce/umn/cce_10_0283.html

                VPC Network

Model Definition

The VPC network uses VPC routing to integrate with the underlying network. This network model is suitable for performance-intensive scenarios. The maximum number of nodes allowed in a cluster depends on the VPC route quota. Each node is assigned a CIDR block of a fixed size. This networking model is free from tunnel encapsulation overhead and outperforms the container tunnel network model. In addition, as VPC routing includes routes to node IP addresses and the container CIDR block, container pods in a cluster can be directly accessed from ECSs in the same VPC outside the cluster.

Figure 1 VPC network model

                Pod-to-pod communication

                • On the same node: Packets are directly forwarded through IPvlan.
                • Across nodes: Packets are forwarded to the default gateway through default routes, and then to the peer node via the VPC routes.

                Container IP Address Management

                The VPC network allocates container IP addresses according to the following rules:

                • The container CIDR block is allocated separately.
                • IP addresses are allocated by node. One CIDR block with a fixed size (which is configurable) is allocated to each node in a cluster from the container CIDR block.
                • The container CIDR block cyclically allocates CIDR blocks to new nodes in sequence.
                • Pods scheduled to a node are cyclically allocated IP addresses from CIDR blocks allocated to the node.
Figure 2 IP address management of the VPC network

Maximum number of nodes that can be created in the cluster using the VPC network = Number of IP addresses in the container CIDR block / Number of IP addresses in the CIDR block allocated to the node by the container CIDR block

                For example, if the container CIDR block is 172.16.0.0/16, the number of IP addresses is 65536. The mask of the container CIDR block allocated to the node is 25. That is, the number of container IP addresses on each node is 128. Therefore, a maximum of 512 (65536/128) nodes can be created. In addition, the number of nodes that can be created in a cluster also depends on the node network and cluster scale.

diff --git a/docs/cce/umn/cce_10_0284.html b/docs/cce/umn/cce_10_0284.html

                Cloud Native 2.0 Network

Model Definition

Developed by CCE, Cloud Native 2.0 network deeply integrates Elastic Network Interfaces (ENIs) and sub-ENIs of Virtual Private Cloud (VPC). Container IP addresses are allocated from the VPC CIDR block. ELB passthrough networking is supported to direct access requests to containers. Security groups and EIPs are bound to deliver high performance.

Figure 1 Cloud Native 2.0 network

                Pod-to-pod communication

                • Pods on BMS nodes use ENIs, whereas pods on ECS nodes use Sub-ENIs. Sub-ENIs are attached to ENIs through VLAN sub-interfaces.
                • On the same node: Packets are forwarded through the VPC ENI or sub-ENI.
                • Across nodes: Packets are forwarded through the VPC ENI or sub-ENI.

                Constraints

                This network model is available only to CCE Turbo clusters.

                +

                Advantages and Disadvantages

                Advantages

                • As the container network directly uses VPC, it is easy to locate network problems and provide the highest performance.
                • External networks in a VPC can be directly connected to container IP addresses.
                • The load balancing, security group, and EIP capabilities provided by VPC can be directly used by pods.

                Disadvantages

                @@ -13,12 +15,105 @@

                Application Scenarios

                • High performance requirements and use of other VPC network capabilities: Cloud Native Network 2.0 directly uses VPC, which delivers almost the same performance as the VPC network. Therefore, it applies to scenarios that have high requirements on bandwidth and latency.
                • Large-scale networking: Cloud Native Network 2.0 supports a maximum of 2000 ECS nodes and 100,000 containers.
                +

                Container IP Address Management

                In the Cloud Native Network 2.0 model, ECS nodes use sub-ENIs.

                +
                • The IP address of the pod is directly allocated from the VPC subnet configured for the container network. You do not need to allocate an independent small network segment to the node.
                • When an ECS node is added to a cluster, an ENI that carries sub-ENIs is bound to the node first. Sub-ENIs can be bound only after that ENI is bound.
                • Number of ENIs bound to an ECS node: For clusters of v1.19.16-r40, v1.21.11-r0, v1.23.9-r0, v1.25.4-r0, v1.27.1-r0, and later versions, the value is 1. For clusters of earlier versions, the value is the maximum number of sub-ENIs that can be bound to the node divided by 64 (rounded up).
                • ENIs bound to an ECS node = Number of ENIs used to bear sub-ENIs + Number of sub-ENIs currently used by pods + Number of pre-bound sub-ENIs
                • When a pod is created, an available ENI is randomly allocated from the prebinding ENI pool of the node.
                • When the pod is deleted, the ENI is released back to the ENI pool of the node.
                • When a node is deleted, the ENIs are released back to the pool, and the sub-ENIs are deleted.
                +

                Cloud Native Network 2.0 supports dynamic ENI pre-binding policies. The following table lists the scenarios.

                Table 2 Node scaling components

                • Component name: CCE Cluster Autoscaler
                • Component description: An open source Kubernetes component for horizontal scaling of nodes, which is optimized by CCE in scheduling, auto scaling, and costs.
                • Application scenario: Online services, deep learning, and large-scale computing with limited resource budgets
                • Reference: Creating a Node Scaling Policy

                Table 1 Comparison between ENI pre-binding policies

                Policy: Dynamic ENI pre-binding policy (default)

                Management policy:
                • nic-minimum-target: minimum number of ENIs (pre-bound and unused + used) bound to a node.
                • nic-maximum-target: if the number of ENIs bound to a node exceeds the value of this parameter, the system does not proactively pre-bind ENIs.
                • nic-warm-target: minimum number of pre-bound ENIs on a node.
                • nic-max-above-warm-target: ENIs are unbound and reclaimed only when the number of idle ENIs minus the value of nic-warm-target is greater than this threshold.

                Application scenario: Accelerates pod startup while improving IP resource utilization. This mode applies to scenarios where the number of IP addresses in the container network segment is limited.

                CCE provides four parameters for the dynamic ENI pre-binding policy. Set these parameters properly.

                Table 2 Parameters of the dynamic ENI pre-binding policy

                nic-minimum-target (default: 10)
                • Description: Minimum number of ENIs bound to a node. The value can be a number or a percentage.
                  • Value: must be a positive integer. For example, 10 indicates that at least 10 ENIs are bound to a node. If the ENI quota of a node is exceeded, the ENI quota is used.
                  • Percentage: ranges from 1% to 100%. For example, 10%: if the ENI quota of a node is 128, at least 12 (rounded down) ENIs are bound to the node.
                • Suggestion: Set both nic-minimum-target and nic-maximum-target to the same value or percentage, based on the number of pods.

                nic-maximum-target (default: 0)
                • Description: If the number of ENIs bound to a node exceeds the value of nic-maximum-target, the system does not proactively pre-bind ENIs. If the value of this parameter is greater than or equal to the value of nic-minimum-target, the check on the maximum number of pre-bound ENIs is enabled. Otherwise, the check is disabled. The value can be a number or a percentage.
                  • Value: must be a positive integer. For example, 0 disables the check on the maximum number of pre-bound ENIs. If the ENI quota of a node is exceeded, the ENI quota is used.
                  • Percentage: ranges from 1% to 100%. For example, 50%: if the ENI quota of a node is 128, the maximum number of pre-bound ENIs is 64 (rounded down).
                • Suggestion: Set both nic-minimum-target and nic-maximum-target to the same value or percentage, based on the number of pods.

                nic-warm-target (default: 2)
                • Description: Minimum number of pre-bound ENIs on a node. The value must be a number. When the value of nic-warm-target plus the number of bound ENIs is greater than the value of nic-maximum-target, the system pre-binds ENIs based on the difference between the value of nic-maximum-target and the number of bound ENIs.
                • Suggestion: Set this parameter to the number of pods that can be scaled out instantaneously within 10 seconds.

                nic-max-above-warm-target (default: 2)
                • Description: Pre-bound ENIs are unbound and reclaimed only when the number of idle ENIs on a node minus the value of nic-warm-target is greater than this threshold. The value can only be a number.
                  • A larger value slows down the recycling of idle ENIs and accelerates pod startup, but lowers IP address usage, especially when IP addresses are insufficient. Exercise caution when increasing this value.
                  • A smaller value accelerates the recycling of idle ENIs and improves IP address usage, but slows down the startup of some pods when a large number of pods are created instantaneously.
                • Suggestion: Set this parameter based on the difference between the number of pods that are frequently scaled on most nodes within minutes and the number of pods that are instantly scaled out on most nodes within 10 seconds.

                The preceding parameters support global configuration at the cluster level and custom settings at the node pool level. The latter takes priority over the former.
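                For reference, the four parameters are usually written as a small set of key-value pairs. The snippet below is only a hypothetical sketch of the default values, not the exact CCE cluster or node pool configuration schema:

                # Hypothetical sketch of the dynamic ENI pre-binding defaults (not the real CCE schema)
                nic-minimum-target: "10"           # keep at least 10 ENIs (used + pre-bound) per node
                nic-maximum-target: "0"            # 0 disables the upper-bound check on pre-binding
                nic-warm-target: "2"               # keep at least 2 idle, pre-bound ENIs ready for new pods
                nic-max-above-warm-target: "2"     # reclaim idle ENIs only when (idle - warm-target) exceeds 2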

                +
                +
                The container networking component maintains a scalable pool of pre-bound ENIs for each node. The component checks and calculates the number of pre-bound or idle ENIs every 10 seconds.
                • Number of ENIs to be pre-bound = min(nic-maximum-target – Number of bound ENIs, max(nic-minimum-target – Number of bound ENIs, nic-warm-target – Number of idle ENIs))
                • Number of ENIs to be unbound = min(Number of idle ENIs – nic-warm-target – nic-max-above-warm-target, Number of bound ENIs – nic-minimum-target)

                The number of pre-bound ENIs on the node stays within the following range:
                • Minimum number of ENIs to be pre-bound = min(max(nic-minimum-target – Number of bound ENIs, nic-warm-target), nic-maximum-target – Number of bound ENIs)
                • Maximum number of ENIs to be pre-bound = max(nic-warm-target + nic-max-above-warm-target, Number of bound ENIs – nic-minimum-target)
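                Working through the formulas with purely illustrative numbers (a sketch, not CCE output): assume nic-minimum-target=10, nic-maximum-target=20, nic-warm-target=2, nic-max-above-warm-target=2, 12 bound ENIs, and 1 idle ENI.

                # Purely illustrative application of the formulas above
                enisToPreBind: 1        # min(20 - 12, max(10 - 12, 2 - 1)) = min(8, 1) = 1
                enisToUnbind: 0         # min(1 - 2 - 2, 12 - 10) = min(-3, 2) = -3, so nothing is reclaimed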

                When a pod is created, an idle ENI (the earliest unused one) is preferentially allocated from the pool. If no idle ENI is available, a new sub-ENI is bound to the pod.

                When the pod is deleted, the corresponding ENI is released back to the pre-bound ENI pool of the node, enters a 2-minute cooldown period, and can then be bound to another pod. If the ENI is not bound to any pod within 2 minutes, it is released.

                Recommendation for CIDR Block Planning

                As described in Cluster Network Structure, network addresses in a cluster can be divided into three parts: node network, container network, and service network. When planning network addresses, consider the following aspects:

                • The three CIDR blocks cannot overlap. Otherwise, a conflict occurs. All subnets (including those created from the secondary CIDR block) in the VPC where the cluster resides cannot conflict with the container and Service CIDR blocks.
                • Ensure that each CIDR block has sufficient IP addresses.
                  • The IP addresses in the node CIDR block must match the cluster scale. Otherwise, nodes cannot be created due to insufficient IP addresses.
                  • The IP addresses in the container CIDR block must match the service scale. Otherwise, pods cannot be created due to insufficient IP addresses.

                In the Cloud Native Network 2.0 model, the container CIDR block and node CIDR block share the network addresses in a VPC. It is recommended that the container subnet and node subnet not use the same subnet. Otherwise, containers or nodes may fail to be created due to insufficient IP resources.

                In addition, a subnet can be added to the container CIDR block after a cluster is created to increase the number of available IP addresses. In this case, ensure that the added subnet does not conflict with other subnets in the container CIDR block.
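                For reference, a non-conflicting plan for such a cluster might look like the sketch below. All values are illustrative; only the rules that the blocks must not overlap and that the node and container subnets should differ come from the text above:

                # Illustrative, non-overlapping CIDR plan (not a CCE API object)
                vpcCIDR: 192.168.0.0/16
                nodeSubnet: 192.168.0.0/20         # node network
                containerSubnets:                  # container network; more subnets can be added later
                  - 192.168.16.0/20
                serviceCIDR: 10.247.0.0/16         # Service network, outside the VPC CIDR, so no conflict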

                Figure 2 Configuring CIDR blocks

                Example of Cloud Native Network 2.0 Access

                Create a CCE Turbo cluster, which contains three ECS nodes.

                Access the details page of one node. You can see that the node has one primary ENI and one extended ENI, and both of them are ENIs. The extended ENI belongs to the container CIDR block and is used to mount a sub-ENI to the pod.

                diff --git a/docs/cce/umn/cce_10_0285.html b/docs/cce/umn/cce_10_0285.html index ee5b8716..2dca2f3f 100644 --- a/docs/cce/umn/cce_10_0285.html +++ b/docs/cce/umn/cce_10_0285.html @@ -7,18 +7,18 @@
                • Group them in different clusters for different environments.

                  Resources cannot be shared among different clusters. In addition, services in different environments can access each other only through load balancing.

                • Group them in different namespaces for different environments.

                  Workloads in the same namespace can be mutually accessed by using the Service name. Cross-namespace access can be implemented by using the Service name or namespace name.

                  The following figure shows namespaces created for the development, joint debugging, and testing environments, respectively.

                  Figure 1 One namespace for one environment
              • Isolating namespaces by application

                You are advised to use this method if a large number of workloads are deployed in the same environment. For example, in the following figure, different namespaces (APP1 and APP2) are created to logically manage workloads as different groups. Workloads in the same namespace access each other using the Service name, and workloads in different namespaces access each other using the Service name or namespace name.

                Figure 2 Grouping workloads into different namespaces
              • Managing Namespace Labels

                1. Log in to the CCE console and click the cluster name to access the cluster console. In the navigation pane, choose Namespaces.
                2. Locate the row containing the target namespace and choose More > Manage Label in the Operation column.
                3. In the dialog box that is displayed, the existing labels of the namespace are displayed. Modify the labels as needed.

                  • Adding a label: Click the add icon, enter the key and value of the label to be added, and click OK.

                    For example, the key is project and the value is cicd, indicating that the namespace is used to deploy CICD. (A namespace manifest carrying this label is sketched after this procedure.)

                4. Deleting a label: Click the delete icon next to the label to be deleted and then click OK.
                5. Switch to the Manage Label dialog box again and check the modified labels.
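                For reference, the same label can also be declared directly in a namespace manifest. A minimal sketch using the project=cicd example above (the namespace name is illustrative):

                apiVersion: v1
                kind: Namespace
                metadata:
                  name: cicd-project               # illustrative namespace name
                  labels:
                    project: cicd                  # the label key and value from the example above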

                Enabling Node Affinity in a Namespace

                After node affinity is enabled in a namespace, the workloads newly created in the namespace can be scheduled only to nodes with specific labels. For details, see PodNodeSelector.

                -
                1. Log in to the CCE console and click the cluster name to access the cluster console. In the navigation pane, choose Namespaces.
                2. Locate the target namespace and click in the Node Affinity column.
                3. In the displayed dialog box, select Enable and click OK.

                  After node affinity is enabled, new workloads in the current namespace will be scheduled only to nodes with specified labels. For example, in namespace test, the workloads in the namespace can be scheduled only to the node whose label key is kubelet.kubernetes.io/namespace and label value is test (see the sketch after this procedure).

                  4. You can add specified labels to a node in Labels and Taints on the Nodes page. For details, see Managing Node Labels.
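                For reference, the PodNodeSelector admission plugin that this feature relies on reads a namespace annotation. The sketch below shows roughly what an affinity-enabled namespace corresponds to; CCE configures this for you when you enable node affinity on the console, so treat the annotation as illustrative:

                apiVersion: v1
                kind: Namespace
                metadata:
                  name: test
                  annotations:
                    # Restricts new pods in this namespace to nodes carrying this label
                    scheduler.alpha.kubernetes.io/node-selector: kubelet.kubernetes.io/namespace=test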

                Deleting a Namespace

                If a namespace is deleted, all resources (such as workloads, jobs, and ConfigMaps) in this namespace will also be deleted. Exercise caution when deleting a namespace.

                diff --git a/docs/cce/umn/cce_10_0288.html b/docs/cce/umn/cce_10_0288.html index fdd31212..c9f1e86e 100644 --- a/docs/cce/umn/cce_10_0288.html +++ b/docs/cce/umn/cce_10_0288.html @@ -1,35 +1,37 @@ -

                Security Group Policies

                +

                Binding a Custom Security Group to a Workload

                In Cloud Native Network 2.0, pods use VPC ENIs or sub-ENIs for networking. You can directly bind security groups and EIPs to pods. To bind CCE pods with security groups, CCE provides a custom resource object named SecurityGroup. Using this resource object, you can customize security isolation for workloads.

                +

                The priority of the security group bound to pods using the security group policy is higher than that of the security group in the NetworkAttachmentDefinition.

                +

                Constraints

                • This function is supported for CCE Turbo clusters of v1.19 and later. Upgrade your CCE Turbo clusters if their versions are earlier than v1.19.
                • A workload can be bound to a maximum of five security groups.

                Using the Console

                1. Log in to the CCE console and click the cluster name to access the cluster console.
                2. In the navigation pane, choose Workloads. On the displayed page, click the desired workload name.
                3. Switch to the SecurityGroups tab and click Create.

                4. Set the parameters as described in Table 1.

                  -

                  Table 1 Configuration parameters

                  • Security Group Policy Name: Enter a security policy name. Enter 1 to 63 characters. The value must start with a lowercase letter and cannot end with a hyphen (-). Only lowercase letters, digits, and hyphens (-) are allowed. Example: security-group
                  • Associate Security Group: The selected security group will be bound to the ENI or supplementary ENI of the selected workload. A maximum of five security groups can be selected from the drop-down list. You must select one or multiple security groups to create a SecurityGroup. If no security group has been created, click Create Security Group. After the security group is created, click the refresh button. NOTICE: A maximum of five security groups can be selected. Hover the cursor over the icon next to the security group name to view details about the security group. Example: 64566556-bd6f-48fb-b2c6-df8f44617953, 5451f1b0-bd6f-48fb-b2c6-df8f44617953

                  When a YAML file is used, the security group IDs are specified in the spec, for example:

                  spec:
                    ...
                      - id: 64566556-bd6f-48fb-b2c6-df8f44617953
                      - id: 5451f1b0-bd6f-48fb-b2c6-df8f44617953

                  Table 2 describes the parameters in the YAML file.

                  diff --git a/docs/cce/umn/cce_10_0296.html b/docs/cce/umn/cce_10_0296.html index 8396e94d..d0c6a3a6 100644 --- a/docs/cce/umn/cce_10_0296.html +++ b/docs/cce/umn/cce_10_0296.html @@ -14,8 +14,8 @@

                  Cluster Autoscaler Architecture

                  Figure 1 shows the Cluster Autoscaler architecture and its core modules.

                  Figure 1 Cluster Autoscaler architecture

                  Description

                  • Estimator: Evaluates the number of nodes to be added to each node pool to host unschedulable pods.
                  • Simulator: Finds the nodes that meet the scale-in conditions in the scale-in scenario.
                  • Expander: Selects an optimal node from the node pool picked out by the Estimator based on the user-defined policy in the scale-out scenario. Currently, the Expander has the following policies:
                  Table 2 Description

                  • apiVersion: API version. The value is crd.yangtse.cni/v1. (Mandatory: Yes)
                  • kind: Type of the object to be created. (Mandatory: Yes)
                  • metadata: Metadata definition of the resource object. (Mandatory: Yes)
                  • name: Name of the SecurityGroup. (Mandatory: Yes)
                  • namespace: Name of the namespace. (Mandatory: Yes)
                  • spec: Detailed description of the SecurityGroup. (Mandatory: Yes)
                  • podSelector: Used to define the workload to be associated with security groups in the SecurityGroup. (Mandatory: Yes)
                  • securityGroups: Security group ID. (Mandatory: Yes)
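                  Putting these fields together, a complete SecurityGroup object might look like the following sketch. The name, namespace, and pod labels are illustrative, the security group IDs are the ones from the example above, and the podSelector is assumed to be a standard Kubernetes label selector:

                  apiVersion: crd.yangtse.cni/v1
                  kind: SecurityGroup
                  metadata:
                    name: demo                       # illustrative name
                    namespace: default               # illustrative namespace
                  spec:
                    podSelector:
                      matchLabels:
                        app: nginx                   # selects the pods of the target workload (illustrative label)
                    securityGroups:
                      - id: 64566556-bd6f-48fb-b2c6-df8f44617953
                      - id: 5451f1b0-bd6f-48fb-b2c6-df8f44617953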

                  - - @@ -58,7 +58,7 @@

                  The formula for calculating the minimum waste score (wastedScore) is as follows:

                  • wastedCPU = (Total number of CPUs of the nodes to be scaled out – Total number of CPUs of the pods to be scheduled)/Total number of CPUs of the nodes to be scaled out
                  • wastedMemory = (Total memory size of nodes to be scaled out – Total memory size of pods to be scheduled)/Total memory size of nodes to be scaled out
                  • wastedScore = wastedCPU + wastedMemory
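                  For example, with purely illustrative numbers: the nodes to be scaled out provide 8 vCPUs and 16 GiB in total, and the pods to be scheduled request 6 vCPUs and 8 GiB.

                  # Purely illustrative least-waste calculation
                  wastedCPU: 0.25        # (8 - 6) / 8
                  wastedMemory: 0.5      # (16 - 8) / 16
                  wastedScore: 0.75      # 0.25 + 0.5; the node pool with the smaller score is preferred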
                  - @@ -69,7 +69,7 @@ - @@ -80,8 +80,8 @@ - diff --git a/docs/cce/umn/cce_10_0298.html b/docs/cce/umn/cce_10_0298.html index a210fa13..524a1575 100644 --- a/docs/cce/umn/cce_10_0298.html +++ b/docs/cce/umn/cce_10_0298.html @@ -5,9 +5,9 @@

                  Solution

                  Two major auto scaling policies are HPA (Horizontal Pod Autoscaling) and CA (Cluster AutoScaling). HPA is for workload auto scaling and CA is for node auto scaling.

                  HPA and CA work with each other. HPA requires sufficient cluster resources for successful scaling. When the cluster resources are insufficient, CA is needed to add nodes. If HPA reduces workloads, the cluster will have a large number of idle resources. In this case, CA needs to release nodes to avoid resource waste.

                  -
                  As shown in Figure 1, HPA performs scale-out based on the monitoring metrics. When cluster resources are insufficient, newly created pods are in Pending state. CA then checks these pending pods and selects the most appropriate node pool based on the configured scaling policy to scale out the node pool.

                  Figure 1 HPA and CA working flows

                  Using HPA and CA can easily implement auto scaling in most scenarios. In addition, the scaling process of nodes and pods can be easily observed.

                  This section uses an example to describe the auto scaling process using HPA and CA policies together.

                  @@ -29,7 +29,7 @@ RUN chmod a+rx index.php
                5. Run the following command to build an image named hpa-example with the tag latest.
                  docker build -t hpa-example:latest .
                6. (Optional) Log in to the SWR console, choose Organizations in the navigation pane, and click Create Organization in the upper right corner to create an organization.

                  Skip this step if you already have an organization.

                  -
                10. In the navigation pane, choose My Images and then click Upload Through Client. On the page displayed, click Generate a temporary login command and click the copy icon to copy the command.
                11. Run the login command copied in the previous step on the cluster node. If the login is successful, the message "Login Succeeded" is displayed.
                12. Tag the hpa-example image.

                  docker tag {Image name 1:Tag 1} {Image repository address}/{Organization name}/{Image name 2:Tag 2}

                  • {Image name 1:Tag 1}: name and tag of the local image to be uploaded.
                  • {Image repository address}: the domain name at the end of the login command. It can be obtained on the SWR console.
                  • {Organization name}: name of the created organization.
                  • {Image name 2:Tag 2}: desired image name and tag to be displayed on the SWR console.

                  The following is an example:

                  docker tag hpa-example:latest swr.eu-de.otc.t-systems.com/group/hpa-example:latest

                  @@ -119,7 +119,7 @@ spec: averageUtilization: 50

                  Configure the parameters as follows if you are using the console.
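                  For reference, the equivalent HPA object created with a YAML file might look like the sketch below. Only the 50% CPU utilization target is taken from the fragment above; the Deployment name and replica bounds are illustrative:

                  apiVersion: autoscaling/v2
                  kind: HorizontalPodAutoscaler
                  metadata:
                    name: hpa-example                # illustrative name
                  spec:
                    scaleTargetRef:
                      apiVersion: apps/v1
                      kind: Deployment
                      name: hpa-example              # the Deployment running the hpa-example image
                    minReplicas: 1                   # illustrative bounds
                    maxReplicas: 10
                    metrics:
                    - type: Resource
                      resource:
                        name: cpu
                        target:
                          type: Utilization
                          averageUtilization: 50     # matches the value shown above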

                  -

                  +

                13. Observing the Auto Scaling Process

                  1. Check the cluster node status. In the following example, there are two nodes.

                    # kubectl get node
                     NAME            STATUS   ROLES    AGE     VERSION
                    diff --git a/docs/cce/umn/cce_10_0301.html b/docs/cce/umn/cce_10_0301.html
                    deleted file mode 100644
                    index 835be47a..00000000
                    --- a/docs/cce/umn/cce_10_0301.html
                    +++ /dev/null
                    @@ -1,74 +0,0 @@
                    -
                    -
                    -

                    Performing an In-place Upgrade

                    -

                    You can upgrade your clusters to a newer version on the CCE console.

                    -

                    Before the upgrade, learn about the target versions to which each CCE cluster can be upgraded, the available upgrade modes, and the upgrade impacts. For details, see Upgrade Overview and Before You Start.

                    -

                    Description

                    • An in-place upgrade updates the Kubernetes components on cluster nodes, without changing their OS version.
                    • Data plane nodes are upgraded in batches. By default, they are prioritized based on their CPU, memory, and PodDisruptionBudgets (PDBs). You can also set the priorities according to your service requirements.
                    -
                    -

                    Precautions

                    • During the cluster upgrade, the system will automatically upgrade add-ons to a version compatible with the target cluster version. Do not uninstall or reinstall add-ons during the cluster upgrade.
                    • Before the upgrade, ensure that all add-ons are running. If an add-on fails to be upgraded, rectify the fault and try again.
                    • During the upgrade, CCE checks the add-on running status. Some add-ons (such as CoreDNS) require at least two nodes to run properly. In this case, at least two nodes must be available for the upgrade.
                    -

                    For more information, see Before You Start.

                    -
                    -

                    Procedure

                    The cluster upgrade goes through check, backup, configuration and upgrade, and verification.

                    -
                    1. Log in to the CCE console and click the cluster name to access the cluster console.
                    2. In the navigation pane, choose Cluster Upgrade.
                    3. CCE automatically provides you with an optimal upgrade path based on the current cluster version. Select the target cluster version, check information such as version differences, node OS version, and add-on versions, and click Check.
                    4. Perform the pre-upgrade check. Click Start Check and confirm the check. If there are abnormal or risky items in the cluster, handle the exceptions based on the check results displayed on the page and check again.

                      • Exceptions: View the solution displayed on the page, handle the exceptions and check again.
                      • Risk Items: may affect the cluster upgrade. Check the risk description and see whether you may be impacted. If no risk exists, click OK next to the risk item to manually skip this risk item and check again.
                      -

                      After the check is passed, click Next.

                      -

                    5. Back up the cluster. During the cluster upgrade, CCE automatically backs up etcd data. You can manually back up master nodes to speed up the rollback if the master nodes fail to upgrade. If manual backup is not required, click Next.

                      -

                  Table 1 Expander policies supported by CCE

                  Policy

                  @@ -34,8 +34,8 @@

                  This policy is typically used as a basic backup for other complex policies. Only use this policy if the other policies cannot be used.

                  Assume that auto scaling is enabled for node pools 1 and 2 in the cluster and the scale-out upper limit is not reached. The policy for scaling out the number of pods for a workload is as follows:

                  1. Pending pods trigger the Autoscaler to determine the scale-out process.
                  2. Autoscaler simulates the scheduling phase and evaluates that some pending pods can be scheduled to the added nodes in both node pools 1 and 2.
                  3. Autoscaler randomly selects node pool 1 or node pool 2 for scale-out.

                  most-pods

                  @@ -45,7 +45,7 @@

                  This policy is based on the maximum number of pods that can be scheduled.

                  Assume that auto scaling is enabled for node pools 1 and 2 in the cluster and the scale-out upper limit is not reached. The policy for scaling out the number of pods for a workload is as follows:

                  1. Pending pods trigger the Autoscaler to determine the scale-out process.
                  2. Autoscaler simulates the scheduling phase and evaluates that some pending pods can be scheduled to the added nodes in both node pools 1 and 2.
                  3. Autoscaler evaluates that node pool 1 can schedule 20 new pods and node pool 2 can schedule only 10 new pods after scale-out. Therefore, Autoscaler selects node pool 1 for scale-out.

                  Assume that auto scaling is enabled for node pools 1 and 2 in the cluster and the scale-out upper limit is not reached. The policy for scaling out the number of pods for a workload is as follows:

                  1. Pending pods trigger the Autoscaler to determine the scale-out process.
                  2. Autoscaler simulates the scheduling phase and evaluates that some pending pods can be scheduled to the added nodes in both node pools 1 and 2.
                  3. Autoscaler evaluates that the minimum waste score of node pool 1 after scale-out is smaller than that of node pool 2. Therefore, Autoscaler selects node pool 1 for scale-out.

                  This policy allows you to configure and manage the priorities of node pools or scaling groups through the console or API, while the least-waste policy can reduce the resource waste ratio in common scenarios. This policy has wider applicability and is used as the default selection policy.

                  Assume that auto scaling is enabled for node pools 1 and 2 in the cluster and the scale-out upper limit is not reached. The policy for scaling out the number of pods for a workload is as follows:

                  1. Pending pods trigger the Autoscaler to determine the scale-out process.
                  2. Autoscaler simulates the scheduling phase and evaluates that some pending pods can be scheduled to the added nodes in both node pools 1 and 2.
                  3. Autoscaler evaluates that node pool 1 has a higher priority than node pool 2. Therefore, Autoscaler selects node pool 1 for scale-out.

                  This policy is used for rescheduling global resources for pods or nodes (instead of only adding nodes) to reduce the overall resource fragmentation rate of the cluster. Use this policy only in rescheduling scenarios.

                  Assume that auto scaling is enabled for node pools 1 and 2 in the cluster and the scale-out upper limit is not reached. The policy for scaling out the number of pods for a workload is as follows:

                  1. Pending pods trigger the Autoscaler to determine the scale-out process.
                  2. Autoscaler simulates the scheduling phase and evaluates that some pending pods can be scheduled to the added nodes in both node pools 1 and 2.
                  3. Autoscaler determines a preferentially selected node pool and evaluates that the CPU/memory ratio of pods is 1:4. The node flavor in node pool 1 is 2 vCPUs and 8 GiB of memory (the CPU/memory ratio is 1:4), and the node flavor in node pool 2 is 2 vCPUs and 4 GiB of memory (the CPU/memory ratio is 1:2). Therefore, node pool 1 is preferred for this scale-out.
                  • etcd data backup — Backup object: etcd data. Backup mode: automatic backup during the upgrade. Backup time: 1-5 minutes. Rollback time: 2 hours. Description: Mandatory. The backup is automatically performed during the upgrade.
                  • CBR cloud server backup — Backup object: master node disks, including component images, configurations, logs, and etcd data. Backup mode: one-click backup on web pages (manually triggered). Backup time: 20 minutes to 2 hours (based on the cloud backup tasks in the current region). Rollback time: 20 minutes. Description: This function is gradually replaced by EVS snapshot backup.

                  -
                  -
                  -

                14. Configure the upgrade parameters.

                  • Add-on Upgrade Configuration: Add-ons that have been installed in your cluster are listed. During the cluster upgrade, CCE automatically upgrades the selected add-ons to be compatible with the target cluster version. You can click Set to re-define the add-on parameters.

                    If an add-on is marked with an icon on its right side, the add-on cannot be compatible with both the source and target versions of the cluster upgrade. In this case, CCE will upgrade the add-on after the cluster upgrade. The add-on may be unavailable during the cluster upgrade.

                    -
                    -
                  • Node Upgrade Configuration: You can set the maximum number of nodes to be upgraded in a batch.

                    Node pools will be upgraded in sequence. Nodes in the same node pool will be upgraded in batches. One node is upgraded in the first batch, two nodes in the second batch, and the number of nodes to be upgraded in each subsequent batch increases by a power of 2 until the maximum number of nodes to be upgraded in each batch is reached.

                    -
                  • Node Priority: You can set priorities for nodes to be upgraded. If you do not set this parameter, the system will determine the nodes to upgrade in batches based on specific conditions. Before setting the node upgrade priority, select a node pool. Nodes and node pools will be upgraded according to the priorities you specify.
                    • Add Upgrade Priority: Add upgrade priorities for node pools.
                    • Add Node Priority: After adding a node pool priority, you can set the upgrade sequence of nodes in the node pool. The system upgrades nodes in the sequence you specify. If you skip this setting, the system upgrades nodes based on the default policy.
                    -
                  -

                15. After the configuration is complete, click Upgrade and confirm the upgrade. The cluster starts to be upgraded. You can view the process in the lower part of the page.

                  If an upgrade failure message is displayed during the cluster upgrade, rectify the fault as prompted and try again.

                  -
                  -

                16. After the upgrade is complete, click Next. Verify the upgrade based on the displayed check items. After confirming that all check items are normal, click Complete and confirm that the post-upgrade check is complete. For details, see Performing Post-Upgrade Verification.

                  You can verify the cluster Kubernetes version on the Clusters page.

                  -

                17. - - -
                  - -
                  - diff --git a/docs/cce/umn/cce_10_0302.html b/docs/cce/umn/cce_10_0302.html index b274e760..27f01ecb 100644 --- a/docs/cce/umn/cce_10_0302.html +++ b/docs/cce/umn/cce_10_0302.html @@ -3,11 +3,12 @@

                  Before You Start

                  Before the upgrade, you can check whether your cluster can be upgraded and which versions are available on the CCE console. For details, see Upgrade Overview.

                  Precautions

                  Before upgrading a cluster, pay attention to the following points:

                  -
                  • A cluster upgrade cannot be rolled back. Perform the upgrade at a proper time to minimize the impact on your services. To ensure data security, back up your data before the upgrade.
                  • Before upgrading a cluster, ensure that no high-risk operations are performed in the cluster. Otherwise, the cluster upgrade may fail or the configuration may be lost after the upgrade. Common high-risk operations include modifying cluster node configurations locally and modifying the configurations of the listeners managed by CCE on the ELB console. Instead, modify configurations on the CCE console so that the modifications can be automatically inherited during the upgrade.
                  • Before upgrading a cluster, ensure the cluster is working properly.
                  • Before upgrading a cluster, learn about the features and differences of each cluster version in Kubernetes Release Notes to prevent exceptions due to the use of an incompatible cluster version. For example, check whether any APIs deprecated in the target version are used in the cluster. Otherwise, calling the APIs may fail after the upgrade. For details, see Deprecated APIs.
                  +
                  • Perform an upgrade during off-peak hours to minimize the impact on your services.
                  • Before upgrading a cluster, learn about the features and differences of each cluster version in Kubernetes Release Notes to prevent exceptions due to the use of an incompatible cluster version. For example, check whether any APIs deprecated in the target version are used in the cluster. Otherwise, calling the APIs may fail after the upgrade. For details, see Deprecated APIs.

                  During a cluster upgrade, pay attention to the following points that may affect your services:

                  -
                  • During a cluster upgrade, do not perform any operation on the cluster. Do not stop, restart, or delete nodes during cluster upgrade. Otherwise, the upgrade will fail.
                  • During a cluster upgrade, the running workloads will not be interrupted, but access to the API server will be temporarily interrupted.
                  • During a cluster upgrade, the node.kubernetes.io/upgrade taint (equivalent to NoSchedule) will be added to the nodes in the cluster. The taint will be removed after the cluster is upgraded. Do not add taints with the same key name on a node. Even if the taints have different effects, they may be deleted by the system by mistake after the upgrade.
                  +
                  • During a cluster upgrade, do not perform any operation on the cluster. Do not stop, restart, or delete nodes during cluster upgrade. Otherwise, the upgrade will fail.
                  • Before upgrading a cluster, ensure no high-risk operations are performed in the cluster. Otherwise, the cluster upgrade may fail or the configuration may be lost after the upgrade. Common high-risk operations include modifying cluster node configurations locally and modifying the configurations of the listeners managed by CCE on the ELB console. Instead, modify configurations on the CCE console so that the modifications can be automatically inherited during the upgrade.
                  • During a cluster upgrade, the running workloads will not be interrupted, but access to the API server will be temporarily interrupted.
                  • By default, application scheduling is not restricted during a cluster upgrade. During an upgrade of the following early cluster versions, the node.kubernetes.io/upgrade taint (equivalent to NoSchedule) will be added to the nodes in the cluster. The taint will be removed after the cluster is upgraded:
                    • All v1.15 clusters
                    • All v1.17 clusters
                    • v1.19 clusters with patch versions earlier than or equal to v1.19.16-r4
                    • v1.21 clusters with patch versions earlier than or equal to v1.21.7-r0
                    • v1.23 clusters with patch versions earlier than or equal to v1.23.5-r0
                    +
                  • Clusters of version 1.27 or later do not support nodes running EulerOS 2.5 or CentOS 7.7. If a node running EulerOS 2.5 or CentOS 7.7 is used, migrate the node OS to EulerOS release 2.9, Ubuntu 22.04, or HCE OS 2.0 before the cluster upgrade. For details, see Resetting a Node.
                  -

                  Constraints

                  • Clusters can be rolled back if an exception occurs during the cluster upgrade. A cluster cannot be rolled back if other operations are performed on it after its upgrade.
                  • If there are any nodes created using a private image, the cluster cannot be upgraded.
                  • After the cluster is upgraded, if the containerd vulnerability of the container engine is fixed in Kubernetes Release Notes, manually restart containerd for the upgrade to take effect. The same applies to the existing pods.
                  • If you mount the docker.sock file on a node to a pod using the hostPath mode, that is, the Docker in Docker scenario, Docker will restart during the upgrade, but the docker.sock file does not change. As a result, your services may malfunction. You are advised to mount the docker.sock file by mounting the directory.
                  • When clusters using the tunnel network model are upgraded to v1.19.16-r4, v1.21.7-r0, v1.23.5-r0, v1.25.1-r0, or later, the SNAT rule whose destination address is the container CIDR block but the source address is not the container CIDR block will be removed. If you have configured VPC routes to directly access all pods outside the cluster, only the pods on the corresponding nodes can be directly accessed after the upgrade.
                  • For more details, see Version Differences.
                  +

                  Constraints

                  • If an error occurred during a cluster upgrade, the cluster can be rolled back using the backup data. If you perform other operations (for example, modifying cluster specifications) after a successful cluster upgrade, the cluster cannot be rolled back using the backup data.
                  • When clusters using the tunnel network model are upgraded to v1.19.16-r4, v1.21.7-r0, v1.23.5-r0, v1.25.1-r0, or later, the SNAT rule whose destination address is the container CIDR block but the source address is not the container CIDR block will be removed. If you have configured VPC routes to directly access all pods outside the cluster, only the pods on the corresponding nodes can be directly accessed after the upgrade.
                  • For more details, see Version Differences.

                  Version Differences

                  - + + + + @@ -140,48 +149,48 @@

                  Deprecated APIs

                  With the evolution of Kubernetes APIs, APIs are periodically reorganized or upgraded, and old APIs are deprecated and finally deleted. The following tables list the deprecated APIs in each Kubernetes community version. For details about more deprecated APIs, see Deprecated API Migration Guide.

                  - +

                  When an API is deprecated, the existing resources are not affected. However, when you create or edit the resources, the API version will be intercepted.

                  -

                  Upgrade Path

                  @@ -18,7 +19,15 @@

                  Upgrade path: v1.23 or v1.25 upgraded to v1.27 — Docker is no longer recommended. Use containerd instead. For details, see Container Engine. (This item has been included in the pre-upgrade check.)

                  Upgrade path: v1.23 to v1.25 — Since Kubernetes v1.25, PodSecurityPolicy has been replaced by pod Security Admission. For details, see Configuring Pod Security Admission.
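                  For reference, pod Security Admission is driven by namespace labels rather than a PodSecurityPolicy object. A minimal sketch (the namespace name and chosen levels are illustrative):

                  apiVersion: v1
                  kind: Namespace
                  metadata:
                    name: demo                                       # illustrative namespace
                    labels:
                      pod-security.kubernetes.io/enforce: baseline   # reject pods that violate the baseline policy
                      pod-security.kubernetes.io/warn: restricted    # warn about violations of the stricter restricted policy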

                  Table 2 APIs deprecated in Kubernetes v1.27

                  • CSIStorageCapacity — Deprecated API version: storage.k8s.io/v1beta1. Substitute API version: storage.k8s.io/v1 (available since v1.24). Change description: None
                  • FlowSchema and PriorityLevelConfiguration — Deprecated API version: flowcontrol.apiserver.k8s.io/v1beta1. Substitute API version: flowcontrol.apiserver.k8s.io/v1beta3 (available since v1.26). Change description: None
                  • HorizontalPodAutoscaler — Deprecated API version: autoscaling/v2beta2. Substitute API version: autoscaling/v2 (available since v1.23). Change description: None

                  @@ -503,44 +512,44 @@

                  Upgrade Backup

                  How to back up a node:

                  Backup Type

                  +
                  - - - - - - - - - - - - - - - - - diff --git a/docs/cce/umn/cce_10_0307.html b/docs/cce/umn/cce_10_0307.html index 5d5f8b78..f0c8adad 100644 --- a/docs/cce/umn/cce_10_0307.html +++ b/docs/cce/umn/cce_10_0307.html @@ -2,145 +2,268 @@

                  Overview

                  Container Storage

                  CCE container storage is implemented based on Kubernetes container storage APIs (CSI). CCE integrates multiple types of cloud storage and covers different application scenarios. CCE is fully compatible with Kubernetes native storage services, such as emptyDir, hostPath, secret, and ConfigMap.

                  -
                  Figure 1 Container storage types
                  -
                  CCE allows you to mount cloud storage volumes to your pods. Their features are described below. -

Backup Type | Backup Object | Backup Mode | Backup Time | Rollback Time | Description
etcd data backup | etcd data | Automatic backup during an upgrade | 1 to 5 minutes | 2 hours | Mandatory. The backup is automatically performed during the upgrade.
CBR cloud server backup | Master node disks, including component images, configurations, logs, and etcd data | One-click backup on web pages (manually triggered) | 20 minutes to 2 hours (based on the cloud backup tasks in the current region) | 20 minutes | This function is being gradually replaced by EVS snapshot backup.


                  Garbage Collection Policies for Container Images

                  When the container engine space is insufficient, image garbage collection is triggered.

The image garbage collection policy takes two factors into account: HighThresholdPercent and LowThresholdPercent. Disk usage exceeding the high threshold (default: 80%) triggers garbage collection, which deletes the least recently used images until the low threshold (default: 70%) is met.
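As an illustration of this mechanism only (the thresholds on CCE nodes are managed by the cluster), the two values correspond to the upstream kubelet configuration fields imageGCHighThresholdPercent and imageGCLowThresholdPercent:

apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
imageGCHighThresholdPercent: 80   # garbage collection starts once disk usage exceeds this value
imageGCLowThresholdPercent: 70    # least recently used images are deleted until usage drops below this value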

                  Recommended Configuration for the Container Engine Space

                  • The container engine space should be greater than the total disk space used by containers. Formula: Container engine space > Number of containers x basesize
                  • You are advised to create and delete files of containerized services in local storage volumes (such as emptyDir and hostPath volumes) or cloud storage directories mounted to the containers. In this way, the thin pool space is not occupied. emptyDir volumes occupy the kubelet space. Therefore, properly plan the size of the kubelet space.
                  • You can deploy services on nodes that use the OverlayFS (for details, see Mapping Between OS and Container Storage Rootfs) so that the disk space occupied by files created or deleted in containers can be released immediately.
                  diff --git a/docs/cce/umn/cce_10_0342.html b/docs/cce/umn/cce_10_0342.html index a26154a8..3077df30 100644 --- a/docs/cce/umn/cce_10_0342.html +++ b/docs/cce/umn/cce_10_0342.html @@ -1,73 +1,73 @@ -

CCE allows workload pods to use multiple types of storage:

• In terms of implementation, storage supports the Container Storage Interface (CSI) and Kubernetes native storage.

  Type | Description
  CSI | An out-of-tree volume add-on that defines the standard container storage API and lets storage vendors provide custom storage plugins, mounted using PVCs and PVs, without adding their plugin source code to the Kubernetes repository for unified build, compilation, and release. CSI is recommended in Kubernetes 1.13 and later versions.
  Kubernetes native storage | An "in-tree" volume add-on that is built, compiled, and released with the Kubernetes repository.

• In terms of storage media, storage can be classified as cloud storage, local storage, and Kubernetes resource objects.

  Type | Description | Application Scenario
  Cloud storage | The storage media is provided by storage vendors. Storage volumes of this type are mounted using PVCs and PVs. | Data that requires high availability or needs to be shared, for example, logs and media resources. Select a proper cloud storage type based on the application scenario. For details, see Cloud Storage Comparison.
  Local storage | The storage media is the local data disk or memory of the node. The local persistent volume is a customized storage type provided by CCE and mounted using PVCs and PVs through CSI. Other storage types are Kubernetes native storage. | Non-HA data that requires high I/O and low latency. Select a proper local storage type based on the application scenario. For details, see Local Storage Comparison.
  Kubernetes resource objects | ConfigMaps and secrets are resources created in clusters. They are special storage types and are provided by tmpfs (a RAM-based file system) on the Kubernetes API server. | ConfigMaps are used to inject configuration data into pods. Secrets are used to transmit sensitive information such as passwords to pods.
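As a minimal, generic Kubernetes sketch of the native storage types listed above (names are illustrative and the referenced ConfigMap is assumed to exist in the same namespace), a pod can mount an emptyDir volume and a ConfigMap side by side:

apiVersion: v1
kind: Pod
metadata:
  name: storage-demo              # illustrative name
spec:
  containers:
  - name: app
    image: nginx:latest
    volumeMounts:
    - name: cache                 # Kubernetes native emptyDir volume
      mountPath: /cache
    - name: app-config            # ConfigMap served from the Kubernetes API server
      mountPath: /etc/app
  volumes:
  - name: cache
    emptyDir: {}
  - name: app-config
    configMap:
      name: app-config            # assumed to exist in the namespace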

                  Cloud Storage Comparison


Item | EVS | SFS | SFS Turbo | OBS
Definition | EVS offers scalable block storage for cloud servers. With high reliability, high performance, and rich specifications, EVS disks can be used for distributed file systems, dev/test environments, data warehouses, and high-performance computing (HPC) applications. | Expandable to petabytes, SFS provides fully hosted shared file storage, highly available and stable to handle data- and bandwidth-intensive applications in HPC, media processing, file sharing, content management, and web services. | Expandable to 320 TB, SFS Turbo provides fully hosted shared file storage, which is highly available and stable, to support small files and applications requiring low latency and high IOPS. You can use SFS Turbo in high-traffic websites, log storage, compression/decompression, DevOps, enterprise OA, and containerized applications. | Object Storage Service (OBS) provides massive, secure, and cost-effective data storage for you to store data of any type and size. You can use it in enterprise backup/archiving, video on demand (VoD), video surveillance, and many other scenarios.
Data storage logic | Stores binary data and cannot directly store files. To store files, format the file system first. | Stores files and sorts and displays data in the hierarchy of files and folders. | Stores files and sorts and displays data in the hierarchy of files and folders. | Stores objects. Files directly stored automatically generate the system metadata, which can also be customized by users.
Access mode | Accessible only after being mounted to ECSs or BMSs and initialized. | Mounted to ECSs or BMSs using network protocols. A network address must be specified or mapped to a local directory for access. | Supports the Network File System (NFS) protocol (NFSv3 only). You can seamlessly integrate existing applications and tools with SFS Turbo. | Accessible through the Internet or Direct Connect (DC). Specify the bucket address and use transmission protocols such as HTTP or HTTPS.
Static storage volumes | Supported. For details, see Using an Existing EVS Disk Through a Static PV. | Supported. For details, see Using an Existing SFS File System Through a Static PV. | Supported. For details, see Using an Existing SFS Turbo File System Through a Static PV. | Supported. For details, see Using an Existing OBS Bucket Through a Static PV.
Dynamic storage volumes | Supported. For details, see Using an EVS Disk Through a Dynamic PV. | Supported. For details, see Using an SFS File System Through a Dynamic PV. | Not supported | Supported. For details, see Using an OBS Bucket Through a Dynamic PV.
Features | Non-shared storage. Each volume can be mounted to only one node. | Shared storage featuring high performance and throughput | Shared storage featuring high performance and bandwidth | Shared, user-mode file system
Application scenarios | HPC, enterprise core cluster applications, enterprise application systems, and dev/test (NOTE: HPC apps here require high-speed and high-IOPS storage, such as industrial design and energy exploration.) | HPC, media processing, content management, web services, big data, and analysis applications (NOTE: HPC apps here require high bandwidth and shared file storage, such as gene sequencing and image rendering.) | High-traffic websites, log storage, DevOps, and enterprise OA | Big data analytics, static website hosting, online video on demand (VoD), gene sequencing, intelligent video surveillance, backup and archiving, and enterprise cloud boxes (web disks)
Capacity | TB | SFS 1.0: PB | General-purpose: TB | EB
Latency | 1–2 ms | SFS 1.0: 3–20 ms | General-purpose: 1–5 ms | 10 ms
Max. IOPS | 2200–256000, depending on flavors | SFS 1.0: 2000 | General-purpose: up to 100,000 | Tens of millions
Bandwidth | MB/s | SFS 1.0: GB/s | General-purpose: up to GB/s | TB/s
                  +
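As a hedged sketch of dynamic provisioning with EVS, assuming the csi-disk StorageClass delivered by the CCE Container Storage (Everest) add-on (see Using an EVS Disk Through a Dynamic PV for the authoritative parameters; names and sizes are illustrative):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-evs-demo              # illustrative name
spec:
  accessModes:
  - ReadWriteOnce                 # EVS is non-shared storage, so a single node mounts it
  resources:
    requests:
      storage: 10Gi
  storageClassName: csi-disk      # assumed EVS StorageClass provided by the Everest add-on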

                  Local Storage Comparison


                  kubectl edit ds everest-csi-driver -nkube-system

                  Search for disable-auto-mount-secret and set it to true.


                  Run :wq to save the settings and exit. Wait until the pod is restarted.

                  Obtaining an Access Key

                  1. Log in to the console.
                  2. Hover the cursor over the username in the upper right corner and choose My Credentials from the drop-down list.
                  3. In the navigation pane, choose Access Keys.
                  4. Click Create Access Key. The Create Access Key dialog box is displayed.
                  5. Click OK to download the access key.
                  @@ -226,8 +226,7 @@ spec:
                  touch: setting times of '/temp/test': No such file or directory
                   command terminated with exit code 1
                18. Set the read/write permissions for the IAM user who mounted the OBS volume by referring to the bucket policy configuration.


                19. Write data into the mount path again. In this example, the write operation succeeded.

                  kubectl exec obs-secret-5cd558f76f-vxslv -- touch /temp/test

                20. Check the mount path in the container to see whether the data is successfully written.

                  kubectl exec obs-secret-5cd558f76f-vxslv -- ls -l /temp/

                  Expected outputs:


                  This section describes how to configure SFS volume mount options. You can configure mount options in a PV and bind the PV to a PVC. Alternatively, configure mount options in a StorageClass and use the StorageClass to create a PVC. In this way, PVs can be dynamically created and inherit mount options configured in the StorageClass by default.

                  Prerequisites

                  The CCE Container Storage (Everest) add-on version must be 1.2.8 or later. This add-on identifies the mount options and transfers them to the underlying storage resources. The parameter settings take effect only if the underlying storage resources support the specified options.

                  -

                  +

                  Constraints

                  • Mount options cannot be configured for Kata containers.
                  • Due to the restrictions of the NFS protocol, if an SFS volume is mounted to a node for multiple times, link-related mounting parameters (such as timeo) take effect only when the SFS volume is mounted for the first time by default. For example, if the same SFS file system is mounted to multiple pods running on a node, the mounting parameter set later does not overwrite the existing parameter value. If you want to configure different mounting parameters in the preceding scenario, additionally configure the nosharecache parameter.

                  SFS Volume Mount Options

                  The Everest add-on in CCE presets the options described in Table 1 for mounting SFS volumes.

                  @@ -18,10 +18,10 @@
                21. - @@ -34,7 +34,7 @@ - @@ -48,13 +48,22 @@ - + + + +

Item | Local PV | Local Ephemeral Volume | emptyDir | hostPath
Definition | Node's local disks form a storage pool (VolumeGroup) through LVM. LVM divides them into logical volumes (LVs) and mounts them to pods. | Kubernetes native emptyDir, where node's local disks form a storage pool (VolumeGroup) through LVM. LVs are created as the storage media of emptyDir and mounted to pods. LVs deliver better performance than the default storage medium of emptyDir. | Kubernetes native emptyDir. Its lifecycle is the same as that of a pod. Memory can be specified as the storage media. When the pod is deleted, the emptyDir volume is deleted and its data is lost. | Used to mount a file directory of the host where a pod is located to a specified mount point of the pod.
Features | Low-latency, high-I/O, and non-HA persistent volume. Storage volumes are non-shared storage and bound to nodes through labels. Therefore, storage volumes can be mounted only to a single pod. | Local temporary volume. The storage space is from local LVs. | Local temporary volume. The storage space comes from the local kubelet root directory or memory. | Used to mount files or directories of the host file system. Host directories can be automatically created. Pods can be migrated (not bound to nodes).
Storage volume mounting | Static storage volumes are not supported. Using a Local PV Through a Dynamic PV is supported. | For details, see Using a Local EV. | For details, see Using a Temporary Path. | For details, see hostPath.
Application scenarios | High I/O requirements and built-in HA solutions of applications, for example, deploying MySQL in HA mode. | Scratch space, such as for a disk-based merge sort; checkpointing a long computation for recovery from crashes; saving the files obtained by the content manager container when web server container data is used. | Scratch space, such as for a disk-based merge sort; checkpointing a long computation for recovery from crashes; saving the files obtained by the content manager container when web server container data is used. | Requiring a node file, for example, if Docker is used, you can use hostPath to mount the /var/lib/docker path of the node. NOTICE: Avoid using hostPath volumes as much as possible, as they are prone to security risks. If hostPath volumes must be used, they can only be applied to files or directories and mounted in read-only mode.

                  +
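A minimal, generic sketch of the read-only hostPath usage recommended above (the pod name and the mounted path are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: hostpath-demo             # illustrative name
spec:
  containers:
  - name: app
    image: nginx:latest
    volumeMounts:
    - name: docker-dir
      mountPath: /var/lib/docker
      readOnly: true              # mount hostPath volumes read-only, as advised above
  volumes:
  - name: docker-dir
    hostPath:
      path: /var/lib/docker
      type: Directory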

keep-original-ownership (leave it blank): Whether to retain the ownership of the file mount point. If this option is used, the Everest add-on must be v1.2.63 or v2.1.2 or later. By default, this option is not added, and the mount point ownership is root:root when SFS is mounted. If this option is added, the original ownership of the file system is retained when SFS is mounted.

nolock (leave it blank): Whether to lock files on the server using the NLM protocol. If nolock is selected, the lock is valid for applications on one host. For applications on another host, the lock is invalid.

hard/soft (leave it blank): Mounting mode. hard: If the NFS request times out, the client keeps resending the request until the request is successful. soft: If the NFS request times out, the client returns an error to the invoking program. The default value is hard.

sharecache/nosharecache (leave it blank): How the data cache and attribute cache are shared when one file system is concurrently mounted to different clients. If this parameter is set to sharecache, the caches are shared between the mountings. If it is set to nosharecache, the caches are not shared, and one cache is configured for each client mounting. The default value is sharecache. NOTE: The nosharecache setting affects performance. The mounting information must be obtained for each mounting, which increases the communication overhead with the NFS server and the memory consumption of the NFS clients. In addition, the nosharecache setting on the NFS clients may lead to inconsistent caches. Determine whether to use nosharecache based on site requirements.

                  +
                  +
                  @@ -76,12 +85,12 @@ spec: csi: driver: disk.csi.everest.io # Dependent storage driver for the mounting. fsType: nfs - volumeHandle: <your_volume_id> # ID of the SFS Capacity-Oriented volume. + volumeHandle: <your_volume_id> # ID of the SFS Capacity-Oriented volume volumeAttributes: everest.io/share-export-location: <your_location> # Shared path of the SFS volume. storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner persistentVolumeReclaimPolicy: Retain # Reclaim policy. - storageClassName: csi-nas # Storage class name. + storageClassName: csi-nas # Storage class name. mountOptions: # Mount options. - vers=3 - nolock diff --git a/docs/cce/umn/cce_10_0341.html b/docs/cce/umn/cce_10_0341.html index 0fc17dd6..4a934beb 100644 --- a/docs/cce/umn/cce_10_0341.html +++ b/docs/cce/umn/cce_10_0341.html @@ -3,24 +3,23 @@

                  Data Disk Space Allocation

                  This section describes how to allocate data disk space to nodes so that you can configure the data disk space accordingly.

                  Allocating Data Disk Space

                  When creating a node, configure data disks for the node. You can also click Expand and customize the data disk space allocation for the node.

                  -
• Space Allocation for Container Engines
  • Specified disk space: CCE divides the data disk space into two parts by default. One part is used to store the Docker/containerd working directories, container images, and image metadata. The other is reserved for kubelet and emptyDir volumes. The available container engine space affects image pulls and container startup and running.
    • Container engine and container image space (90% by default): stores the container runtime working directories, container image data, and image metadata.
    • kubelet and emptyDir space (10% by default): stores pod configuration files, secrets, and mounted storage such as emptyDir volumes.
• Space Allocation for Pods: indicates the basesize of a pod. You can set an upper limit for the disk space occupied by each workload pod (including the space occupied by container images). This setting prevents pods from using up all the available disk space, which could cause service exceptions. It is recommended that the value be less than or equal to 80% of the container engine space. This parameter is related to the node OS and container storage rootfs and is not supported in some scenarios. For details, see Mapping Between OS and Container Storage Rootfs.
• Write Mode
  • Linear: A linear logical volume integrates one or more physical volumes. Data is written to the next physical volume when the previous one is used up.
  • Striped: Available only if there are at least two data disks. A striped logical volume stripes data into blocks of the same size and stores them in multiple physical volumes in sequence, allowing data to be read and written concurrently. A storage pool consisting of striped volumes cannot be scaled out.


                  +

                  Space Allocation for Container Engines

                  For a node using a non-shared data disk (100 GiB for example), the division of the disk space varies depending on the container storage Rootfs type Device Mapper or OverlayFS. For details about the container storage Rootfs corresponding to different OSs, see Mapping Between OS and Container Storage Rootfs.

                  • Rootfs (Device Mapper)
                    By default, the container engine and image space, occupying 90% of the data disk, can be divided into the following two parts:
                    • The /var/lib/docker directory is used as the Docker working directory and occupies 20% of the container engine and container image space by default. (Space size of the /var/lib/docker directory = Data disk space x 90% x 20%)
                    • The thin pool is used to store container image data, image metadata, and container data, and occupies 80% of the container engine and container image space by default. (Thin pool space = Data disk space x 90% x 80%)

                      The thin pool is dynamically mounted. You can view it by running the lsblk command on a node, but not the df -h command.

                    -
                    Figure 1 Space allocation for container engines of Device Mapper
                    +
                    Figure 1 Space allocation for container engines of Device Mapper
                  • Rootfs (OverlayFS)

                    No separate thin pool. The entire container engine and container image space (90% of the data disk by default) are in the /var/lib/docker directory.

                    -
                    Figure 2 Space allocation for container engines of OverlayFS
                    +
                    Figure 2 Space allocation for container engines of OverlayFS
                  -

Space Allocation for Pods

The customized pod container space (basesize) is related to the node OS and container storage Rootfs. For details about the container storage Rootfs, see Mapping Between OS and Container Storage Rootfs.

• Device Mapper supports custom pod basesize. The default value is 10 GiB.
• In OverlayFS mode, the pod container space is not limited by default.

  When you use Docker on EulerOS 2.9 nodes, basesize will not take effect if CAP_SYS_RESOURCE or privileged is configured for a container.

                  When configuring basesize, consider the maximum number of pods on a node. The container engine space should be greater than the total disk space used by containers. Formula: the container engine space and container image space (90% by default) > Number of containers x basesize. Otherwise, the container engine space allocated to the node may be insufficient and the container cannot be started.

                  For nodes that support basesize, when Device Mapper is used, although you can limit the size of the /home directory of a single container (to 10 GB by default), all containers on the node still share the thin pool of the node for storage. They are not completely isolated. When the sum of the thin pool space used by certain containers reaches the upper limit, other containers cannot run properly.

                  In addition, after a file is deleted in the /home directory of the container, the thin pool space occupied by the file is not released immediately. Therefore, even if basesize is set to 10 GB, the thin pool space occupied by files keeps increasing until 10 GB when files are created in the container. The space released after file deletion will be reused but after a while. If the number of containers on the node multiplied by basesize is greater than the thin pool space size of the node, there is a possibility that the thin pool space has been used up.
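As an illustrative calculation only: on a 100 GiB data disk with the default split, about 90 GiB is reserved for the container engine and container image space. With basesize set to 10 GiB, the formula above no longer holds once nine containers could each consume their full allowance (9 x 10 GiB = 90 GiB), so plan for fewer pods per node or a smaller basesize.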

                  @@ -34,19 +33,12 @@

EulerOS 2.5: Device Mapper. Custom basesize is supported only when the container engine is Docker. The default value is 10 GiB.

EulerOS 2.9: OverlayFS. Custom basesize is supported only by clusters of v1.19.16, v1.21.3, v1.23.3, or later; there are no limits by default. It is not supported if the cluster version is earlier than v1.19.16, v1.21.3, or v1.23.3.

Ubuntu 22.04: OverlayFS. Custom basesize is supported only by Docker clusters; there are no limits by default.

ECS VMs use OverlayFS. Custom basesize is supported only when Rootfs is set to OverlayFS and the container engine is Docker; the container basesize is not limited by default.

HCE OS 2.0: OverlayFS. Custom basesize is supported only by Docker clusters; there are no limits by default.

                  Category

                  +
                  - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/cce/umn/cce_10_0345.html b/docs/cce/umn/cce_10_0345.html index 596b684a..6c03f9d0 100644 --- a/docs/cce/umn/cce_10_0345.html +++ b/docs/cce/umn/cce_10_0345.html @@ -5,9 +5,11 @@

                  Prerequisites

                  • A GPU node has been created. For details, see Creating a Node.
                  • The gpu-device-plugin (previously gpu-beta add-on) has been installed. During the installation, select the GPU driver on the node. For details, see CCE AI Suite (NVIDIA GPU).
                  • gpu-device-plugin mounts the driver directory to /usr/local/nvidia/lib64. To use GPU resources in a container, add /usr/local/nvidia/lib64 to the LD_LIBRARY_PATH environment variable.

                    Generally, you can use any of the following methods to add a file:

                    1. Configure the LD_LIBRARY_PATH environment variable in the Dockerfile used for creating an image. (Recommended)
                      ENV LD_LIBRARY_PATH /usr/local/nvidia/lib64:$LD_LIBRARY_PATH
                    2. Configure the LD_LIBRARY_PATH environment variable in the image startup command.
                      /bin/bash -c "export LD_LIBRARY_PATH=/usr/local/nvidia/lib64:$LD_LIBRARY_PATH && ..."
                      -
    3. Define the LD_LIBRARY_PATH environment variable when creating a workload. (Ensure that this variable is not configured in the container. Otherwise, it will be overwritten.)
      ...
              env:
                - name: LD_LIBRARY_PATH
                  value: /usr/local/nvidia/lib64
      ...
                  diff --git a/docs/cce/umn/cce_10_0349.html b/docs/cce/umn/cce_10_0349.html index f3111472..99f7f06d 100644 --- a/docs/cce/umn/cce_10_0349.html +++ b/docs/cce/umn/cce_10_0349.html @@ -45,7 +45,7 @@ @@ -53,7 +53,7 @@ - diff --git a/docs/cce/umn/cce_10_0351.html b/docs/cce/umn/cce_10_0351.html index d45488ed..b30f22cf 100644 --- a/docs/cce/umn/cce_10_0351.html +++ b/docs/cce/umn/cce_10_0351.html @@ -5,6 +5,8 @@
                  • CPU throttling
                  • Context switching
                  • Processor cache misses
                  • Cross-socket memory access
                  • Hyperthreads that are expected to run on the same physical CPU card

                  If your workloads are sensitive to any of these items and CPU cache affinity and scheduling latency significantly affect workload performance, kubelet allows alternative CPU management policies (CPU binding) to determine some placement preferences on the node. The CPU manager preferentially allocates resources on a socket and full physical cores to avoid interference.

                  +

                  Constraints

                  The CPU management policy cannot take effect on physical cloud server nodes.

                  +

                  Enabling the CPU Management Policy

                  A CPU management policy is specified by the kubelet flag --cpu-manager-policy. By default, Kubernetes supports the following policies:

                  • Disabled (none): the default policy. The none policy explicitly enables the existing default CPU affinity scheme, providing no affinity beyond what the OS scheduler does automatically.
• Enabled (static): The static policy allows containers in guaranteed pods with integer CPU requests to be granted increased CPU affinity and exclusivity on the node.

                  When creating a cluster, you can configure the CPU management policy in Advanced Settings.
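For reference, the static policy only takes effect for containers in Guaranteed pods that request whole CPUs; a minimal sketch of such a pod (the image and sizes are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: cpu-bound-app             # illustrative name
spec:
  containers:
  - name: app
    image: nginx:latest
    resources:
      requests:
        cpu: "2"                  # integer CPU request
        memory: 2Gi
      limits:
        cpu: "2"                  # limits equal requests, so the pod is in the Guaranteed QoS class
        memory: 2Gi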

                  diff --git a/docs/cce/umn/cce_10_0352.html b/docs/cce/umn/cce_10_0352.html index acbd3b98..0064a748 100644 --- a/docs/cce/umn/cce_10_0352.html +++ b/docs/cce/umn/cce_10_0352.html @@ -2,7 +2,11 @@

                  Managing Node Taints

                  Taints enable a node to repel specific pods to prevent these pods from being scheduled to the node.

                  -

                  Taints

                  A taint is a key-value pair associated with an effect. The following effects are available:

                  +

                  Procedure for Operations Performed on the Console

                  On the CCE console, you can also batch manage nodes' taints.

                  +
                  1. Log in to the CCE console and click the cluster name to access the cluster console.
                  2. In the navigation pane, choose Nodes. On the displayed page, click the Nodes tab, select the target node and click Labels and Taints in the upper left corner.
                  3. In the displayed dialog box, click Add Operation under Batch Operation, and then choose Add/Update or Delete as well as Taint.

                    Enter the key and value of the taint to be operated, choose a taint effect, and click OK.

                    +

                  4. After the taint is added, check the added taint in node data.
                  +
                  +

                  Procedure for Operations Performed Through kubectl Commands

                  A taint is a key-value pair associated with an effect. The following effects are available:

                  • NoSchedule: No pod will be scheduled onto the node unless it has a matching toleration. Existing pods will not be evicted from the node.
                  • PreferNoSchedule: Kubernetes prevents pods that cannot tolerate this taint from being scheduled onto the node.
                  • NoExecute: If the pod has been running on a node, the pod will be evicted from the node. If the pod has not been running on a node, the pod will not be scheduled onto the node.

                  To add a taint to a node, run the kubectl taint node nodename command as follows:

                  $ kubectl get node
                  @@ -27,7 +31,7 @@ spec:
                       key: key1
                       value: value1
                   ...
                  -


                  +

                  To remove a taint, add a hyphen (-) at the end of the command for adding a taint, as shown in the following example:

                  $ kubectl taint node 192.168.10.240 key1=value1:NoSchedule-
                   node/192.168.10.240 untainted
                   $ kubectl describe node 192.168.10.240
                  @@ -35,21 +39,19 @@ Name:               192.168.10.240
                   ...
                   Taints:             <none>
                   ...
                  -

                  +
                  +

                  Configuring a Node Scheduling Policy in One-Click Mode

                  You can configure a node to be unschedulable on the console. Then, CCE will add a taint with key node.kubernetes.io/unschedulable and the NoSchedule setting to the node. After a node is set to be unschedulable, new pods cannot be scheduled to this node, but pods running on the node are not affected.

                  +
                  1. Log in to the CCE console and click the cluster name to access the cluster console.
                  2. In the navigation pane, choose Nodes. On the displayed page, click the Nodes tab.
                  3. In the node list, locate the target node and choose More > Disable Scheduling in the Operation column.
                  4. In the dialog box that is displayed, click Yes to configure the node to be unschedulable.

                    This operation will add a taint to the node. You can use kubectl to view the content of the taint.

                    +
                    $ kubectl describe node 192.168.10.240
                    +...
                    +Taints:             node.kubernetes.io/unschedulable:NoSchedule
                    +...
                    +

                  5. Go back to the node list, locate the target node, and choose More > Enable Scheduling. Then, the node changes to be schedulable.

                  System Taints

                  When some issues occurred on a node, Kubernetes automatically adds a taint to the node. The built-in taints are as follows:

                  • node.kubernetes.io/not-ready: The node is not ready. The node Ready value is False.
                  • node.kubernetes.io/unreachable: The node controller cannot access the node. The node Ready value is Unknown.
                  • node.kubernetes.io/memory-pressure: The node memory is approaching the upper limit.
                  • node.kubernetes.io/disk-pressure: The node disk space is approaching the upper limit.
                  • node.kubernetes.io/pid-pressure: The node PIDs are approaching the upper limit.
                  • node.kubernetes.io/network-unavailable: The node network is unavailable.
                  • node.kubernetes.io/unschedulable: The node cannot be scheduled.
                  • node.cloudprovider.kubernetes.io/uninitialized: If an external cloud platform driver is specified when kubelet is started, kubelet adds a taint to the current node and marks it as unavailable. After a controller of cloud-controller-manager initializes the node, kubelet will delete the taint.
                  -

                  -

                  Tolerations

                  Tolerations are applied to pods, and allow (but do not require) the pods to schedule onto nodes with matching taints.

                  +

                  Related Operations (Tolerations)

                  Tolerations are applied to pods, and allow (but do not require) the pods to schedule onto nodes with matching taints.

                  Taints and tolerations work together to ensure that pods are not scheduled onto inappropriate nodes. One or more taints are applied to a node. This marks that the node should not accept any pods that do not tolerate the taints.

                  Example:

                  apiVersion: v1
                  diff --git a/docs/cce/umn/cce_10_0353.html b/docs/cce/umn/cce_10_0353.html
                  index 5d0655a7..20c07cb2 100644
                  --- a/docs/cce/umn/cce_10_0353.html
                  +++ b/docs/cce/umn/cce_10_0353.html
                  @@ -23,7 +23,6 @@ spec:
                     imagePullSecrets:                 
                     - name: default-secret

                  An image pull policy can also be configured on the CCE console. When creating a workload, configure Pull Policy. If Always is selected, images are always pulled. If Always is not selected, images are pulled as needed.

                  -

                  Use a new tag each time you create an image. If you do not update the tag but only update the image, when Pull Policy is set to IfNotPresent, CCE considers that an image with the tag already exists on the current node and will not pull the image again.
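For reference, the pull policy can also be set per container in the workload manifest; a minimal sketch (the image and names are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: nginx-pull-demo           # illustrative name
spec:
  containers:
  - name: nginx
    image: nginx:1.25             # use a new tag for each image build
    imagePullPolicy: IfNotPresent # set to Always to pull the image on every start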

                  diff --git a/docs/cce/umn/cce_10_0355.html b/docs/cce/umn/cce_10_0355.html index 8a3e5dde..9f06fb34 100644 --- a/docs/cce/umn/cce_10_0355.html +++ b/docs/cce/umn/cce_10_0355.html @@ -5,14 +5,13 @@

                  kube-proxy is responsible for intra-cluster forwarding. kube-proxy has two forwarding modes: iptables and IPVS. iptables is a simple polling forwarding mode. IPVS has multiple forwarding modes but it requires modifying the startup parameters of kube-proxy. Compared with iptables and IPVS, load balancers provide more flexible forwarding policies as well as health check capabilities.

                  Solution

                  CCE supports passthrough networking. You can configure the annotation of kubernetes.io/elb.pass-through for the Loadbalancer Service. Intra-cluster access to the Service load balancer address is then forwarded to backend pods by the load balancer.

                  -
                  Figure 1 Passthrough networking illustration
                  +
                  Figure 1 Passthrough networking illustration
                  • CCE clusters

                    When a LoadBalancer Service is accessed within the cluster, the access is forwarded to the backend pods using iptables/IPVS by default.

                    When a LoadBalancer Service (configured with elb.pass-through) is accessed within the cluster, the access is first forwarded to the load balancer, then the nodes, and finally to the backend pods using iptables/IPVS.

                    -
                  • CCE Turbo clusters

                    When a LoadBalancer Service is accessed within the cluster, the access is forwarded to the backend pods using iptables/IPVS by default.

                    -

                    When a LoadBalancer Service (configured with elb.pass-through) is accessed within the cluster, the access is first forwarded to the load balancer, and then to the pods.

                    +
                  • CCE Turbo clusters

                    When a client accesses a LoadBalancer Service from within the cluster, pass-through is used by default. In this case, the client directly accesses the load balancer private network IP address and then access a container through the load balancer.

                  -

                  Constraints

                  • After passthrough networking is configured for a dedicated load balancer, containers on the node where the workload runs cannot be accessed through the Service.
                  • Passthrough networking is not supported for clusters of v1.15 or earlier.
                  • In IPVS network mode, the pass-through settings of Service connected to the same ELB must be the same.
                  • If node-level (local) service affinity is used, kubernetes.io/elb.pass-through is automatically set to onlyLocal to enable pass-through.
                  +

                  Constraints

• After passthrough networking is configured for a dedicated load balancer, in a CCE standard cluster, pods that run on the same node as the workload cannot be accessed through the LoadBalancer Service.
                  • Passthrough networking is not supported for clusters of v1.15 or earlier.
                  • In IPVS network mode, the pass-through settings of Service connected to the same ELB must be the same.
                  • If node-level (local) service affinity is used, kubernetes.io/elb.pass-through is automatically set to onlyLocal to enable pass-through.

                  Procedure

                  This section describes how to create a Deployment using an Nginx image and create a Service with passthrough networking enabled.

                  1. Use the Nginx image to create a Deployment.

apiVersion: apps/v1
...
            memory: 200Mi
      imagePullSecrets:
      - name: default-secret
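  For reference, a complete minimal manifest of this kind is sketched below. The memory limit and default-secret follow the fragment above; the replica count, image, and CPU values are illustrative assumptions.

  apiVersion: apps/v1
  kind: Deployment
  metadata:
    name: nginx
  spec:
    replicas: 2                      # illustrative replica count
    selector:
      matchLabels:
        app: nginx
    template:
      metadata:
        labels:
          app: nginx
      spec:
        containers:
        - name: container-0
          image: nginx:latest        # illustrative image
          resources:
            requests:
              cpu: 250m              # illustrative CPU value
              memory: 200Mi
            limits:
              cpu: 250m
              memory: 200Mi
        imagePullSecrets:
        - name: default-secret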

2. Create a LoadBalancer Service and set kubernetes.io/elb.pass-through to true. In this example, a shared load balancer named james is automatically created.

  apiVersion: v1
  kind: Service
  metadata:
    annotations:
  ...
      app: nginx
    type: LoadBalancer

  Use kubernetes.io/elb.subnet-id to specify the VPC subnet where the load balancer is located. The load balancer and the cluster must be in the same VPC.
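  A complete minimal Service of this kind is sketched below. The kubernetes.io/elb.pass-through annotation and the load balancer name james come from this example; the kubernetes.io/elb.class and kubernetes.io/elb.autocreate values, the port numbers, and the Service name are assumptions, and the full list of fields accepted by these annotations is described in the LoadBalancer Service documentation.

  apiVersion: v1
  kind: Service
  metadata:
    name: nginx
    annotations:
      kubernetes.io/elb.pass-through: "true"                           # enable passthrough networking
      kubernetes.io/elb.class: union                                   # assumed: shared load balancer
      kubernetes.io/elb.autocreate: '{"type":"public","name":"james"}' # illustrative subset of the autocreate fields
  spec:
    ports:
    - name: service0
      port: 80
      protocol: TCP
      targetPort: 80
    selector:
      app: nginx
    type: LoadBalancer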

                  Verification

                  Check the ELB load balancer corresponding to the created Service. The load balancer name is james. The number of ELB connections is 0, as shown in the following figure.


                  Use kubectl to connect to the cluster, go to an Nginx container, and access the ELB address. The access is successful.

# kubectl get pod
 NAME                     READY   STATUS    RESTARTS   AGE
...
Commercial support is available at
...
 </body>
 </html>

                  Wait for a period of time and view the ELB monitoring data. A new access connection is created for the ELB, indicating that the access passes through the ELB load balancer as expected.


diff --git a/docs/cce/umn/cce_10_0360.html b/docs/cce/umn/cce_10_0360.html
index 997f7eb9..d743f805 100644
--- a/docs/cce/umn/cce_10_0360.html
+++ b/docs/cce/umn/cce_10_0360.html

nameserver 10.247.3.10
search default.svc.cluster.local svc.cluster.local cluster.local
options ndots:5 timeout single-request-reopen

When a user accesses <Service name>:<port> of the Nginx workload, the IP address of the Nginx Service is first resolved by CoreDNS, and the request is then sent to that IP address, which forwards it to the backend Nginx pod.

Figure 1 Example of domain name resolution in a cluster

                  How Does Domain Name Resolution Work in Kubernetes?

DNS policies can be configured for each pod. Kubernetes supports the DNS policies Default, ClusterFirst, ClusterFirstWithHostNet, and None. For details, see DNS for Services and Pods. These policies are specified in the dnsPolicy field of the pod spec; a short sketch follows the list below.

                  • Default: Pods inherit the name resolution configuration from the node that the pods run on. The custom upstream DNS server and the stub domain cannot be used together with this policy.
                  • ClusterFirst: Any DNS query that does not match the configured cluster domain suffix, such as www.kubernetes.io, is forwarded to the upstream name server inherited from the node. Cluster administrators may have extra stub domains and upstream DNS servers configured.
• ClusterFirstWithHostNet: For pods running with hostNetwork, set the DNS policy to ClusterFirstWithHostNet.
• None: It allows a pod to ignore DNS settings from the Kubernetes environment. All DNS settings are supposed to be provided using the dnsConfig field in the pod spec.
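A minimal sketch of a pod that uses the None policy and supplies its own settings through dnsConfig; the name server, search domain, and image are illustrative assumptions:

  apiVersion: v1
  kind: Pod
  metadata:
    name: dns-example
  spec:
    containers:
    - name: container-0
      image: nginx:latest          # illustrative image
    dnsPolicy: "None"
    dnsConfig:
      nameservers:
      - 10.247.3.10                # assumed: the cluster CoreDNS address shown above
      searches:
      - default.svc.cluster.local
      options:
      - name: ndots
        value: "5"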
When stub domains and upstream DNS servers are configured, a DNS query is routed as follows:
                  1. The query is first sent to the DNS caching layer in CoreDNS.
                  2. From the caching layer, the suffix of the request is examined and then the request is forwarded to the corresponding DNS:
                    • Names with the cluster suffix, for example, .cluster.local: The request is sent to CoreDNS.
                    • Names with the stub domain suffix, for example, .acme.local: The request is sent to the configured custom DNS resolver that listens, for example, on 1.2.3.4.
                    • Names that do not match the suffix (for example, widget.com): The request is forwarded to the upstream DNS.
Figure 2 Routing

                  Related Operations

                  You can also configure DNS in a workload. For details, see DNS Configuration.

                  You can also use CoreDNS to implement user-defined domain name resolution. For details, see Using CoreDNS for Custom Domain Name Resolution.

diff --git a/docs/cce/umn/cce_10_0361.html b/docs/cce/umn/cce_10_0361.html
index a2c05ccc..5396766f 100644
--- a/docs/cce/umn/cce_10_0361.html
+++ b/docs/cce/umn/cce_10_0361.html

                  Configuring the Stub Domain for CoreDNS

                  Cluster administrators can modify the ConfigMap for the CoreDNS Corefile to change how service discovery works.

                  Assume that a cluster administrator has a Consul DNS server located at 10.150.0.1 and all Consul domain names have the suffix .consul.local.

1. Log in to the CCE console and click the cluster name to access the cluster console.
2. In the navigation pane, choose Add-ons. Then, click Edit under CoreDNS.
3. Add a stub domain in the Parameters area. The format is a key-value pair. The key is a DNS suffix domain name, and the value is a DNS IP address or a group of DNS IP addresses, for example, consul.local -- 10.150.0.1.
4. Click OK.
5. Choose ConfigMaps and Secrets in the navigation pane, select the kube-system namespace, and view the ConfigMap data of coredns to check whether the update is successful.

  The corresponding Corefile content is as follows:

                      .:5353 {
                           bind {$POD_IP}
                           cache 30
                       

                  Modifying the CoreDNS Hosts Configuration File

                  After modifying the hosts file in CoreDNS, you do not need to configure the hosts file in each pod to add resolution records.

1. Log in to the CCE console and click the cluster name to access the cluster console.
2. In the navigation pane, choose Add-ons. Then, click Edit under CoreDNS.
3. Edit the advanced configuration under Parameters and add the following content to the plugins field:

  {
     "configBlock": "192.168.1.1 www.example.com\nfallthrough",
     "name": "hosts"
   }

                  Adding the CoreDNS Rewrite Configuration to Point the Domain Name to Services in the Cluster

                  Use the Rewrite plug-in of CoreDNS to resolve a specified domain name to the domain name of a Service. For example, the request for accessing the example.com domain name is redirected to the example.default.svc.cluster.local domain name, that is, the example service in the default namespace.

1. Log in to the CCE console and click the cluster name to access the cluster console.
2. In the navigation pane, choose Add-ons. Then, click Edit under CoreDNS.
3. Edit the advanced configuration under Parameters and add the following content to the plugins field:

  {
      "name": "rewrite",
      "parameters": "name example.com example.default.svc.cluster.local"
   }

                  Using CoreDNS to Cascade Self-Built DNS

                  By default, CoreDNS uses the /etc/resolv.conf file of the node for resolution. You can also change the resolution address to that of the external DNS.

1. Log in to the CCE console and click the cluster name to access the cluster console.
2. In the navigation pane, choose Add-ons. Then, click Edit under CoreDNS.
3. Edit the advanced configuration under Parameters and modify the following content in the plugins field:

  {
       "configBlock": "policy random",
       "name": "forward",
       "parameters": ". 192.168.1.1"
   }
                      diff --git a/docs/cce/umn/cce_10_0363.html b/docs/cce/umn/cce_10_0363.html
                      index c7751230..65bb2ef1 100644
                      --- a/docs/cce/umn/cce_10_0363.html
                      +++ b/docs/cce/umn/cce_10_0363.html
                      @@ -3,12 +3,12 @@
                       

                      Creating a Node

                      Prerequisites

                      • At least one cluster has been created.
                      • A key pair has been created for identity authentication upon remote node login.
Constraints

• The node has at least 2 vCPUs and 4 GiB of memory.
• To ensure node stability, a certain number of CCE node resources will be reserved for Kubernetes components (such as kubelet, kube-proxy, and docker) based on the node specifications. Therefore, the total number of node resources and the number of allocatable node resources for your cluster are different. The larger the node specifications, the more containers can be deployed on the node, so more node resources need to be reserved to run Kubernetes components. For details, see Node Resource Reservation Policy.
• Networks including VM networks and container networks of nodes are all managed by CCE. Do not add or delete ENIs, or change routes and IP addresses. Otherwise, services may be unavailable. For example, the NIC named gw_11cbf51a@eth0 on the node is the container network gateway and cannot be modified.
• During node creation, software packages are downloaded from OBS using the domain name. A private DNS server must be used to resolve the OBS domain name. Therefore, the DNS server address of the subnet where the node resides must be set to the private DNS server address so that the node can access the private DNS server. When you create a subnet, the private DNS server is used by default. If you change the subnet DNS, ensure that the DNS server in use can resolve the OBS domain name.
• Once a node is created, its AZ cannot be changed.

                      Procedure

                      After a cluster is created, you can create nodes for the cluster.

1. Log in to the CCE console.
2. In the navigation pane of the CCE console, choose Clusters. Click the target cluster name to access its details page.
3. In the navigation pane, choose Nodes. On the page displayed, click the Nodes tab and then Create Node in the upper right corner. Configure node parameters.

  Configurations

  You can configure the flavor and OS of a cloud server, on which your containerized applications run.

Positioning
• CCE Standard: Standard clusters that provide highly reliable and secure containers for commercial use.
• CCE Turbo: Next-gen container clusters designed for Cloud Native 2.0, with accelerated computing, networking, and scheduling.

Application scenario
• CCE Standard: For users who expect to use container clusters to manage applications, obtain elastic computing resources, and enable simplified management on computing, network, and storage resources.
• CCE Turbo: For users who have higher requirements on performance, resource utilization, and full-scenario coverage.

Specification difference

Network model
• CCE Standard: Cloud-native network 1.0, which applies to common, smaller-scale scenarios. Options: tunnel network or Virtual Private Cloud (VPC) network.
• CCE Turbo: Cloud Native Network 2.0, which applies to large-scale and high-performance scenarios. Max networking scale: 2,000 nodes.

Network performance
• CCE Standard: Overlays the VPC network with the container network, causing certain performance loss.
• CCE Turbo: Flattens the VPC network and container network into one, achieving zero performance loss.

Network isolation
• CCE Standard: The tunnel network model supports network policies for intra-cluster communications; the VPC network model supports no isolation.
• CCE Turbo: Associates pods with security groups. Unifies security isolation in and out of the cluster via security group network policies.

Security isolation
• CCE Standard: Runs common containers, isolated by cgroups.
• CCE Turbo: Physical machines run Kuasar containers, allowing VM-level isolation; VMs run common containers, isolated by cgroups.

Edge infrastructure management
• CCE Standard: Not supported.
• CCE Turbo: Supports management of Intelligent EdgeSite (IES).

The internal IP address in the cluster cannot be pinged.

The internal IP address in the cluster can be pinged.

NOTE:

The IP address in clusters of v1.27 or later cannot be pinged due to security hardening.

When there are more than 1,000 Services in the cluster, network delay may occur.

• If an ingress and a Service use the same load balancer, the ingress cannot be accessed from the nodes and containers in the cluster because kube-proxy mounts the LoadBalancer Service address to the ipvs-0 bridge. This bridge intercepts the traffic of the load balancer used by the ingress. Use different load balancers for the ingress and Service.
metadata:
  everest.io/reclaim-policy: retain-volume-only
  name: pv-evs-test
  labels:
    failure-domain.beta.kubernetes.io/region: <your_region>    # Region of the node where the application is to be deployed
    failure-domain.beta.kubernetes.io/zone: <your_zone>        # AZ of the node where the application is to be deployed
spec:
  accessModes:
  - ReadWriteOnce

diff --git a/docs/cce/umn/cce_10_0379.html b/docs/cce/umn/cce_10_0379.html
index 53e12d93..ae373b19 100644
--- a/docs/cce/umn/cce_10_0379.html
+++ b/docs/cce/umn/cce_10_0379.html

                    Using an Existing OBS Bucket Through a Static PV

                    This section describes how to use an existing Object Storage Service (OBS) bucket to statically create PVs and PVCs and implement data persistence and sharing in workloads.

Prerequisites

• You have created a cluster and installed the CCE Container Storage (Everest) add-on in the cluster.
• If you want to create the storage resources using commands, use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
• You have created an OBS bucket. An OBS bucket of the parallel file system type can be selected only when it is in the same region as the cluster.

Constraints

• If OBS volumes are used, the owner group and permission of the mount point cannot be modified.
• CCE allows parallel file systems to be mounted using OBS SDKs or PVCs. If PVC mounting is used, the obsfs tool provided by OBS must be used. An obsfs resident process is generated each time an object storage volume generated from the parallel file system is mounted to a node, as shown in the following figure.

  Figure 1 obsfs resident process

                        Reserve 1 GiB of memory for each obsfs process. For example, for a node with 4 vCPUs and 8 GiB of memory, an obsfs parallel file system should be mounted to no more than eight pods.

                        • An obsfs resident process runs on a node. If the consumed memory exceeds the upper limit of the node, the node malfunctions. On a node with 4 vCPUs and 8 GiB of memory, if more than 100 pods are mounted to a parallel file system, the node will be unavailable. Control the number of pods mounted to a parallel file system on a single node.
                        • Multiple PVs can use the same OBS storage volume with the following restrictions:
                          • Do not mount all PVCs/PVs that use the same underlying object storage volume to a pod. This leads to a pod startup failure because not all PVCs can be mounted to the pod due to the same volumeHandle values of these PVs.
                          • The persistentVolumeReclaimPolicy parameter in the PVs must be set to Retain. Otherwise, when a PV is deleted, the associated underlying volume may be deleted. In this case, other PVs associated with the underlying volume malfunction.
  • If the same underlying storage is used by multiple PVs, you must maintain data consistency at the application layer. Enable isolation and protection for ReadWriteMany access and prevent multiple clients from writing to the same file to avoid data overwriting and loss. The PV fields involved are sketched after this list.
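For reference, the PV fields that these restrictions refer to (volumeHandle and persistentVolumeReclaimPolicy) appear roughly as follows in a static OBS PV. This is only a sketch: the driver name, volume attributes, and storage class below are assumptions based on typical Everest CSI examples and should be checked against the PV template generated by the console.

  apiVersion: v1
  kind: PersistentVolume
  metadata:
    name: pv-obs-example
    annotations:
      pv.kubernetes.io/provisioned-by: everest-csi-provisioner   # assumed provisioner name
  spec:
    accessModes:
    - ReadWriteMany
    capacity:
      storage: 1Gi
    csi:
      driver: obs.csi.everest.io        # assumed Everest OBS driver name
      volumeHandle: <your-obs-bucket>   # underlying bucket; identical volumeHandle values are what the first restriction warns about
      fsType: obsfs                     # parallel file system mounted with obsfs
      volumeAttributes:
        everest.io/obs-volume-type: STANDARD   # assumed attribute key
    persistentVolumeReclaimPolicy: Retain      # required when several PVs reuse one bucket
    storageClassName: csi-obs                  # assumed storage class name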
                    Table 1 Node configuration parameters

                    Parameter

                    Description

                    Node Type

Select a node type based on service requirements. Then, the available node flavors will be automatically displayed in the Specifications area for you to select.

CCE standard clusters support the following node types:
• ECS (VM): A virtualized ECS is used as a cluster node.

CCE Turbo clusters support the following node types:
• ECS (VM): A virtualized ECS is used as a cluster node. A CCE Turbo cluster supports only the cloud servers that allow multiple ENIs. Select a server type displayed on the CCE console.

Specifications

Select node specifications that best fit your service needs.

The available node flavors vary depending on AZs. Obtain the flavors displayed on the console.

Container Engine

The container engines supported by CCE include Docker and containerd, which may vary depending on cluster types, cluster versions, and OSs. Select a container engine based on the information displayed on the CCE console. For details, see Mapping between Node OSs and Container Engines.

                    OS

                    Select an OS type. Different types of nodes support different OSs.
                    • Public image: Select a public image for the node.
                    • Private image: Select a private image for the node.
NOTE:

Service container runtimes share the kernel and underlying calls of nodes. To ensure compatibility, select a Linux distribution version that is the same as or close to that of the final service container image for the node OS.

Node Name

Name of the node. When nodes (ECSs) are created in batches, the value of this parameter is used as the name prefix for each ECS.

                    The system generates a default name for you, which can be modified.

Enter 1 to 56 characters. Only lowercase letters, digits, hyphens (-), and periods (.) are allowed. The name must start with a lowercase letter and cannot end with a hyphen (-). Only lowercase letters or digits are allowed before and after periods (.).

                    Login Mode

Storage Settings

Configure storage resources on a node for the containers running on it. Select a disk type and configure its size based on service requirements.
                    Table 2 Configuration parameters

                    Parameter

                    Description


                    System Disk

                    System disk used by the node OS. The value ranges from 40 GiB to 1024 GiB. The default value is 50 GiB.

Encryption: System disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption setting. This function is available only in certain regions.
• Encryption is not selected by default.
• After setting System Disk Encryption to Encryption, choose an existing key. If no key is available, click View Key List and create a key. After the key is created, click the refresh icon next to the Encryption text box.

                    Data Disk

                    At least one data disk is required for the container runtime and kubelet. The data disk cannot be deleted or uninstalled. Otherwise, the node will be unavailable.

• First data disk: used for container runtime and kubelet components. The value ranges from 20 GiB to 32,768 GiB. The default value is 100 GiB.
• Other data disks: You can set the data disk size to a value ranging from 10 GiB to 32,768 GiB. The default value is 100 GiB.

NOTE:
• If the node flavor is disk-intensive or ultra-high I/O, one data disk can be a local disk.
• Local disks may break down and do not ensure data reliability. Store your service data in EVS disks, which are more reliable than local disks.

                    Advanced Settings

Click Expand and configure the following parameters:

• Data Disk Space Allocation: allocates space for container engines, images, and ephemeral storage for them to run properly. For details about how to allocate data disk space, see Data Disk Space Allocation.
• Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption setting. This function is available only in certain regions.
  • Encryption is not selected by default.
  • After selecting Encryption, you can select an existing key in the displayed dialog box. If no key is available, click View Key List and create a key. After the key is created, click the refresh icon next to the Encryption text box.

Adding data disks

A maximum of four data disks can be added. By default, raw disks are created without any processing. You can also click Expand and select any of the following options:

• Default: A raw disk is created without any processing.
• Mount Disk: The data disk is attached to a specified directory.
• Use as PV: applicable when there is a high performance requirement on PVs. The node.kubernetes.io/local-storage-persistent label is added to the node with PV configured. The value is linear or striped.
• Use as ephemeral volume: applicable when there is a high performance requirement on EmptyDir.
                      NOTE:
                      • Local PVs are supported only when the cluster version is v1.21.2-r0 or later and the Everest add-on version is 2.1.23 or later. Version 2.1.23 or later is recommended.
                      • Local EVs are supported only when the cluster version is v1.21.2-r0 or later and the Everest add-on version is 1.2.29 or later.
                      Local Persistent Volumes and Local EVs support the following write modes:
                      • Linear: A linear logical volume integrates one or more physical volumes. Data is written to the next physical volume when the previous one is used up.
                      • Striped: A striped logical volume stripes data into blocks of the same size and stores them in multiple physical volumes in sequence, allowing data to be concurrently read and written. A storage pool consisting of striped volumes cannot be scaled-out. This option can be selected only when multiple volumes exist.
VPC/Node Subnet

                    The node subnet selected during cluster creation is used by default. You can choose another subnet instead.

Resource Tag

                    You can add resource tags to classify resources.

You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

                    CCE will automatically create the "CCE-Dynamic-Provisioning-Node=node id" tag.

Kubernetes Label

A key-value pair added to a Kubernetes object (such as a pod). After specifying a label, click Add Label for more. A maximum of 20 labels can be added. For an example of how a workload consumes a node label and taint, see the sketch after this procedure.

                    Labels can be used to distinguish nodes. With workload affinity settings, container pods can be scheduled to a specified node. For more information, see Labels and Selectors.


                    Taint

This parameter is left blank by default. You can add taints to configure node anti-affinity. A maximum of 20 taints are allowed for each node. Each taint contains the following parameters:
• Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
• Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
• Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.

                    For details, see Managing Node Taints.

                    NOTE:

                    For a cluster of v1.19 or earlier, the workload may have been scheduled to a node before the taint is added. To avoid such a situation, select a cluster of v1.19 or later.

ECS Group

An ECS group logically groups ECSs. The ECSs in the same ECS group comply with the same policy associated with the ECS group.

                    Anti-affinity: ECSs in an ECS group are deployed on different physical hosts to improve service reliability.

Select an existing ECS group, or click Add ECS Group to create one. After the ECS group is created, click the refresh icon.

                    Pre-installation Command

                    Enter commands. A maximum of 1000 characters are allowed.


                    Pre-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded.

                    The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed.

                    Post-installation Command

                    Enter commands. A maximum of 1000 characters are allowed.

Post-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded.

                    The script will be executed after Kubernetes software is installed, which does not affect the installation.

                    NOTE:

                    Do not run the reboot command in the post-installation script to restart the system immediately. To restart the system, run the shutdown -r 1 command to restart with a delay of one minute.


4. Configure the number of nodes to be created. Then, click Next: Confirm. Confirm the configured parameters and specifications.
5. Click Submit.

  The node list page is displayed. If the node status is Running, the node is created successfully. It takes about 6 to 10 minutes to create a node.

6. Click Back to Node List. The node is created successfully if it changes to the Running state.
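  The following is a minimal sketch of a pod that targets a node by the Kubernetes label and tolerates the taint configured above; the label key-value pair (node.purpose: demo) and the taint (dedicated=demo:NoSchedule) are illustrative assumptions, not values created by CCE.

  apiVersion: v1
  kind: Pod
  metadata:
    name: demo-on-labeled-node
  spec:
    nodeSelector:
      node.purpose: demo              # assumed node label added under Kubernetes Label
    tolerations:
    - key: dedicated                  # assumed taint key added under Taint
      operator: Equal
      value: demo
      effect: NoSchedule
    containers:
    - name: container-0
      image: nginx:latest             # illustrative image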
diff --git a/docs/cce/umn/cce_10_0365.html b/docs/cce/umn/cce_10_0365.html
index 2512c4ea..c9f6d9b9 100644
--- a/docs/cce/umn/cce_10_0365.html
+++ b/docs/cce/umn/cce_10_0365.html

options ndots:5
                    Configuration Options
                    • nameserver: an IP address list of a name server that the resolver will query. If this parameter is set to 10.247.x.x, the resolver will query the kube-dns/CoreDNS. If this parameter is set to another IP address, the resolver will query a cloud or on-premises DNS server.
                    • search: a search list for host-name lookup. When a domain name cannot be resolved, DNS queries will be attempted combining the domain name with each domain in the search list in turn until a match is found or all domains in the search list are tried. For CCE clusters, the search list is currently limited to three domains per container. When a nonexistent domain name is being resolved, eight DNS queries will be initiated because each domain name (including those in the search list) will be queried twice, one for IPv4 and the other for IPv6.
                    • options: options that allow certain internal resolver variables to be modified. Common options include timeout and ndots.

                      The value ndots:5 means that if a domain name has fewer than 5 dots (.), DNS queries will be attempted by combining the domain name with each domain in the search list in turn. If no match is found after all the domains in the search list are tried, the domain name is then used for DNS query. If the domain name has 5 or more than 5 dots, it will be tried first for DNS query. In case that the domain name cannot be resolved, DNS queries will be attempted by combining the domain name with each domain in the search list in turn.

For example, the domain name www.***.com has only two dots (fewer than the value of ndots), so the sequence of DNS queries is as follows: www.***.com.default.svc.cluster.local, www.***.com.svc.cluster.local, www.***.com.cluster.local, and www.***.com. This means that at least seven DNS queries will be initiated before the domain name is resolved into an IP address. Clearly, many unnecessary DNS queries are initiated when an external domain name is accessed, so there is room for improvement in the workload's DNS configuration, as sketched below.

                    For more information about configuration options in the resolver configuration file used by Linux operating systems, visit http://man7.org/linux/man-pages/man5/resolv.conf.5.html.
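One common mitigation is to lower ndots for the workload through dnsConfig so that external names with two or more dots skip the search list. A minimal sketch follows; the Deployment name, image, and the value 2 are illustrative assumptions, and the right value depends on how your service names are written.

  apiVersion: apps/v1
  kind: Deployment
  metadata:
    name: nginx
  spec:
    replicas: 1
    selector:
      matchLabels:
        app: nginx
    template:
      metadata:
        labels:
          app: nginx
      spec:
        containers:
        - name: container-0
          image: nginx:latest       # illustrative image
        dnsConfig:
          options:
          - name: ndots
            value: "2"              # names with two or more dots are queried directly, skipping the search list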

diff --git a/docs/cce/umn/cce_10_0367.html b/docs/cce/umn/cce_10_0367.html
index 59c06e9f..c61cef1a 100644
--- a/docs/cce/umn/cce_10_0367.html
+++ b/docs/cce/umn/cce_10_0367.html

                    Constraints

                    This feature is available only to clusters of v1.19 and later.

Customizing a SAN

1. Log in to the CCE console.
2. Click the target cluster in the cluster list to go to the cluster details page.
3. In the Connection Information area, click the icon next to Custom SAN. In the dialog box displayed, enter the IP address or domain name and click Save.

  1. This operation will restart kube-apiserver and update the kubeconfig.json file for a short period of time. Do not perform operations on the cluster during this period.

  2. A maximum of 128 domain names or IP addresses, separated by commas (,), are allowed.

  3. If a custom domain name needs to be bound to an EIP, ensure that an EIP has been configured.

                      Connecting to a Cluster Using the SAN

                      Using kubectl to access the cluster

1. Download the kubeconfig.json file again after the SAN is modified.

  1. Log in to the CCE console and click the cluster name to access the cluster console.
  2. On the Overview page, locate the Connection Info area, click Configure next to kubectl. On the page displayed, download the configuration file.

2. Configure kubectl.

  1. Log in to your client and copy the kubeconfig.json file downloaded in 1.b to the /home directory on your client.
  2. Configure the kubectl authentication file.

    cd /home
    mkdir -p $HOME/.kube
    mv -f kubeconfig.json $HOME/.kube/config

  3. Change the kubectl access mode and use the SAN to access the cluster.

    kubectl config use-context customSAN-0

                          In the preceding command, customSAN-0 indicates the configuration name of the custom SAN. If multiple SANs are configured, the number in the configuration name of each SAN starts from 0 and increases in ascending order, for example, customSAN-0, customSAN-1, and so on.

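After the new kubeconfig.json is in place, it contains one context per custom SAN in addition to its default entries. A rough, illustrative sketch of the relevant entries follows; the names, address, and port are assumptions, and the real file also carries the full certificate data.

  apiVersion: v1
  kind: Config
  clusters:
  - name: customSAN-0
    cluster:
      server: https://example.com:5443      # the custom SAN configured above
  contexts:
  - name: customSAN-0
    context:
      cluster: customSAN-0
      user: user-example                    # assumed user entry name
  current-context: customSAN-0
  users:
  - name: user-example
    user:
      client-certificate-data: <omitted>
      client-key-data: <omitted>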

                      Using an X.509 certificate to access the cluster

1. After the SAN is modified, download the X.509 certificate again.

  1. Log in to the CCE console and click the cluster name to access the cluster console.
  2. On the Overview page, locate the Connection Info area, and click Download next to X.509 certificate.
  3. In the Obtain Certificate dialog box displayed, select the certificate expiration time and download the X.509 certificate of the cluster as prompted.

2. Call native Kubernetes APIs using the cluster certificate.

  For example, run the curl command to call the APIs to view the pod information. In the following command, example.com:5443 indicates the custom SAN.

  curl --cacert ./ca.crt --cert ./client.crt --key ./client.key  https://example.com:5443/api/v1/namespaces/default/pods/

  For more cluster APIs, see Kubernetes API.

EVS: √ | x

SFS: x | √

OBS: x | √

SFS Turbo: x | √

Local PV: √ | x


                    Using an Existing OBS Bucket on the Console

                    1. Log in to the CCE console and click the cluster name to access the cluster console.
                    2. Statically create a PVC and PV.

1. Choose Storage in the navigation pane and click the PVCs tab. Click Create PVC in the upper right corner. In the dialog box displayed, configure the PVC parameters.

(a): The parameter is available when Creation Method is set to Use existing.

(b): The parameter is available when Creation Method is set to Create new.

2. Click Create to create a PVC and a PV.

  You can choose Storage in the navigation pane and view the created PVC and PV on the PVCs and PVs tab pages, respectively.

3. Create an application.

                        1. In the navigation pane on the left, click Workloads. In the right pane, click the Deployments tab.
                        2. Click Create Workload in the upper right corner. On the displayed page, click Data Storage in the Container Settings area and click Add Volume to select PVC.
    Mount and use storage volumes, as shown in Table 1. For details about other parameters, see Workloads.

Parameter

Description

PVC Type

In this example, select OBS.

PVC Name

Enter the PVC name, which must be unique in the same namespace.

Creation Method

• If underlying storage is available, create a storage volume or use an existing storage volume to statically create a PVC based on whether a PV is available.
• If no underlying storage is available, select Dynamically provision. For details, see Using an OBS Bucket Through a Dynamic PV.

In this example, select Create new to create a PV and PVC at the same time on the console.

PV (a)

Select an existing PV volume in the cluster. Create a PV in advance. For details, see "Creating a storage volume" in Related Operations.

You do not need to specify this parameter in this example.

OBS (b)

Click Select OBS. On the displayed page, select the OBS storage that meets your requirements and click OK.

PV Name (b)

Enter the PV name, which must be unique in the same cluster.

Access Mode (b)

OBS volumes support only ReadWriteMany, indicating that a storage volume can be mounted to multiple nodes in read/write mode. For details, see Volume Access Modes.

Reclaim Policy (b)

You can select Delete or Retain to specify the reclaim policy of the underlying storage when the PVC is deleted. For details, see PV Reclaim Policy.

NOTE:

If multiple PVs use the same OBS volume, use Retain to avoid cascading deletion of underlying volumes.

Access Key (AK/SK) (b)

Custom: Customize a secret if you want to assign different user permissions to different OBS storage devices. For details, see Using a Custom Access Key (AK/SK) to Mount an OBS Volume.

                        +

                        Custom: Customize a secret if you want to assign different user permissions to different OBS storage devices. For details, see Using a Custom Access Key (AK/SK) to Mount an OBS Volume.

                        Only secrets with the secret.kubernetes.io/used-by = csi label can be selected. The secret type is cfe/secure-opaque. If no secret is available, click Create Secret to create one.
                        • Name: Enter a secret name.
                        • Namespace: Select the namespace where the secret is.
                        • Access Key (AK/SK): Upload a key file in .csv format. For details, see Obtaining an Access Key.

                        Mount Optionsb

                        +

                        Mount Optionsb

                        Enter the mounting parameter key-value pairs. For details, see Configuring OBS Mount Options.

                        +

                        Enter the mounting parameter key-value pairs. For details, see Configuring OBS Mount Options.

3. Create an application.

  1. In the navigation pane on the left, click Workloads. In the right pane, click the Deployments tab.
  2. Click Create Workload in the upper right corner. On the displayed page, click Data Storage in the Container Settings area and click Add Volume to select PVC.

    Mount and use storage volumes, as shown in Table 1. For details about other parameters, see Workloads. In this example, the disk is mounted to the /data path of the container. The container data generated in this path is stored in the OBS volume.

  3. After the configuration, click Create Workload.

    After the workload is created, the data in the container mount directory will be persistently stored. Verify the storage by referring to Verifying Data Persistence and Sharing.

(kubectl) Using an Existing OBS Bucket

1. Use kubectl to access the cluster.
2. Create a PV.

  1. Create the pv-obs.yaml file (excerpt; the middle of the spec is omitted here):

    apiVersion: v1
    kind: PersistentVolume
    metadata:
      annotations:
        ...
    spec:
      ...
      storageClassName: csi-obs               # Storage class name.
      mountOptions: []                         # Mount options.
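The fields omitted from the excerpt above are the ones described in Table 2 further below. Purely as a reference, a complete static OBS PV could look like the following sketch; the bucket name, region, secret, the driver name, and the provisioned-by annotation are placeholders or assumptions rather than values confirmed by this guide, and the fsType comment follows Table 2:

  apiVersion: v1
  kind: PersistentVolume
  metadata:
    name: pv-obs                                                  # PV name; matches the volumeName used by the PVC later in this section.
    annotations:
      pv.kubernetes.io/provisioned-by: everest-csi-provisioner    # Assumed annotation for statically provisioned Everest volumes.
      everest.io/reclaim-policy: retain-volume-only               # (Optional) Retain only the underlying volume when the PVC is deleted.
  spec:
    accessModes:
      - ReadWriteMany                                  # OBS volumes support only ReadWriteMany.
    capacity:
      storage: 1Gi                                     # For OBS, used only for verification; the value is fixed at 1.
    csi:
      driver: obs.csi.everest.io                       # Assumed Everest CSI driver name for OBS.
      fsType: s3fs                                     # obsfs (parallel file system) or s3fs (object bucket).
      volumeHandle: <your_bucket_name>                 # OBS volume name.
      volumeAttributes:
        everest.io/obs-volume-type: STANDARD           # OBS storage class; valid only when fsType is s3fs.
        everest.io/region: <your_region>               # Region where the OBS bucket is deployed.
      nodePublishSecretRef:                            # (Optional) Custom AK/SK secret.
        name: <secret_name>
        namespace: <namespace>
    persistentVolumeReclaimPolicy: Retain              # Delete or Retain.
    storageClassName: csi-obs                          # Storage class name.
    mountOptions: []                                   # Mount options.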
Table 1 Mounting a storage volume

Parameter

Description

PVC

Select an existing object storage volume.

Mount Path

Enter a mount path, for example, /tmp.

This parameter indicates the container path to which a data volume will be mounted. Do not mount the volume to a system directory such as / or /var/run. Otherwise, containers will malfunction. Mount the volume to an empty directory. If the directory is not empty, ensure that there are no files that affect container startup. Otherwise, the files will be replaced, causing container startup failures or workload creation failures.

NOTICE:

If a volume is mounted to a high-risk directory, use an account with minimum permissions to start the container. Otherwise, high-risk files on the host machine may be damaged.

Subpath

Enter the subpath of the storage volume and mount a path in the storage volume to the container. In this way, different folders of the same storage volume can be used in a single pod. tmp, for example, indicates that data in the mount path of the container is stored in the tmp folder of the storage volume. If this parameter is left blank, the root path is used by default.

Permission

• Read-only: You can only read the data in the mounted volumes.
• Read/Write: You can modify the data volumes mounted to the path. Newly written data will not be migrated if the container is migrated, which may cause data loss.
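In a manifest, the Mount Path, Subpath, and Permission settings in Table 1 correspond to the volumeMounts fields of the container. A minimal sketch, with illustrative names, is:

  containers:
    - name: container-1                 # Assumed container name.
      volumeMounts:
        - name: pvc-obs-volume          # Volume name defined under volumes.
          mountPath: /data              # Container path to mount to.
          subPath: tmp                  # (Optional) Subpath in the storage volume.
          readOnly: true                # (Optional) Corresponds to the read-only permission.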
Table 2 Key parameters

Parameter

Mandatory

Description

everest.io/reclaim-policy: retain-volume-only

No

Optional.

Currently, only retain-volume-only is supported.

This field is valid only when the Everest version is 1.2.9 or later and the reclaim policy is Delete. If the reclaim policy is Delete and the current value is retain-volume-only, the associated PV is deleted while the underlying storage volume is retained, when a PVC is deleted.

fsType

Yes

Instance type. The value can be obsfs or s3fs.

• obsfs: Parallel file system, which is mounted using obsfs (recommended).
• s3fs: Object bucket, which is mounted using s3fs.

volumeHandle

Yes

OBS volume name.

everest.io/obs-volume-type

Yes

OBS storage class.

• If fsType is set to s3fs, STANDARD (standard bucket) and WARM (infrequent access bucket) are supported.
• This parameter is invalid when fsType is set to obsfs.

everest.io/region

Yes

Region where the OBS bucket is deployed.

For details about the value of region, see Regions and Endpoints.

nodePublishSecretRef

No

Access key (AK/SK) used for mounting the object storage volume. You can use the AK/SK to create a secret and mount it to the PV. For details, see Using a Custom Access Key (AK/SK) to Mount an OBS Volume.

An example is as follows:

  nodePublishSecretRef:
    name: <secret_name>
    namespace: <namespace>

mountOptions

No

Mount options. For details, see Configuring OBS Mount Options.

persistentVolumeReclaimPolicy

Yes

A reclaim policy is supported when the cluster version is 1.19.10 or later and the Everest version is 1.2.9 or later.

The Delete and Retain reclaim policies are supported. For details, see PV Reclaim Policy. If multiple PVs use the same OBS volume, use Retain to avoid cascading deletion of underlying volumes.

Retain: When a PVC is deleted, the PV and underlying storage resources are not deleted. Instead, you must manually delete these resources. After that, the PV is in the Released status and cannot be bound to the PVC again.

storage

Yes

Storage capacity, in Gi.

For OBS, this field is used only for verification (cannot be empty or 0). Its value is fixed at 1, and any value you set does not take effect for OBS.

storageClassName

Yes

The storage class name of OBS volumes is csi-obs.

3. After the PV is created, create a PVC that is bound to it. The end of the PVC manifest must reference the same storage class and the PV name:

    storageClassName: csi-obs               # Storage class name, which must be the same as that of the PV.
    volumeName: pv-obs                      # PV name.

4. Create a workload to which the OBS volume is mounted.

  1. Create the web-demo.yaml file. The key part of the file is as follows:

    spec:
      containers:
        - image: nginx:latest
          volumeMounts:
            - name: pvc-obs-volume          # Volume name, which must be the same as the volume name in the volumes field.
              mountPath: /data              # Location where the storage volume is mounted.
      imagePullSecrets:
        - name: default-secret
      volumes:
        - name: pvc-obs-volume              # Volume name, which can be customized.
          persistentVolumeClaim:
            claimName: pvc-obs              # Name of the created PVC.
  2. Run the following command to create the workload to which the OBS volume is mounted:

    kubectl apply -f web-demo.yaml

    After the workload is created, you can try Verifying Data Persistence and Sharing.

Related Operations

You can also perform the operations listed in Table 4.
Table 3 Key parameters

Parameter

Mandatory

Description

csi.storage.k8s.io/node-publish-secret-name

No

Name of the custom secret specified in the PV.

csi.storage.k8s.io/node-publish-secret-namespace

No

Namespace of the custom secret specified in the PV.

storage

Yes

Requested capacity in the PVC, in Gi.

For OBS, this field is used only for verification (cannot be empty or 0). Its value is fixed at 1, and any value you set does not take effect for OBS.

storageClassName

Yes

Storage class name, which must be the same as the storage class of the PV in 1.

The storage class name of OBS volumes is csi-obs.

volumeName

Yes

PV name, which must be the same as the PV name in 1.
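Combining the parameters in Table 3, a PVC that statically binds to the PV above might look like the following sketch; the secret annotations are only needed with a custom AK/SK, and treating them as PVC annotations (with placeholder values) is an assumption taken from the custom access key guide rather than from this section:

  apiVersion: v1
  kind: PersistentVolumeClaim
  metadata:
    name: pvc-obs                                                   # PVC name referenced by the workload (claimName: pvc-obs).
    namespace: default
    annotations:
      csi.storage.k8s.io/node-publish-secret-name: <secret_name>    # (Optional) Name of the custom secret specified in the PV.
      csi.storage.k8s.io/node-publish-secret-namespace: <namespace> # (Optional) Namespace of the custom secret specified in the PV.
  spec:
    accessModes:
      - ReadWriteMany                  # OBS volumes support only ReadWriteMany.
    resources:
      requests:
        storage: 1Gi                   # For OBS, used only for verification; the value is fixed at 1.
    storageClassName: csi-obs          # Must be the same as the storage class of the PV.
    volumeName: pv-obs                 # Must be the same as the PV name.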

diff --git a/docs/cce/umn/cce_10_0380.html b/docs/cce/umn/cce_10_0380.html

StorageClass

Introduction

StorageClass describes the classification of storage types in a cluster and can be represented as a configuration template for creating PVs. When creating a PVC or PV, specify StorageClass.

As a user, you only need to specify storageClassName when defining a PVC to automatically create a PV and underlying storage, significantly reducing the workload of creating and maintaining a PV.

In addition to the default storage classes provided by CCE, you can also customize storage classes.

CCE Default Storage Classes

As of now, CCE provides storage classes such as csi-disk, csi-nas, and csi-obs by default. When defining a PVC, you can use a storageClassName to automatically create a PV of the corresponding type and automatically create underlying storage resources.

An example definition of the default EVS disk storage class is as follows (excerpt):

  provisioner: everest-csi-provisioner
  parameters:
    csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io
    csi.storage.k8s.io/fstype: ext4    # (Optional) Set the file system type to xfs or ext4. If it is left blank, ext4 is used by default.
    everest.io/disk-volume-type: SAS
    everest.io/passthrough: 'true'
  reclaimPolicy: Delete
  allowVolumeExpansion: true
  volumeBindingMode: Immediate
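To illustrate that only storageClassName is needed, a PVC that dynamically provisions an EVS disk through the default csi-disk class might look like the following sketch; the PVC name and requested size are assumptions:

  apiVersion: v1
  kind: PersistentVolumeClaim
  metadata:
    name: pvc-evs-example            # Assumed name for illustration.
    namespace: default
  spec:
    accessModes:
      - ReadWriteOnce                # EVS disks are mounted to a single node in read/write mode.
    resources:
      requests:
        storage: 10Gi                # Assumed EVS disk size.
    storageClassName: csi-disk       # Default EVS storage class; the PV and underlying disk are created automatically.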
Table 4 Related operations

Operation

Description

Procedure

Creating a storage volume (PV)

Create a PV on the CCE console.

1. Choose Storage in the navigation pane and click the PVs tab. Click Create PersistentVolume in the upper right corner. In the dialog box displayed, configure parameters.
  • Volume Type: Select OBS.
  • OBS: Click Select OBS. On the displayed page, select the OBS storage that meets your requirements and click OK.
  • PV Name: Enter the PV name, which must be unique in the same cluster.
  • Access Mode: OBS volumes support only ReadWriteMany, indicating that a storage volume can be mounted to multiple nodes in read/write mode. For details, see Volume Access Modes.
  • Reclaim Policy: Delete or Retain is supported. For details, see PV Reclaim Policy.
    NOTE:
    If multiple PVs use the same underlying storage volume, use Retain to avoid cascading deletion of underlying volumes.
  • Access Key (AK/SK): Customize a secret if you want to assign different user permissions to different OBS storage devices. For details, see Using a Custom Access Key (AK/SK) to Mount an OBS Volume.
    Only secrets with the secret.kubernetes.io/used-by = csi label can be selected. The secret type is cfe/secure-opaque. If no secret is available, click Create Secret to create one.
  • Mount Options: Enter the mounting parameter key-value pairs. For details, see Configuring OBS Mount Options.
2. Click Create.

Updating an access key

Update the access key of object storage on the CCE console.

1. Choose Storage in the navigation pane and click the PVCs tab. Click More in the Operation column of the target PVC and select Update Access Key.
2. Upload a key file in .csv format. For details, see Obtaining an Access Key. Click OK.
  NOTE:
  After a global access key is updated, all pods mounted with the object storage that uses this access key can be accessed only after being restarted.

Viewing events

You can view event names, event types, number of occurrences, Kubernetes events, first occurrence time, and last occurrence time of the PVC or PV.

1. Choose Storage in the navigation pane and click the PVCs or PVs tab.
2. Click View Events in the Operation column of the target PVC or PV to view events generated within one hour (event data is retained for one hour).

Viewing a YAML file

You can view, copy, and download the YAML files of a PVC or PV.

1. Choose Storage in the navigation pane and click the PVCs or PVs tab.
2. Click View YAML in the Operation column of the target PVC or PV to view or download the YAML.
Create the priorityClasses for online and offline jobs in advance. The definition applied with kubectl ends as follows (excerpt):

  preemptionPolicy: PreemptLowerPriority
  value: -99999
EOF
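The testing and production priorityClasses referenced in the next step have to exist in the cluster. A minimal sketch of the two objects is shown below; only the -99999 value and the PreemptLowerPriority policy appear in the excerpt above, so the remaining values, and which class the excerpt belongs to, are assumptions:

  apiVersion: scheduling.k8s.io/v1
  kind: PriorityClass
  metadata:
    name: testing                               # Used by the offline job in this example.
  value: -99999                                 # Low priority value from the excerpt above (assumed to belong to testing).
  preemptionPolicy: PreemptLowerPriority
  globalDefault: false
  ---
  apiVersion: scheduling.k8s.io/v1
  kind: PriorityClass
  metadata:
    name: production                            # Used by the online job in this example.
  value: 99999                                  # Assumed high priority value for online jobs.
  preemptionPolicy: PreemptLowerPriority
  globalDefault: false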

Deploy online and offline jobs and configure priorityClasses for these jobs.

The volcano.sh/qos-level annotation needs to be added to distinguish offline jobs. The value is an integer ranging from -7 to 7. If the value is less than 0, the job is an offline job. If the value is greater than or equal to 0, the job is a high-priority job, that is, an online job. You do not need to set this annotation for online jobs. For both online and offline jobs, set schedulerName to volcano to enable Volcano Scheduler.

The priorities of online/online and offline/offline jobs are not differentiated, and the value validity is not verified. If the value of volcano.sh/qos-level of an offline job is not a negative integer ranging from -7 to 0, the job is processed as an online job.

For an offline job:

  kind: Deployment
  ...
        annotations:
          metrics.alpha.kubernetes.io/custom-endpoints: '[{"api":"","path":"","port":"","names":""}]'
          volcano.sh/qos-level: "-1"       # Offline job label
      spec:
        schedulerName: volcano             # Volcano is used.
        priorityClassName: testing         # Configure the testing priorityClass.
        ...

For an online job:

  kind: Deployment
  ...
        annotations:
          metrics.alpha.kubernetes.io/custom-endpoints: '[{"api":"","path":"","port":"","names":""}]'
      spec:
        schedulerName: volcano             # Volcano is used.
        priorityClassName: production      # Configure the production priorityClass.
        ...
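Putting the fragments together, a complete offline job could look like the following sketch; the workload name, image, replica count, and resource requests are assumptions, while the annotation, schedulerName, and priorityClassName follow the fragments above:

  apiVersion: apps/v1
  kind: Deployment
  metadata:
    name: offline-demo                       # Assumed workload name.
  spec:
    replicas: 1
    selector:
      matchLabels:
        app: offline-demo
    template:
      metadata:
        labels:
          app: offline-demo
        annotations:
          volcano.sh/qos-level: "-1"         # Offline job label (negative integer from -7 to 0).
      spec:
        schedulerName: volcano               # Volcano is used.
        priorityClassName: testing           # Configure the testing priorityClass.
        containers:
          - name: container-1
            image: nginx:latest              # Assumed image.
            resources:
              requests:
                cpu: 500m                    # Assumed request.
                memory: 512Mi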

Run the following command to check the number of oversubscribed resources and the resource usage:

  kubectl describe node <nodeIP>

  Allocated resources:
    Resource   Requests       Limits
    --------   --------       ------
    cpu        4950m (126%)   4950m (126%)
    memory     1712Mi (27%)   1712Mi (27%)

In the preceding output, CPU and memory are in the unit of mCPUs and MiB, respectively.

Deployment Example

The following uses an example to describe how to deploy online and offline jobs in hybrid mode.

1. Configure a cluster with two nodes, one oversubscribed and the other non-oversubscribed.

  # kubectl get node
  NAME            STATUS   ROLES    AGE     VERSION
  192.168.0.173   Ready    <none>   4h58m   v1.19.16-r2-CCE22.5.1
  192.168.0.3     Ready    <none>   148m    v1.19.16-r2-CCE22.5.1

2. Deploy the offline and online jobs configured with schedulerName: volcano and the testing or production priorityClass (in this example, they are pinned to the oversubscribed node through node affinity). After deployment, the pods are running on the oversubscribed node 192.168.0.173:

  offline-69cdd49bf4-z8kxh   1/1   Running   0   13m     192.168.10.131   192.168.0.173
  online-6f44bb68bd-b8z9p    1/1   Running   0   3m4s    192.168.10.18    192.168.0.173
  online-6f44bb68bd-g6xk8    1/1   Running   0   3m12s   192.168.10.69    192.168.0.173

3. Check the oversubscribed node with IP address 192.168.0.173. It is found that oversubscribed resources exist (2343m CPU and 3073653200 memory in this example) and the CPU allocation rate exceeds 100%.

  # kubectl describe node 192.168.0.173
  Name:    192.168.0.173
  Roles:   <none>
  Labels:  …

Handling Suggestions

• After kubelet of the oversubscribed node is restarted, the resource view of Volcano Scheduler is not synchronized with that of kubelet. As a result, OutOfCPU occurs in some newly scheduled jobs, which is normal. After a period of time, Volcano Scheduler can properly schedule online and offline jobs.
• After online and offline jobs are submitted, you are not advised to dynamically change the job type (adding or deleting the annotation volcano.sh/qos-level: "-1") because the current kernel does not support the change of an offline job to an online job.
• CCE collects the resource usage (CPU/memory) of all pods running on a node based on the status information in the cgroups system. The resource usage may be different from the monitored resource usage, for example, the resource statistics displayed by running the top command.
• You can add oversubscribed resources (such as CPU and memory) at any time.

  You can reduce the oversubscribed resource types only when the resource allocation rate does not exceed 100%.

• If an offline job is deployed on a node ahead of an online job and the online job cannot be scheduled due to insufficient resources, configure a higher priorityClass for the online job than that for the offline job.
• If there are only online jobs on a node and the eviction threshold is reached, the offline jobs that are scheduled to the current node will be evicted soon. This is normal.
diff --git a/docs/cce/umn/cce_10_0385.html b/docs/cce/umn/cce_10_0385.html

Using Annotations to Balance Load
Table 1 Key parameters

Parameter

Description

provisioner

Specifies the storage resource provider, which is the Everest add-on for CCE. Set this parameter to everest-csi-provisioner.

parameters

Specifies the storage parameters, which vary with storage types. For details, see Table 2.

reclaimPolicy

Specifies the value of persistentVolumeReclaimPolicy for creating a PV. The value can be Delete or Retain. If reclaimPolicy is not specified when a StorageClass object is created, the value defaults to Delete.

• Delete: indicates that a dynamically created PV will be automatically destroyed.
• Retain: indicates that a dynamically created PV will not be automatically destroyed.

allowVolumeExpansion

Specifies whether the PV of this storage class supports dynamic capacity expansion. The default value is false. Dynamic capacity expansion is implemented by the underlying storage add-on. This is only a switch.

volumeBindingMode

Specifies the volume binding mode, that is, the time when a PV is dynamically created. The value can be Immediate or WaitForFirstConsumer.

• Immediate: PV binding and dynamic creation are completed when a PVC is created.
• WaitForFirstConsumer: PV binding and creation are delayed. The PV creation and binding processes are executed only when the PVC is used in the workload.

mountOptions

This field must be supported by the underlying storage. If this field is not supported but is specified, the PV creation will fail.

Custom Storage Class

This section uses the custom storage class of EVS disks as an example to describe how to define a SAS EVS disk and an SSD EVS disk as a storage class, respectively. For example, if you define a storage class named csi-disk-sas, which is used to create SAS storage, the differences between the two storage classes are shown in the following examples. When compiling a YAML file, you only need to specify storageClassName.

• You can customize a high I/O storage class in a YAML file. For example, the name csi-disk-sas indicates that the disk type is SAS (high I/O).

  apiVersion: storage.k8s.io/v1
  kind: StorageClass
  metadata:
    name: csi-disk-sas                          # Name of the high I/O storage class, which can be customized.
  parameters:
    csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io
    csi.storage.k8s.io/fstype: ext4             # (Optional) Set the file system type to xfs or ext4. If it is left blank, ext4 is used by default.
    everest.io/disk-volume-type: SAS            # High I/O EVS disk type, which cannot be customized.
    everest.io/passthrough: "true"
  provisioner: everest-csi-provisioner
  reclaimPolicy: Delete
  volumeBindingMode: Immediate

• Similarly, you can customize an ultra-high I/O storage class. For example, the name csi-disk-ssd indicates that the disk type is SSD (ultra-high I/O).

  apiVersion: storage.k8s.io/v1
  kind: StorageClass
  metadata:
    name: csi-disk-ssd                          # Name of the ultra-high I/O storage class, which can be customized.
  parameters:
    csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io
    csi.storage.k8s.io/fstype: ext4             # (Optional) Set the file system type to xfs or ext4. If it is left blank, ext4 is used by default.
    everest.io/disk-volume-type: SSD            # Ultra-high I/O EVS disk type, which cannot be customized.
    everest.io/passthrough: "true"
  provisioner: everest-csi-provisioner
  reclaimPolicy: Delete
  volumeBindingMode: Immediate

The customized storage classes then appear alongside the default ones when you list the storage classes in the cluster (output excerpt):

  csi-nas             everest-csi-provisioner         17d
  csi-obs             everest-csi-provisioner         17d
  csi-sfsturbo        everest-csi-provisioner         17d

Specifying a Default Storage Class

You can specify a storage class as the default class. In this way, if you do not specify storageClassName when creating a PVC, the PVC is created using the default storage class.

For example, to specify csi-disk-ssd as the default storage class, edit your YAML file as follows:

  apiVersion: storage.k8s.io/v1
  kind: StorageClass
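A full definition might look like the following sketch; it reuses the csi-disk-ssd parameters shown earlier and assumes the standard storageclass.kubernetes.io/is-default-class annotation is what marks the class as default:

  apiVersion: storage.k8s.io/v1
  kind: StorageClass
  metadata:
    name: csi-disk-ssd
    annotations:
      storageclass.kubernetes.io/is-default-class: "true"   # Marks this class as the cluster default.
  parameters:
    csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io
    csi.storage.k8s.io/fstype: ext4           # (Optional) ext4 or xfs; ext4 by default.
    everest.io/disk-volume-type: SSD          # Ultra-high I/O EVS disk type.
    everest.io/passthrough: "true"
  provisioner: everest-csi-provisioner
  reclaimPolicy: Delete
  volumeBindingMode: Immediate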
diff --git a/docs/cce/umn/cce_10_0381.html b/docs/cce/umn/cce_10_0381.html
                         

Snapshots and Backups

CCE works with EVS to support snapshots. A snapshot is a complete copy or image of EVS disk data at a certain point of time, which can be used for data DR.

You can create snapshots to rapidly save the disk data at a certain point of time. In addition, you can use snapshots to create disks so that the created disks will contain the snapshot data in the beginning.

Precautions

• The snapshot function is available only for clusters of v1.15 or later and requires the CSI-based Everest add-on.
• The subtype (common I/O, high I/O, or ultra-high I/O), disk mode (SCSI or VBD), data encryption, sharing status, and capacity of an EVS disk created from a snapshot must be the same as those of the disk associated with the snapshot. These attributes cannot be modified after being queried or set.
• Snapshots can be created only for EVS disks that are available or in use, and a maximum of seven snapshots can be created for a single EVS disk.
• Snapshots can be created only for PVCs created using the storage class (whose name starts with csi) provided by the Everest add-on. Snapshots cannot be created for PVCs created using the Flexvolume storage class whose name is ssd, sas, or sata.
• Snapshot data of encrypted disks is stored encrypted, and that of non-encrypted disks is stored non-encrypted.
• A PVC of the xfs file system type can generate snapshots. The file system of the disk associated with the PVC created using these snapshots remains xfs.

Application Scenarios

The snapshot feature helps address your following needs:

• Routine data backup

  You can create snapshots for EVS disks regularly and use snapshots to recover your data in case data loss or data inconsistency occurred due to misoperations, viruses, or attacks.

When you create a disk (PVC) from a snapshot, the PVC metadata includes annotations and labels such as the following (excerpt):

  metadata:
    namespace: default
    annotations:
      everest.io/disk-volume-type: SSD                                  # EVS disk type, which must be the same as that of the snapshot's source EVS disk.
      everest.io/disk-volume-tags: '{"key1":"value1","key2":"value2"}'  # (Optional) Custom resource tags
      csi.storage.k8s.io/fstype: xfs                                    # (Optional) Configure this field when the snapshot file system type is xfs.
    labels:
      failure-domain.beta.kubernetes.io/region: <your_region>           # Replace the region with the one where the EVS disk is located.
      failure-domain.beta.kubernetes.io/zone: <your_zone>               # Replace the AZ with the one where the EVS disk is located.
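For reference, a full PVC created from a snapshot might look like the following sketch; the PVC and snapshot names, size, and storage class are assumptions, and the dataSource block is the standard CSI way to reference a VolumeSnapshot:

  apiVersion: v1
  kind: PersistentVolumeClaim
  metadata:
    name: pvc-from-snapshot                # Assumed name.
    namespace: default
    annotations:
      everest.io/disk-volume-type: SSD     # EVS disk type, which must be the same as that of the snapshot's source EVS disk.
      csi.storage.k8s.io/fstype: xfs       # (Optional) Only when the snapshot file system type is xfs.
    labels:
      failure-domain.beta.kubernetes.io/region: <your_region>
      failure-domain.beta.kubernetes.io/zone: <your_zone>
  spec:
    accessModes:
      - ReadWriteOnce                      # EVS disks are mounted to a single node in read/write mode.
    resources:
      requests:
        storage: 10Gi                      # Must match the capacity of the snapshot's source disk (assumed value).
    storageClassName: csi-disk             # EVS storage class provided by the Everest add-on.
    dataSource:
      name: cce-disksnap-demo              # Assumed VolumeSnapshot name.
      kind: VolumeSnapshot
      apiGroup: snapshot.storage.k8s.io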

diff --git a/docs/cce/umn/cce_10_0384.html b/docs/cce/umn/cce_10_0384.html

Many services see surges in traffic. To ensure performance and stability, resources are often requested at the maximum needed. However, the surges may ebb very shortly and resources, if not released, are wasted in non-peak hours. Especially for online jobs that request a large quantity of resources to ensure SLA, resource utilization can be extremely low.

                          Resource oversubscription is the process of making use of idle requested resources. Oversubscribed resources are suitable for deploying offline jobs, which focus on throughput but have low SLA requirements and can tolerate certain failures.

                          Hybrid deployment of online and offline jobs in a cluster can better utilize cluster resources.

Figure 1 Resource oversubscription

                          Features

                          After dynamic resource oversubscription and elastic scaling are enabled in a node pool, oversubscribed resources change rapidly because the resource usage of high-priority applications changes in real time. To prevent frequent node scale-ins and scale-outs, do not consider oversubscribed resources when evaluating node scale-ins.

                          Hybrid deployment is supported, and CPU and memory resources can be oversubscribed. The key features are as follows:

                          • Online jobs can use only non-oversubscribed resources if scheduled to an oversubscribed node.

                            Offline jobs can use both oversubscribed and non-oversubscribed resources of an oversubscribed node.

                          • In the same scheduling period, online jobs take precedence over offline jobs.

                            If both online and offline jobs exist, online jobs are scheduled first. When the node resource usage exceeds the upper limit and the node requests exceed 100%, offline jobs will be evicted.

• CPU/Memory isolation is provided by kernels.

  CPU isolation: Online jobs can quickly preempt CPU resources of offline jobs and suppress the CPU usage of the offline jobs.

  Memory isolation: When system memory resources are used up and OOM Kill is triggered, the kernel evicts offline jobs first.
• kubelet offline job admission rules:

  After a pod is scheduled to a node, kubelet starts the pod only when the node resources can meet the pod request (predicateAdmitHandler.Admit). kubelet starts the pod when both of the following conditions are met:

  • The total request of the pod to be started and the running online jobs < allocatable node resources
  • The total request of the pod to be started and the running online/offline jobs < allocatable node resources + oversubscribed resources
                          • Resource oversubscription and hybrid deployment:

                            If only hybrid deployment is used, configure the label volcano.sh/colocation=true for the node and delete the node label volcano.sh/oversubscription or set its value to false.

If the label volcano.sh/colocation=true is configured for a node, hybrid deployment is enabled. If the label volcano.sh/oversubscription=true is configured, resource oversubscription is enabled. The following table lists the available feature combinations after hybrid deployment or resource oversubscription is enabled.
                        - - @@ -44,7 +44,7 @@ - - - @@ -74,7 +74,7 @@

                        kubelet Oversubscription

                        Specifications
                        • Cluster version
                          • v1.19: v1.19.16-r4 or later
                          • v1.21: v1.21.7-r0 or later
                          • v1.23: v1.23.5-r0 or later
                          • v1.25 or later
                        • Cluster type: CCE Standard or CCE Turbo
                        • Node OS: EulerOS 2.9 (kernel-4.18.0-147.5.1.6.h729.6.eulerosv2r9.x86_64)or HCE OS 2.0
                        • Node type: ECS
                        • Volcano version: 1.7.0 or later
                        Constraints
                        • Before enabling oversubscription, ensure that the overcommit add-on is not enabled on Volcano.
                        • Modifying the label of an oversubscribed node does not affect the running pods.
                        • Running pods cannot be converted between online and offline services. To convert services, you need to rebuild the pods.
                        • If the label volcano.sh/oversubscription=true is configured for a node in the cluster, the oversubscription configuration must be added to the Volcano add-on. Otherwise, the scheduling of oversubscribed nodes will be abnormal. Ensure that you have correctly configured the labels because the scheduler does not check the add-on and node configurations. For details about the labels, see Table 1.
                        • To disable oversubscription, perform the following operations:
                          • Remove the volcano.sh/oversubscription label from the oversubscribed node.
                          • Set over-subscription-resource to false.
                          • Modify the configmap of the Volcano scheduler named volcano-scheduler-configmap and remove the oversubscription add-on.
                        • If cpu-manager-policy is set to static core binding on a node, do not assign the QoS class of Guaranteed to offline pods. If core binding is required, change the pods to online pods. Otherwise, offline pods may occupy the CPUs of online pods, causing online pod startup failures, and offline pods will fail to start even though they are successfully scheduled.
                        • If cpu-manager-policy is set to static core binding on a node, do not bind cores to all online pods. Otherwise, online pods will occupy all CPU or memory resources, leaving only a small number of oversubscribed resources.
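                        As a minimal sketch of the disabling steps listed above (the node name is a placeholder, and the node pool setting itself is changed on the console):

                        # 1) Remove the oversubscription label from the node.
                        kubectl label node <node-name> volcano.sh/oversubscription-
                        # 2) Set over-subscription-resource to false in the node pool kubelet configuration (console operation).
                        # 3) Remove the oversubscription add-on from the scheduler configuration.
                        kubectl edit cm volcano-scheduler-configmap -n kube-system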
                        Table 2 Parameters

                        EVS
                        • csi.storage.k8s.io/csi-driver-name (mandatory): Driver type. If an EVS disk is used, the parameter value is fixed at disk.csi.everest.io.
                        • csi.storage.k8s.io/fstype (mandatory): If an EVS disk is used, the parameter value can be ext4 or xfs.
                          The restrictions on using xfs are as follows:
                          • The nodes must run CentOS 7 or Ubuntu 22.04, and the Everest version in the cluster must be 2.3.2 or later.
                          • Only common containers are supported.
                        • everest.io/disk-volume-type (mandatory): EVS disk type. All letters are in uppercase.
                          • SATA: common I/O
                          • SAS: high I/O
                          • SSD: ultra-high I/O
                        • everest.io/passthrough (mandatory): The parameter value is fixed at true, which indicates that the EVS device type is SCSI. Other parameter values are not allowed.

                        SFS
                        • csi.storage.k8s.io/csi-driver-name (mandatory): Driver type. If SFS is used, the parameter value is fixed at nas.csi.everest.io.
                        • csi.storage.k8s.io/fstype (mandatory): If SFS is used, the value can be nfs.
                        • everest.io/share-access-level (mandatory): The parameter value is fixed at rw, indicating that the SFS data is readable and writable.
                        • everest.io/share-access-to (mandatory): VPC ID of the cluster.
                        • everest.io/share-is-public (optional): The parameter value is fixed at false, indicating that the file system is private. You do not need to configure this parameter when SFS 3.0 is used.
                        • everest.io/sfs-version (optional): This parameter is mandatory only when SFS 3.0 is used. The value is fixed at sfs3.0.

                        SFS Turbo
                        • csi.storage.k8s.io/csi-driver-name (mandatory): Driver type. If SFS Turbo is used, the parameter value is fixed at sfsturbo.csi.everest.io.
                        • csi.storage.k8s.io/fstype (mandatory): If SFS Turbo is used, the value can be nfs.
                        • everest.io/share-access-to (mandatory): VPC ID of the cluster.
                        • everest.io/share-expand-type (optional): Extension type. The default value is bandwidth, indicating an enhanced file system. This parameter does not take effect.
                        • everest.io/share-source (mandatory): The parameter value is fixed at sfs-turbo.
                        • everest.io/share-volume-type (optional): SFS Turbo storage class. The default value is STANDARD, indicating the standard and standard enhanced editions. This parameter does not take effect.

                        OBS
                        • csi.storage.k8s.io/csi-driver-name (mandatory): Driver type. If OBS is used, the parameter value is fixed at obs.csi.everest.io.
                        • csi.storage.k8s.io/fstype (mandatory): Instance type, which can be obsfs or s3fs.
                          • obsfs: parallel file system, which is mounted using obsfs (recommended).
                          • s3fs: object bucket, which is mounted using s3fs.
                        • everest.io/obs-volume-type (mandatory): OBS storage class.
                          • If fsType is set to s3fs, STANDARD (standard bucket) and WARM (infrequent access bucket) are supported.
                          • This parameter is invalid when fsType is set to obsfs.

                        If you set the default StorageClass in the cluster, you can create storage without specifying the storageClassName in the YAML file.
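                        As a minimal sketch of how the EVS parameters from Table 2 fit together in a StorageClass, see the manifest below. The resource name and the everest-csi-provisioner value are illustrative assumptions based on the Everest add-on; verify them against your cluster before use.

                        apiVersion: storage.k8s.io/v1
                        kind: StorageClass
                        metadata:
                          name: csi-disk-example                                     # Hypothetical name for illustration.
                        provisioner: everest-csi-provisioner                         # Assumed Everest CSI provisioner name.
                        parameters:
                          csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io    # Fixed value for EVS disks (Table 2).
                          csi.storage.k8s.io/fstype: ext4                            # ext4 or xfs.
                          everest.io/disk-volume-type: SAS                           # SATA, SAS, or SSD.
                          everest.io/passthrough: "true"                             # Fixed at true (SCSI device type).
                        reclaimPolicy: Delete
                        volumeBindingMode: Immediate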

                        Specifying a Default Storage Class
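                        The body of this section is not included in this excerpt. As a minimal sketch of the standard Kubernetes mechanism, a StorageClass can be marked as the cluster default with the storageclass.kubernetes.io/is-default-class annotation; the class name below is a placeholder.

                        kubectl patch storageclass csi-disk-example -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}'
                        # PVCs created without spec.storageClassName will then use this StorageClass.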

                        Resource Oversubscription Enabled (volcano.sh/oversubscription=true)

                        Resource Oversubscription

                        When Offline Pod Eviction Triggered (Using Annotations to Configure Limits)

                        No

                        The actual resource usage of a node exceeds the upper limit.

                        No

                        Yes

                        The actual resource usage of a node exceeds the upper limit and the pod requests on the node exceed 100%.

                        Yes

                        Yes

                        The actual resource usage of a node exceeds the upper limit.
                        1. Configure the Volcano add-on.

                          1. Use kubectl to access the cluster.
                          2. Install the Volcano add-on and add the oversubscription add-on to volcano-scheduler-configmap. Ensure that the add-on configuration does not contain the overcommit add-on. If - name: overcommit exists, delete this configuration. In addition, set enablePreemptable and enableJobStarving of the gang add-on to false and configure a preemption action.
                            # kubectl edit cm volcano-scheduler-configmap -n kube-system
                             apiVersion: v1
                             data:
                               volcano-scheduler.conf: |
                                 actions: "allocate, backfill, preempt"   # Configure a preemption action.
                                 tiers:
                                 - plugins:
                                   - name: gang
                                 ...
                                   - name: cce-gpu-topology-priority
                                   - name: cce-gpu

                        3. Enable the node oversubscription feature.

                          A label can be configured to use oversubscribed resources only after the oversubscription feature is enabled for a node. Related nodes can be created only in a node pool. To enable the oversubscription feature, perform the following steps:

                          1. Create a node pool.
                          2. Choose Manage in the Operation column of the created node pool.
                          3. On the Manage Configurations page, enable Node oversubscription feature (over-subscription-resource) and click OK.

                            4. Set the node oversubscription label.

                              The volcano.sh/oversubscription label needs to be configured for an oversubscribed node. If this label is set for a node and the value is true, the node is an oversubscribed node. Otherwise, the node is not an oversubscribed node.

                              kubectl label node 192.168.0.0 volcano.sh/oversubscription=true

                              An oversubscribed node also supports the oversubscription thresholds, as listed in Table 2. For example:

                              Annotations: ...

                        volcano.sh/evicting-cpu-high-watermark

                        Upper limit for CPU usage. When the CPU usage of a node exceeds the specified value, offline job eviction is triggered and the node becomes unschedulable.

                        The default value is 80, indicating that offline job eviction is triggered when the CPU usage of a node exceeds 80%.

                        volcano.sh/evicting-cpu-low-watermark

                        Lower limit for CPU usage. After eviction is triggered, the scheduling starts again when the CPU usage of a node is lower than the specified value.

                        The default value is 30, indicating that scheduling starts again when the CPU usage of a node is lower than 30%.

                        volcano.sh/evicting-memory-high-watermark

                        Upper limit for memory usage. When the memory usage of a node exceeds the specified value, offline job eviction is triggered and the node becomes unschedulable.

                        The default value is 60, indicating that offline job eviction is triggered when the memory usage of a node exceeds 60%.

                        volcano.sh/evicting-memory-low-watermark

                        Lower limit for memory usage. After eviction is triggered, the scheduling starts again when the memory usage of a node is lower than the specified value.

                        The default value is 30, indicating that the scheduling starts again when the memory usage of a node is less than 30%.

                        volcano.sh/oversubscription-types

                        Oversubscribed resource type. Options:
                        • cpu: oversubscribed CPU
                        • memory: oversubscribed memory
                        • cpu,memory: oversubscribed CPU and memory

                        The default value is cpu,memory.
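                        For instance, the thresholds above are set as node annotations. The following is only a minimal sketch that reuses the node name from the kubectl label example; all values are illustrative.

                        kubectl annotate node 192.168.0.0 volcano.sh/evicting-cpu-high-watermark=70
                        kubectl annotate node 192.168.0.0 volcano.sh/evicting-cpu-low-watermark=30
                        kubectl annotate node 192.168.0.0 volcano.sh/evicting-memory-high-watermark=60
                        kubectl annotate node 192.168.0.0 volcano.sh/evicting-memory-low-watermark=30
                        kubectl annotate node 192.168.0.0 volcano.sh/oversubscription-types=cpu,memory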

                        Table 1 Annotations for interconnecting with ELB

                        Parameter

                        The following shows how to use the preceding annotations:

                        • Associate an existing load balancer. For details, see Using kubectl to Create a Service (Using an Existing Load Balancer).
                          apiVersion: v1 
                          kind: Service 
                          metadata: 
                            name: nginx
                          ...
                              protocol: TCP 
                              targetPort: 80
                            type: LoadBalancer
                        • Automatically create a load balancer. For details, see Using kubectl to Create a Service (Automatically Creating a Load Balancer).
                          Shared load balancer:
                          apiVersion: v1 
                          kind: Service 
                          metadata: 
                            annotations:   
                          ...
                              kubernetes.io/elb.autocreate: '{
                                "type": "public",
                                "bandwidth_name": "cce-bandwidth-1626694478577",
                                "bandwidth_chargemode": "traffic",
                                "bandwidth_size": 5,
                                "bandwidth_sharetype": "PER",
                                "eip_type": "5_bgp",
                          ...
                            name: nginx
                            annotations:
                              kubernetes.io/elb.id: <your_elb_id>                         # ELB ID. Replace it with the actual value.
                              kubernetes.io/elb.class: union                   # Load balancer type
                              kubernetes.io/elb.session-affinity-mode: SOURCE_IP          # The sticky session type is source IP address.
                              kubernetes.io/elb.session-affinity-option: '{"persistence_timeout": "30"}'     # Stickiness duration (min)
                          spec:
                          ...
                            name: nginx
                            annotations:
                              kubernetes.io/elb.id: <your_elb_id>                         # ELB ID. Replace it with the actual value.
                              kubernetes.io/elb.class: union                   # Load balancer type
                              kubernetes.io/elb.health-check-flag: 'on'                   # Enable the ELB health check function.
                              kubernetes.io/elb.health-check-option: '{
                                "protocol":"TCP",
                          ...
                              protocol: TCP 
                              targetPort: 80
                            type: LoadBalancer
                        • For details about how to use kubernetes.io/elb.health-check-options, see Configuring Health Check on Multiple Service Ports.
                        HTTP or HTTPS

                        Table 4 Annotations for using HTTP or HTTPS

                        Parameter

                        Type

                        String

                        Layer-7 forwarding configuration port used by the Service.
                        If a Service is HTTP/HTTPS-compliant, configure the protocol and port number in the format of "protocol:port", where:
                        • protocol: specifies the protocol used by the listener port. The value can be http or https.
                        • ports: service ports specified by spec.ports[].port.

                        v1.19.16 or later

                        String

                        HTTP certificate used by the Service for Layer-7 forwarding.
                        ID of an ELB certificate, which is used as the HTTPS server certificate.

                        To obtain the certificate, log in to the CCE console, choose Service List > Networking > Elastic Load Balance, and click Certificates in the navigation pane. In the load balancer list, copy the ID under the target certificate name.

                        v1.19.16 or later

                        For details, see Configuring an HTTP or HTTPS Service.
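                        As a hedged illustration only: the annotation keys themselves are not shown in this excerpt, so kubernetes.io/elb.protocol-port and kubernetes.io/elb.cert-id below are assumptions and must be checked against Configuring an HTTP or HTTPS Service before use. The ELB ID and certificate ID are placeholders.

                        apiVersion: v1
                        kind: Service
                        metadata:
                          name: nginx
                          annotations:
                            kubernetes.io/elb.id: <your_elb_id>                 # ELB ID. Replace it with the actual value.
                            kubernetes.io/elb.protocol-port: https:443          # Assumed key: "protocol:port" format described above.
                            kubernetes.io/elb.cert-id: <your_certificate_id>    # Assumed key: ELB server certificate ID.
                        spec:
                          selector:
                            app: nginx
                          ports:
                            - name: https
                              port: 443
                              protocol: TCP
                              targetPort: 80
                          type: LoadBalancer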

                        Dynamic Adjustment of the Weight of the Backend ECS

                        Table 5 Annotations for dynamically adjusting the weight of the backend ECS

                        Parameter

                        String

                        Dynamically adjust the weight of the ELB backend ECS based on the number of pods on the node. The requests received by each pod are more balanced.
                        • true: enabled
                        • false: disabled

                        This parameter applies only to clusters of v1.21 or later and is invalid in passthrough networking.

                        metadata:
                          name: nginx
                          annotations:
                            kubernetes.io/elb.id: <your_elb_id>                         # ELB ID. Replace it with the actual value.
                            kubernetes.io/elb.class: union                   # Load balancer type
                            kubernetes.io/elb.adaptive-weight: 'true'        # Enable dynamic adjustment of the weight of the backend ECS.
                        spec:
                          selector:
                        ...
                          type: LoadBalancer

                        Passthrough Capability

                        Table 6 Annotations for passthrough capability

                        Parameter

                        Type

                        For details, see Enabling Passthrough Networking for LoadBalancer Services.

                        Host Network

                        Table 7 Annotations for host network

                        Parameter

                        Type

                        Description

                        Supported Cluster Version

                        kubernetes.io/hws-hostNetwork

                        String

                        If the pod uses hostNetwork, the ELB forwards the request to the host network after this annotation is used.
                        Options:
                        • true: enabled
                        • false (default): disabled

                        v1.9 or later

                        metadata:
                          name: nginx
                          annotations:
                            kubernetes.io/elb.id: <your_elb_id>                         # ELB ID. Replace it with the actual value.
                            kubernetes.io/elb.class: union                   # Load balancer type
                            kubernetes.io/hws-hostNetwork: 'true'            # The load balancer forwards the request to the host network.
                        spec:
                          selector:
                        ...

                        Timeout
                        Table 8 Annotation for configuring timeout

                        Parameter

                        Type

                        Description

                        Supported Cluster Version

                        kubernetes.io/elb.keepalive_timeout

                        String

                        Timeout for client connections. If there are no requests reaching the load balancer during the timeout duration, the load balancer will disconnect the connection from the client and establish a new connection when there is a new request.

                        Value:
                        • For TCP listeners, the value ranges from 10 to 4000 (in seconds). The default value is 300.
                        • For HTTP, HTTPS, and TERMINATED_HTTPS listeners, the value ranges from 10 to 4000 (in seconds). The default value is 60.
                        • For UDP listeners, this parameter does not take effect.

                        Dedicated load balancers: v1.19.16-r30, v1.21.10-r10, v1.23.8-r10, v1.25.3-r10, or later

                        Shared load balancers: v1.23.13-r0, v1.25.8-r0, v1.27.5-r0, v1.28.3-r0, or later

                        For details, see Configuring Timeout for a Service.

                        Parameters for Automatically Creating a Load Balancer

                        Table 9 elb.autocreate data structure

                        Parameter | Mandatory | Type | Description

                        Bandwidth mode.
                        • traffic: billed by traffic
                        Default: traffic

                        bandwidth_size

                        The specific type varies with regions. For details, see the EIP console.

                        vip_subnet_cidr_id (No, String): Subnet where a load balancer is located. The subnet must belong to the VPC where the cluster resides.
                        If this parameter is not specified, the ELB load balancer and the cluster are in the same subnet.
                        This field can be specified only for clusters of v1.21 or later.

                        vip_address (No, String): Private IP address of the load balancer. Only IPv4 addresses are supported.
                        The IP address must be in the ELB CIDR block. If this parameter is not specified, an IP address will be automatically assigned from the ELB CIDR block.
                        This parameter is available only in clusters of v1.23.11-r0, v1.25.6-r0, v1.27.3-r0, or later versions.

                        available_zone (Array of strings): AZ where the load balancer is located.
                        You can obtain all supported AZs by getting the AZ list.
                        This parameter is available only for dedicated load balancers.

                        (String) Flavor name of the layer-4 load balancer.
                        You can obtain all supported types by getting the flavor list.
                        This parameter is available only for dedicated load balancers.

                        (String) Flavor name of the layer-7 load balancer.
                        You can obtain all supported types by getting the flavor list.
                        This parameter is available only for dedicated load balancers. The value of this parameter must be the same as that of l4_flavor_name, that is, both are elastic specifications or fixed specifications.

                        ipv6_vip_virsubnet_id (No, String): ID of the IPv6 subnet where the load balancer resides. IPv6 must be enabled for the corresponding subnet. This parameter is mandatory only when dual-stack clusters are used.
                        This parameter is available only for dedicated load balancers.
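                        To put the fields above together, the following is a hedged sketch of an elb.autocreate annotation value; every value is illustrative, the optional fields can be omitted, and the subnet ID, AZ name, and flavor name are placeholders or assumptions to be checked against your region.

                        metadata:
                          annotations:
                            # vip_subnet_cidr_id, available_zone, and l4_flavor_name are optional; the last two
                            # apply only to dedicated load balancers.
                            kubernetes.io/elb.autocreate: '{
                              "type": "public",
                              "bandwidth_name": "cce-bandwidth-1626694478577",
                              "bandwidth_chargemode": "traffic",
                              "bandwidth_size": 5,
                              "bandwidth_sharetype": "PER",
                              "eip_type": "5_bgp",
                              "vip_subnet_cidr_id": "<subnet_id>",
                              "available_zone": ["<az_name>"],
                              "l4_flavor_name": "<l4_flavor_name>"
                            }'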
                        Table 11 elb.health-check-options

                        • target_service_port (mandatory, String): Port for health check specified by spec.ports. The value consists of the protocol and port number, for example, TCP:80.
                        • monitor_port (optional, String): Re-specified port for health check. If this parameter is not specified, the service port is used by default.
                          NOTE: Ensure that the port is in the listening state on the node where the pod is located. Otherwise, the health check result will be affected.
                        • delay (optional, String): Health check interval (s). Value range: 1 to 50. Default value: 5
                        • timeout (optional, String): Health check timeout, in seconds. Value range: 1 to 50. Default value: 10
                        • max_retries (optional, String): Maximum number of health check retries. Value range: 1 to 10. Default value: 3
                        • protocol (optional, String): Health check protocol. Default value: protocol of the associated Service. Value options: TCP, UDP, or HTTP
                        • path (optional, String): Health check URL. This parameter needs to be configured when the protocol is HTTP. Default value: /. Value range: 1-80 characters
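                        A hedged sketch of how these fields can be combined in the kubernetes.io/elb.health-check-options annotation referenced above; the port values are placeholders.

                        metadata:
                          annotations:
                            kubernetes.io/elb.health-check-flag: 'on'
                            # The exact value wrapping (object vs. array) should be checked against
                            # Configuring Health Check on Multiple Service Ports; an array form is assumed here.
                            kubernetes.io/elb.health-check-options: '[{
                              "target_service_port": "TCP:80",
                              "monitor_port": "80",
                              "delay": "5",
                              "timeout": "10",
                              "max_retries": "3",
                              "protocol": "HTTP",
                              "path": "/"
                            }]'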
                        diff --git a/docs/cce/umn/cce_10_0386.html b/docs/cce/umn/cce_10_0386.html
                        index 85175ea6..2df22c79 100644
                        --- a/docs/cce/umn/cce_10_0386.html
                        +++ b/docs/cce/umn/cce_10_0386.html
                        @@ -3,47 +3,47 @@

                        Labels and Annotations

                        Pod Annotations

                        CCE allows you to add annotations to a YAML file to realize some advanced pod functions. The following table describes the annotations you can add.


                        Table 1 Pod annotations

                        • kubernetes.AOM.log.stdout: Standard output parameter. If not specified, the standard log output of all containers is reported to AOM. You can collect stdout logs from certain containers or ignore them altogether.
                          Example:
                          • Collecting none of the stdout logs:
                            kubernetes.AOM.log.stdout: '[]'
                          • Collecting stdout logs of container-1 and container-2:
                            kubernetes.AOM.log.stdout: '["container-1","container-2"]'
                          Default value: None
                        • metrics.alpha.kubernetes.io/custom-endpoints: Parameter for reporting AOM monitoring metrics that you specify. For details, see Monitoring Custom Metrics on AOM. Default value: None
                        • kubernetes.io/ingress-bandwidth: Ingress bandwidth of a pod. For details, see Configuring QoS for a Pod. Default value: None
                        • kubernetes.io/egress-bandwidth: Egress bandwidth of a pod. For details, see Configuring QoS for a Pod. Default value: None
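                        A minimal sketch of a pod that uses the bandwidth annotations above; the bandwidth values and image are placeholders, and the exact unit syntax should be checked in Configuring QoS for a Pod.

                        apiVersion: v1
                        kind: Pod
                        metadata:
                          name: nginx
                          annotations:
                            kubernetes.io/ingress-bandwidth: 100M     # Placeholder ingress rate limit.
                            kubernetes.io/egress-bandwidth: 100M      # Placeholder egress rate limit.
                        spec:
                          containers:
                          - name: container-1
                            image: nginx:alpine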

                        Pod Labels

                        When you create a workload on the console, the following labels are added to the pod by default. The value of app is the workload name.


                        Example YAML:

                        ...
                         spec:
                        @@ -69,7 +68,7 @@ spec:
                               ...

                        You can also add other labels to the pod for affinity and anti-affinity scheduling. In the following figure, three pod labels (release, env, and role) are defined for workload APP 1, APP 2, and APP 3. The values of these labels vary with workload.

                        • APP 1: [release:alpha;env:development;role:frontend]
                        • APP 2: [release:beta;env:testing;role:frontend]
                        • APP 3: [release:alpha;env:production;role:backend]
                        Figure 1 Label example

                        For example, if key/value is set to role/backend, APP 3 will be selected for affinity scheduling. For details, see Workload Affinity (podAffinity).
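                        As an illustration of the role/backend selection described above, the following is a minimal podAffinity sketch; the topologyKey is the standard Kubernetes node hostname label, not a value from this guide.

                        spec:
                          affinity:
                            podAffinity:
                              requiredDuringSchedulingIgnoredDuringExecution:
                              - labelSelector:
                                  matchExpressions:
                                  - key: role
                                    operator: In
                                    values:
                                    - backend
                                topologyKey: kubernetes.io/hostname   # Co-locate with pods labeled role=backend on the same node.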

                        diff --git a/docs/cce/umn/cce_10_0397.html b/docs/cce/umn/cce_10_0397.html
                        index 4f6426f7..aea6e76c 100644
                        --- a/docs/cce/umn/cce_10_0397.html
                        +++ b/docs/cce/umn/cce_10_0397.html
                        @@ -23,7 +23,7 @@

                        Max. Unavailable Pods (maxUnavailable)

                        Specifies the maximum number of pods that can be unavailable compared with spec.replicas. The default value is 25%.

                        For example, if spec.replicas is set to 4, at least three pods exist during the upgrade. That is, the deletion is performed at a step of 1. The value can also be set to an absolute number.

                        This parameter is supported only by Deployments and DaemonSets.
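                        A minimal sketch of where this setting lives in a Deployment spec; maxSurge is shown only for context and keeps a typical default here.

                        spec:
                          replicas: 4
                          strategy:
                            type: RollingUpdate
                            rollingUpdate:
                              maxUnavailable: 25%      # At most one of the four pods may be unavailable during the upgrade.
                              maxSurge: 25%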

                        diff --git a/docs/cce/umn/cce_10_0398.html b/docs/cce/umn/cce_10_0398.html
                        index 7c80f150..bc88e165 100644
                        --- a/docs/cce/umn/cce_10_0398.html
                        +++ b/docs/cce/umn/cce_10_0398.html
                        @@ -1,7 +1,7 @@

                        Headless Services

                        Services allow internal and external pod access, but not the following scenarios:

                        • Accessing all pods at the same time
                        • Pods in a Service accessing each other

                        This is where headless Services come in. A headless Service does not create a cluster IP address, and the DNS records of all pods are returned during query. In this way, the IP addresses of all pods can be queried. StatefulSets use headless Services to support mutual access between pods.

                        apiVersion: v1
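                        The manifest above is cut off in this excerpt. A minimal, self-contained sketch of a headless Service for an nginx StatefulSet (the name and label are assumptions) looks like this:

                        apiVersion: v1
                        kind: Service
                        metadata:
                          name: nginx-headless            # Hypothetical name for illustration.
                        spec:
                          clusterIP: None                 # Headless: no cluster IP address is allocated.
                          selector:
                            app: nginx                    # Assumed pod label.
                          ports:
                          - name: nginx
                            port: 80
                            targetPort: 80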
                        diff --git a/docs/cce/umn/cce_10_0400.html b/docs/cce/umn/cce_10_0400.html
                        index 3edaaad1..5f4e0b1d 100644
                        --- a/docs/cce/umn/cce_10_0400.html
                        +++ b/docs/cce/umn/cce_10_0400.html
                        @@ -1,23 +1,23 @@
                         
                         
                        Accessing the Internet from a Container

                        Containers can access the Internet in either of the following ways:

                        • Bind an EIP to the node where the container is located if the network model is VPC or tunnel.
                        • Bind an EIP to the pod. (This function applies only to Cloud Native 2.0 clusters. To do so, manually bind an EIP to the ENI or sub-ENI of the pod on the VPC console. This method is not recommended because the IP address of a pod changes after the pod is rescheduled. As a result, the new pod cannot access the Internet.)
                        • Configure SNAT rules through NAT Gateway.

                        You can use NAT Gateway to enable container pods in a VPC to access the Internet. NAT Gateway provides source network address translation (SNAT), which translates private IP addresses to a public IP address by binding an elastic IP address (EIP) to the gateway, providing secure and efficient access to the Internet. Figure 1 shows the SNAT architecture. The SNAT function allows the container pods in a VPC to access the Internet without being bound to an EIP. SNAT supports a large number of concurrent connections, which makes it suitable for applications involving a large number of requests and connections.

                        Figure 1 SNAT

                        To enable a container pod to access the Internet, perform the following steps:

                        1. Assign an EIP.

                          1. Log in to the management console.
                          2. Click in the upper left corner of the management console and select a region and a project.
                          3. Click in the upper left corner and choose Networking > Elastic IP in the expanded list.
                          4. On the EIPs page, click Assign EIP.
                          5. Configure parameters as required.

                            Set Region to the region where container pods are located.

                        2. Create a NAT gateway.

                          1. Log in to the management console.
                          2. Click in the upper left corner of the management console and select a region and a project.
                          3. Click in the upper left corner and choose Networking > NAT Gateway in the expanded list.
                          4. On the Public Network Gateways page, click Create Public NAT Gateway in the upper right corner.
                          5. Configure parameters as required.

                            Select the same VPC.

                        3. Configure an SNAT rule and bind the EIP to the subnet.

                          1. Log in to the management console.
                          2. Click in the upper left corner of the management console and select a region and a project.
                          3. Click in the upper left corner and choose Networking > NAT Gateway in the expanded list.
                          4. On the page displayed, click the name of the NAT gateway for which you want to add the SNAT rule.
                          5. On the SNAT Rules tab page, click Add SNAT Rule.
                          6. Set parameters as required.

                          SNAT rules take effect by CIDR block. As different container network models use different communication modes, the subnet needs to be selected according to the following rules:

                          • Tunnel network and VPC network: Select the subnet where the node is located, that is, the subnet selected during node creation.
                          • Cloud Native Network 2.0: Select the subnet where the container is located, that is, the container subnet selected during cluster creation.

                          If there are multiple CIDR blocks, you can create multiple SNAT rules or customize a CIDR block as long as the CIDR block contains the container subnet (Cloud Native 2.0 network) or the node subnet.

                          After the SNAT rule is configured, workloads can access the Internet from the container. The Internet can be pinged from the container.

                        diff --git a/docs/cce/umn/cce_10_0402.html b/docs/cce/umn/cce_10_0402.html
                        index f8b7e58f..9eaebf17 100644
                        --- a/docs/cce/umn/cce_10_0402.html
                        +++ b/docs/cce/umn/cce_10_0402.html
                        @@ -30,7 +30,7 @@
                        NAME                    READY   STATUS    RESTARTS   AGE     IP          NODE
                        nginx-6fdf99c8b-6wwft   1/1     Running   0          3m41s   10.1.0.55   10.1.0.55   <none>   <none>

                        Precautions

                        If a pod uses the host network, it occupies a host port, and the pod IP is the host IP. To use the host network, you must confirm that pods do not conflict with each other in terms of the host ports they occupy. Do not use the host network unless you know exactly which host port is used by which pod.

                        When using the host network, you access a pod on a node through a node port. Therefore, allow access from the security group port of the node. Otherwise, the access fails.

                        In addition, using the host network requires you to reserve host ports for the pods. When using a Deployment to deploy pods of the hostNetwork type, ensure that the number of pods does not exceed the number of nodes. Otherwise, multiple pods will be scheduled onto the same node, and they will fail to start due to port conflicts. For example, in the preceding nginx YAML example, if two pods (setting replicas to 2) are deployed in a cluster with only one node, one pod cannot be created. The pod logs will show that Nginx cannot be started because the port is occupied.

                        Do not schedule multiple pods that use the host network on the same node. Otherwise, when a ClusterIP Service is created to access a pod, the cluster IP address cannot be accessed.
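                        A minimal sketch of the hostNetwork setting discussed in this section; the image and names are generic placeholders.

                        apiVersion: apps/v1
                        kind: Deployment
                        metadata:
                          name: nginx
                        spec:
                          replicas: 1                 # Keep replicas <= number of nodes to avoid host port conflicts.
                          selector:
                            matchLabels:
                              app: nginx
                          template:
                            metadata:
                              labels:
                                app: nginx
                            spec:
                              hostNetwork: true       # The pod shares the node's network namespace; port 80 is used on the host.
                              containers:
                              - name: nginx
                                image: nginx:alpine
                                ports:
                                - containerPort: 80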

                        diff --git a/docs/cce/umn/cce_10_0405.html b/docs/cce/umn/cce_10_0405.html
                        index 7343edcc..5569ad39 100644
                        --- a/docs/cce/umn/cce_10_0405.html
                        +++ b/docs/cce/umn/cce_10_0405.html
                        @@ -1,64 +1,128 @@

                        Patch Version Release Notes

                        Version 1.28

                        Table 1 Release notes for the v1.28 patch

                        CCE Cluster Patch Version | Kubernetes Version | Feature Updates | Optimization | Vulnerability Fixing

                        v1.28.3-r0 (Kubernetes v1.28.3)
                        Feature updates:
                        • LoadBalancer Services and ingresses allow you to:
                          • Configure SNI.
                          • Enable HTTP/2.
                          • Configure idle timeout, request timeout, and response timeout.
                          • Obtain the listener port number and the number of the port requested by the client from the request header of an HTTP packet, and rewrite X-Forwarded-Host.
                        Optimization: None
                        Vulnerability fixing: Fixed some security issues.

                        v1.28.2-r0 (Kubernetes v1.28.3)
                        Feature updates:
                        • You can configure an ELB blocklist/trustlist for access control when creating a Service or ingress.
                        Optimization: None
                        Vulnerability fixing: Fixed some security issues.

                        v1.28.1-r4 (Kubernetes v1.28.3)
                        Feature updates: None
                        Optimization: None
                        Vulnerability fixing: Fixed CVE-2024-21626 issues.

                        v1.28.1-r0 (Kubernetes v1.28.3)
                        Feature updates: CCE clusters of v1.28 are released for the first time. For more information, see Kubernetes 1.28 Release Notes.
                        • The prefix and suffix of a node name can be customized in node pools.
                        • In CCE Turbo clusters, you can create container networks for workloads and specify pod subnets.
                        • LoadBalancer ingresses support gRPC.
                        • LoadBalancer Services allow you to specify a private IP address for a load balancer during Service creation using YAML.
                        Optimization:
                        • Accelerated the startup speed for creating a large number of Kata containers in a CCE Turbo cluster.
                        • Improved the stability when Kata containers are repeatedly created or deleted in a CCE Turbo cluster.
                        Vulnerability fixing: None

                        +
                        +
                        +

                        +
                        +

                        Version 1.27

                        In CCE v1.27 and later versions, all nodes support only the containerd container engine.

                        +
                        + +
                        + + + + + + + + + + + + + + + + + + + + + + + + + + + - - @@ -68,93 +132,93 @@

                        Version 1.25

                        All nodes in the CCE clusters of version 1.25, except the ones running EulerOS 2.5, use containerd by default.

                        -
                        Table 2 Release notes for the v1.27 patch

                        CCE Cluster Patch Version

                        +

                        Kubernetes Version

                        +

                        Feature Updates

                        +

                        Optimization

                        +

                        Vulnerability Fixing

                        +

                        v1.27.3-r4

                        +

                        v1.27.4

                        +

                        None

                        +

                        None

                        +

                        Fixed CVE-2024-21626 issues.

                        +

                        v1.27.2-r0

                        +

                        v1.27.2

                        +
                        • Volcano supports node pool affinity scheduling.
                        • Volcano supports workload rescheduling.
                        +

                        None

                        +

                        Fixed some security issues.

                        +

                        v1.27.1-r10

                        +

                        v1.27.2

                        +

                        None

                        +

                        Optimized the events generated during node pool scaling.

                        +

                        Fixed some security issues.

                        +

                        v1.27.1-r0

                        +

                        v1.27.2

                        +

                        CCE clusters of v1.27 are released for the first time. For more information, see Kubernetes 1.27 Release Notes.

                        • Both soft eviction and hard eviction are supported in node pool configurations.

                        None

                        +

                        None

                        None

                        +

                        None

                        Table 3 Release notes for the v1.25 patch

                        | CCE Cluster Patch Version | Kubernetes Version | Feature Updates | Optimization | Vulnerability Fixing |
                        |---|---|---|---|---|
                        | v1.25.6-r4 | v1.25.10 | None | None | Fixed CVE-2024-21626 issues. |
                        | v1.25.5-r0 | v1.25.5 | Volcano supports node pool affinity scheduling; Volcano supports workload rescheduling | None | Fixed some security issues. |
                        | v1.25.4-r10 | v1.25.5 | None | Optimized the events generated during node pool scaling | Fixed some security issues. |
                        | v1.25.4-r0 | v1.25.5 | Both soft eviction and hard eviction are supported in node pool configurations; TMS tags can be added to automatically created EVS disks to facilitate cost management | None | Fixed some security issues. |
                        | v1.25.3-r10 | v1.25.5 | The timeout interval can be configured for a load balancer | High-frequency parameters of kube-apiserver are configurable | Fixed some security issues. |
                        | v1.25.3-r0 | v1.25.5 | None | Enhanced network stability of CCE Turbo clusters when their specifications are modified | Fixed some security issues. |
                        | v1.25.1-r0 | v1.25.5 | CCE clusters of v1.25 are released for the first time. For more information, see Kubernetes 1.25 Release Notes | None | None |

                        Version 1.23

                        Table 4 Release notes for the v1.23 patch

                        | CCE Cluster Patch Version | Kubernetes Version | Feature Updates | Optimization | Vulnerability Fixing |
                        |---|---|---|---|---|
                        | v1.23.11-r4 | v1.23.17 | None | None | Fixed CVE-2024-21626 issues. |
                        | v1.23.10-r0 | v1.23.11 | Volcano supports node pool affinity scheduling; Volcano supports workload rescheduling | None | Fixed some security issues. |
                        | v1.23.9-r10 | v1.23.11 | None | Optimized the events generated during node pool scaling | Fixed some security issues. |
                        | v1.23.9-r0 | v1.23.11 | Both soft eviction and hard eviction are supported in node pool configurations; TMS tags can be added to automatically created EVS disks to facilitate cost management | None | Fixed some security issues. |
                        | v1.23.8-r10 | v1.23.11 | The timeout interval can be configured for a load balancer | High-frequency parameters of kube-apiserver are configurable | Fixed some security issues. |
                        | v1.23.8-r0 | v1.23.11 | None | Enhanced Docker reliability during upgrades; optimized node time synchronization | Fixed some security issues. |
                        | v1.23.5-r0 | v1.23.11 | Fault detection and isolation are supported on GPU nodes; security groups can be customized by cluster; CCE Turbo clusters support ENI pre-binding by node; containerd is supported | Upgraded the etcd version of master nodes to 3.5.6; optimized scheduling so that pods are evenly distributed across AZs after pods are scaled in; optimized the memory usage of kube-apiserver when CRDs are frequently updated | Fixed some security issues and the following CVE vulnerabilities: |
                        | v1.23.1-r0 | v1.23.4 | CCE clusters of v1.23 are released for the first time. For more information, see Kubernetes 1.23 Release Notes | None | None |

                        Version 1.21

                        Table 5 Release notes for the v1.21 patch

                        | CCE Cluster Patch Version | Kubernetes Version | Feature Updates | Optimization | Vulnerability Fixing |
                        |---|---|---|---|---|
                        | v1.21.12-r4 | v1.21.14 | None | None | Fixed CVE-2024-21626 issues. |
                        | v1.21.11-r20 | v1.21.14 | Volcano supports node pool affinity scheduling; Volcano supports workload rescheduling | None | Fixed some security issues. |
                        | v1.21.11-r10 | v1.21.14 | None | Optimized the events generated during node pool scaling | Fixed some security issues. |
                        | v1.21.11-r0 | v1.21.14 | Both soft eviction and hard eviction are supported in node pool configurations; TMS tags can be added to automatically created EVS disks to facilitate cost management | None | Fixed some security issues. |
                        | v1.21.10-r10 | v1.21.14 | The timeout interval can be configured for a load balancer | High-frequency parameters of kube-apiserver are configurable | Fixed some security issues. |
                        | v1.21.10-r0 | v1.21.14 | None | Enhanced Docker reliability during upgrades; optimized node time synchronization; enhanced the stability of the Docker runtime for pulling images after nodes are restarted | Fixed some security issues. |
                        | v1.21.7-r0 | v1.21.14 | Fault detection and isolation are supported on GPU nodes; security groups can be customized by cluster; CCE Turbo clusters support ENI pre-binding by node | Improved the stability of LoadBalancer Services/ingresses with a large number of connections | Fixed some security issues and the following CVE vulnerabilities: |
                        | v1.21.1-r0 | v1.21.7 | CCE clusters of v1.21 are released for the first time. For more information, see Kubernetes 1.21 Release Notes | None | None |

                        Version 1.19

                        Table 6 Release notes for the v1.19 patch

                        | CCE Cluster Patch Version | Kubernetes Version | Feature Updates | Optimization | Vulnerability Fixing |
                        |---|---|---|---|---|
                        | 1.19.16-r84 | v1.19.16 | None | None | Fixed CVE-2024-21626 issues. |
                        | v1.19.16-r60 | v1.19.16 | Volcano supports node pool affinity scheduling; Volcano supports workload rescheduling | None | Fixed some security issues. |
                        | v1.19.16-r50 | v1.19.16 | None | Optimized the events generated during node pool scaling | Fixed some security issues. |
                        | v1.19.16-r40 | v1.19.16 | Both soft eviction and hard eviction are supported in node pool configurations; TMS tags can be added to automatically created EVS disks to facilitate cost management | None | Fixed some security issues. |
                        | v1.19.16-r30 | v1.19.16 | The timeout interval can be configured for a load balancer | High-frequency parameters of kube-apiserver are configurable | Fixed some security issues. |
                        | v1.19.16-r20 | v1.19.16 | None | Cloud Native 2.0 networks allow you to specify subnets for a namespace; enhanced the stability of the Docker runtime for pulling images after nodes are restarted; optimized the performance of CCE Turbo clusters in allocating ENIs when not all ENIs are pre-bound | Fixed some security issues. |
                        | v1.19.16-r4 | v1.19.16 | Fault detection and isolation are supported on GPU nodes; security groups can be customized by cluster; CCE Turbo clusters support ENI pre-binding by node | Optimized scheduling on tainted nodes; enhanced the long-term running stability of containerd when cores are bound; improved the stability of LoadBalancer Services/ingresses with a large number of connections; optimized the memory usage of kube-apiserver when CRDs are frequently updated | Fixed some security issues and the following CVE vulnerabilities: |
                        | v1.19.16-r0 | v1.19.16 | None | Enhanced the stability in updating LoadBalancer Services when workloads are upgraded and nodes are scaled in or out | Fixed some security issues and the following CVE vulnerabilities: |
                        | v1.19.10-r0 | v1.19.10 | CCE clusters of v1.19 are released for the first time. For more information, see Kubernetes 1.19 Release Notes | None | None |

                        diff --git a/docs/cce/umn/cce_10_0415.html b/docs/cce/umn/cce_10_0415.html
                        index 5add2b72..806f301f 100644
                        --- a/docs/cce/umn/cce_10_0415.html
                        +++ b/docs/cce/umn/cce_10_0415.html
                        @@ -2,17 +2,17 @@

                        CronHPA Policies

                        There are predictable and unpredictable traffic peaks for some services. For such services, CCE CronHPA allows you to scale resources in fixed periods. It can work with HPA policies to periodically adjust the HPA scaling scope, implementing workload scaling.

                        CronHPA can periodically adjust the maximum and minimum numbers of pods in the HPA policy or directly adjust the number of pods of a Deployment.

                        Prerequisites

                        The CCE Advanced HPA add-on of v1.2.13 or later has been installed.

                        Using CronHPA to Adjust the HPA Scaling Scope

                        CronHPA can periodically scale pods in HPA policies out or in to satisfy complex service requirements.

                        HPA and CronHPA associate scaling objects using the scaleTargetRef field. If a Deployment is the scaling object of both CronHPA and HPA, the two scaling policies are independent of each other, and the operation performed later overwrites the operation performed earlier. As a result, the scaling effect does not meet expectations.

                        When CronHPA and HPA are used together, CronHPA rules take effect based on the HPA policy, and CronHPA uses HPA to perform operations on the Deployment. Understanding the following parameters helps you understand how CronHPA works:

                        • targetReplicas: Number of pods set for CronHPA. When CronHPA takes effect, this parameter adjusts the maximum or minimum number of pods in the HPA policy to adjust the number of Deployment pods.
                        • minReplicas: Minimum number of Deployment pods.
                        • maxReplicas: Maximum number of Deployment pods.
                        • replicas: Number of pods in a Deployment before the CronHPA policy takes effect.

                        When a CronHPA rule takes effect, the maximum or minimum number of pods in the HPA policy is adjusted by comparing targetReplicas with the actual number of pods and combining it with the minimum or maximum number of pods in the HPA policy.

                        Figure 1 CronHPA scaling scenarios

                        Figure 1 shows possible scaling scenarios. The following examples detail how CronHPA modifies the number of pods in HPAs.


                        Table 1 CronHPA scaling parameters

                        Scenario

                        @@ -157,7 +157,7 @@

                        Using the CCE console

                        1. Log in to the CCE console and click the cluster name to access the cluster console.
                        2. Choose Workloads in the navigation pane. Locate the target workload and choose More > Auto Scaling in the Operation column.
                        3. Set Policy Type to HPA+CronHPA and enable HPA and CronHPA policies.

                          CronHPA periodically adjusts the maximum and minimum numbers of pods using the HPA policy.

                        4. Configure the HPA policy. For details, see HPA Policies.

                            @@ -208,7 +208,7 @@

                            Table 2 HPA policy

                            Parameter

                        5. Click the icon in the CronHPA policy rule. In the dialog box displayed, configure scaling policy parameters.

                            @@ -316,7 +320,7 @@ spec:

                            Using CronHPA to Directly Adjust the Number of Deployment Pods

                            CronHPA can also directly and periodically adjust the number of pods of its associated Deployment. The method is as follows:

                            Using the CCE console

                            1. Log in to the CCE console and click the cluster name to access the cluster console.
                            2. Choose Workloads in the navigation pane. Locate the target workload and choose More > Auto Scaling in the Operation column.
                            3. Set Policy Type to HPA+CronHPA, disable HPA, and enable CronHPA.

                              CronHPA periodically adjusts the number of workload pods.

                            4. Click the icon in the CronHPA policy rule. In the dialog box displayed, configure scaling policy parameters.

                            Table 3 CronHPA policy parameters

                            Parameter

                            Description

                            @@ -223,6 +223,8 @@

                            Trigger Time

                            You can select a specific time every day, every week, every month, or every year.

                            NOTE: This time indicates the local time of the location where the node is deployed.

                            Enable

                            @@ -255,7 +257,7 @@
                             metadata:
                               name: ccetest
                               namespace: default
                             spec:
                            -  scaleTargetRef:            # Associate the HPA policy
                            +  scaleTargetRef:            # Associate an HPA policy.
                                 apiVersion: autoscaling/v1
                                 kind: HorizontalPodAutoscaler
                                 name: hpa-test
                            @@ -305,7 +307,9 @@ spec:

                            spec.rules

                            CronHPA policy rule. Multiple rules can be added. The following fields can be configured for each rule:

                            • ruleName: CronHPA rule name, which must be unique.
                            • schedule: Running time and period of a job. For details, see Cron, for example, 0 * * * * or @hourly.
                              NOTE: This time indicates the local time of the location where the node is deployed.
                            • targetReplicas: indicates the number of pods to be scaled in or out.
                            • disable: The value can be true or false. false indicates that the rule takes effect, and true indicates that the rule does not take effect.
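                            Putting these fields together, a CronHPA object that periodically adjusts the HPA scaling scope might look like the following sketch. The scaleTargetRef and rules fields are the ones described above; the apiVersion and kind of the CronHPA object itself, the rule names, the schedules, and the targetReplicas values are assumptions for illustration and should be checked against the CRD installed by the CCE Advanced HPA add-on.

                            apiVersion: autoscaling.cce.io/v2alpha1       # assumed CRD group/version; verify in your cluster
                            kind: CronHorizontalPodAutoscaler             # assumed kind installed by the CCE Advanced HPA add-on
                            metadata:
                              name: ccetest
                              namespace: default
                            spec:
                              scaleTargetRef:                             # associate an HPA policy
                                apiVersion: autoscaling/v1
                                kind: HorizontalPodAutoscaler
                                name: hpa-test
                              rules:
                              - ruleName: "scale-up-morning"              # must be unique
                                schedule: "0 8 * * *"                     # cron format; local time of the location where the node is deployed
                                targetReplicas: 10                        # adjusts the maximum or minimum number of pods in the HPA policy
                                disable: false                            # false means the rule takes effect
                              - ruleName: "scale-down-evening"
                                schedule: "0 20 * * *"
                                targetReplicas: 2
                                disable: false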
                            Table 5 CronHPA policy parameters

                            Parameter

                            Description

                            @@ -331,6 +335,8 @@ spec:

                            Trigger Time

                            You can select a specific time every day, every week, every month, or every year.

                            NOTE: This time indicates the local time of the location where the node is deployed.

                            Enable

                            diff --git a/docs/cce/umn/cce_10_0423.html b/docs/cce/umn/cce_10_0423.html
                            index 8a95d315..7f36b2ec 100644
                            --- a/docs/cce/umn/cce_10_0423.html
                            +++ b/docs/cce/umn/cce_10_0423.html
                            @@ -9,7 +9,7 @@

                            diff --git a/docs/cce/umn/cce_10_0425.html b/docs/cce/umn/cce_10_0425.html
                            index 5138e6ac..3157259b 100644
                            --- a/docs/cce/umn/cce_10_0425.html
                            +++ b/docs/cce/umn/cce_10_0425.html
                            @@ -1,65 +1,140 @@

                            NUMA Affinity Scheduling

                            Background

                            When a node runs many CPU-bound pods, the workload can move to different CPU cores depending on whether the pod is throttled and which CPU cores are available at scheduling time. Many workloads are not sensitive to this migration and work fine without any intervention. However, for workloads whose performance is significantly affected by CPU cache affinity and scheduling latency, extra latency occurs when the allocated CPU cores come from different NUMA nodes. To resolve this issue, kubelet provides the Topology Manager and CPU management policies to determine CPU placement preferences on the node.

                            Both the CPU Manager and Topology Manager are kubelet components, but they have the following limitations:
                            • The scheduler is not topology-aware. A workload may therefore be scheduled onto a node and then fail to run on that node because of the Topology Manager. This is unacceptable for TensorFlow jobs: if any worker or ps fails on a node, the whole job fails.
                            • The managers work at the node level, so the best node for the NUMA topology cannot be matched across the whole cluster.

                            Volcano aims to lift these limitations and make the scheduler NUMA topology-aware, so that:
                            • Pods are not scheduled to nodes whose NUMA topology does not match.
                            • Pods are scheduled to the best node for the NUMA topology.

                            For more information, see https://github.com/volcano-sh/volcano/blob/master/docs/design/numa-aware.md.

                            Application Scope

                            • CPU resource topology scheduling
                            • Pod-level topology policies

                            Pod Scheduling Prediction

                            After a topology policy is configured for pods, Volcano predicts matched nodes based on the topology policy. The scheduling process is as follows:
                            1. Volcano filters nodes with the same policy based on the topology policy configured for the pods. The topology policies provided by Volcano are the same as those provided by the Topology Manager.
                            2. Among the nodes with the same policy, Volcano selects the nodes whose CPU topology meets the policy requirements for scheduling.

                            | Volcano Topology Policy | 1. Filter Nodes with the Same Policy | 2. Check Whether the Node's CPU Topology Meets the Policy Requirements |
                            |---|---|---|
                            | none | No filtering: nodes with the none, best-effort, restricted, and single-numa-node policies are all schedulable. | None |
                            | best-effort | Filter the nodes with the best-effort topology policy (nodes with the none, restricted, or single-numa-node policy are unschedulable). | Best-effort scheduling: pods are preferentially scheduled to a single NUMA node. If a single NUMA node cannot provide the requested CPU cores, the pods can be scheduled to multiple NUMA nodes. |
                            | restricted | Filter the nodes with the restricted topology policy (nodes with the none, best-effort, or single-numa-node policy are unschedulable). | Restricted scheduling: if the upper CPU limit of a single NUMA node is greater than or equal to the requested CPU cores, the pods can only be scheduled to a single NUMA node, and they cannot be scheduled if the remaining CPU cores of that NUMA node are insufficient. If the upper CPU limit of a single NUMA node is less than the requested CPU cores, the pods can be scheduled to multiple NUMA nodes. |
                            | single-numa-node | Filter the nodes with the single-numa-node topology policy (nodes with the none, best-effort, or restricted policy are unschedulable). | Pods can only be scheduled to a single NUMA node. |
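                            The following is a minimal sketch of a pod that requests a pod-level topology policy from Volcano. The volcano.sh/numa-topology-policy annotation key is taken from the Volcano numa-aware design document linked above, and the pod name, image, schedulerName value, and resource figures are illustrative assumptions; verify the exact annotation key and supported values against your Volcano version.

                            apiVersion: v1
                            kind: Pod
                            metadata:
                              name: numa-affinity-demo                             # hypothetical name for illustration
                              annotations:
                                volcano.sh/numa-topology-policy: single-numa-node  # pod-level topology policy (assumed annotation key)
                            spec:
                              schedulerName: volcano                               # have Volcano schedule this pod
                              containers:
                              - name: app
                                image: nginx:latest                                # placeholder image
                                resources:
                                  requests:
                                    cpu: "9"                                       # integer CPU request, matching the example that follows
                                    memory: 1Gi
                                  limits:
                                    cpu: "9"                                       # requests equal to limits (Guaranteed QoS) so static CPU pinning applies
                                    memory: 1Gi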

                            For example, two NUMA nodes provide resources, each with a total of 32 CPU cores. The following table lists the resource allocation.

                            | Worker Node | Node Topology Policy | Total CPU Cores on NUMA Node 1 | Total CPU Cores on NUMA Node 2 |
                            |---|---|---|---|
                            | Node 1 | best-effort | 16 | 16 |
                            | Node 2 | restricted | 16 | 16 |
                            | Node 3 | restricted | 16 | 16 |
                            | Node 4 | single-numa-node | 16 | 16 |

                            Figure 1 shows the scheduling of a pod after a topology policy is configured.
                            • When a pod requests 9 CPU cores and uses the best-effort topology policy, Volcano selects node 1, whose topology policy is also best-effort. Because this policy allows scheduling across multiple NUMA nodes, the requested 9 CPU cores are allocated to two NUMA nodes, and the pod can be scheduled to node 1.
                            • When a pod requests 9 CPU cores and uses the restricted topology policy, Volcano selects node 2 or node 3, whose topology policy is also restricted. Because a single NUMA node can provide the requested 9 CPU cores, the pod can only be scheduled to a single NUMA node. However, the remaining CPU cores on a single NUMA node of node 2 or node 3 are less than the requested 9 cores, so the pod cannot be scheduled.
                            • When a pod requests 17 CPU cores and uses the restricted topology policy, Volcano selects node 2 or node 3, whose topology policy is also restricted. Because the upper CPU limit of a single NUMA node (16 cores) is less than the requested 17 cores, the pod can be scheduled across multiple NUMA nodes. The pod can then be scheduled to node 3.
                            • When a pod requests 17 CPU cores and uses the single-numa-node topology policy, Volcano selects nodes whose topology policy is also single-numa-node. However, no single NUMA node can provide 17 CPU cores, so the pod cannot be scheduled.

                            Figure 1 Comparison of NUMA scheduling policies

                            Scheduling Priority

                            A topology policy aims to schedule pods to the optimal node. In this example, each node is scored to select the optimal one.

                            Principle: Schedule pods to the worker nodes that require the fewest NUMA nodes.

                            The scoring formula is as follows:

                            score = weight x (100 - 100 x numaNodeNum/maxNumaNodeNum)

                            Parameters:
                            • weight: the weight of the NUMA Aware Plugin.
                            • numaNodeNum: the number of NUMA nodes required for running the pod on the worker node.
                            • maxNumaNodeNum: the maximum number of NUMA nodes required for running the pod among all worker nodes.

                            For example, three nodes meet the CPU topology policy for a pod, and the weight of the NUMA Aware Plugin is set to 10.
                            • Node A: One NUMA node provides the CPU resources required by the pod (numaNodeNum = 1).
                            • Node B: Two NUMA nodes provide the CPU resources required by the pod (numaNodeNum = 2).
                            • Node C: Four NUMA nodes provide the CPU resources required by the pod (numaNodeNum = 4).

                            According to the preceding formula, maxNumaNodeNum is 4.
                            • score(Node A) = 10 x (100 - 100 x 1/4) = 750
                            • score(Node B) = 10 x (100 - 100 x 2/4) = 500
                            • score(Node C) = 10 x (100 - 100 x 4/4) = 0

                            Therefore, the optimal node is Node A.

                            Enabling NUMA Affinity Scheduling for Volcano

                            1. Enable static CPU management. For details, see Enabling the CPU Management Policy.
                            2. Configure a CPU topology policy.

                              1. Log in to the CCE console and click the cluster name to access the cluster console. In the navigation pane, choose Nodes. On the right of the page, click the Node Pools tab and choose More > Manage in the Operation column of the target node pool.
                              2. Change the kubelet Topology Management Policy (topology-manager-policy) value to the required CPU topology policy, for example, best-effort.

                                Valid topology policies include none, best-effort, restricted, and single-numa-node. For details, see Pod Scheduling Prediction. (A kubelet-level sketch of these two settings is provided after this procedure.)

                                3. Enable the numa-aware add-on and the resource_exporter function.

                                  Volcano 1.7.1 or later

                                  1. Log in to the CCE console and click the cluster name to access the cluster console. In the navigation pane, choose Add-ons. On the right of the page, locate the Volcano add-on and click Edit. In the Parameters area, configure Volcano scheduler parameters.
                                    {
                                         "ca_cert": "",
                                         "default_scheduler_conf": {
                                    -        "actions": "allocate, backfill",
                                    +        "actions": "allocate, backfill, preempt",
                                             "tiers": [
                                                 {
                                                     "plugins": [
                                    @@ -160,7 +235,7 @@ metadata:
                                       namespace: kube-system
                                     data:
                                       default-scheduler.conf: |-
                                    -    actions: "allocate, backfill"
                                    +    actions: "allocate, backfill, preempt"
                                         tiers:
                                         - plugins:
                                           - name: priority
                                    @@ -190,7 +265,7 @@ data:
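For reference, a minimal, self-contained sketch of a volcano-scheduler ConfigMap with the numa-aware plugin enabled is shown below. The plugin tiers and the weight value are illustrative only; keep whatever other plugins your installation already configures, and note that the resource_exporter setting mentioned above is configured separately in the add-on parameters.

  apiVersion: v1
  kind: ConfigMap
  metadata:
    name: volcano-scheduler-configmap
    namespace: kube-system
  data:
    default-scheduler.conf: |-
      actions: "allocate, backfill, preempt"
      tiers:
      - plugins:
        - name: priority
        - name: gang
        - name: conformance
      - plugins:
        - name: drf
        - name: predicates
        - name: nodeorder
        - name: binpack
        - name: numa-aware        # NUMA Aware Plugin
          arguments:
            weight: 10            # Weight used in the scoring formula above.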
                                     

                            -

                            Using Volcano to Support NUMA Affinity Scheduling

                            1. Configure NUMA affinity for Deployments. The following is an example:

                              kind: Deployment
                              +

                              Using Volcano to Configure NUMA Affinity Scheduling

                              1. Refer to the following examples for configuration.

                                1. Example 1: Configure NUMA affinity for a Deployment.
                                  kind: Deployment
                                   apiVersion: apps/v1
                                   metadata:
                                     name: numa-tset
                                  @@ -204,7 +279,7 @@ spec:
                                         labels:
                                           app: numa-tset
                                         annotations:
                                  -        volcano.sh/numa-topology-policy: single-numa-node    # set the topology policy
                                  +        volcano.sh/numa-topology-policy: single-numa-node    # Configure the topology policy.
                                       spec:
                                         containers:
                                           - name: container-1
                                  @@ -218,7 +293,7 @@ spec:
                                                 memory: 2048Mi
                                         imagePullSecrets:
                                         - name: default-secret
                                  -
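For reference, a self-contained sketch of a Deployment that uses NUMA affinity is shown below. The image name is a placeholder; the key parts are the Volcano scheduler name and the volcano.sh/numa-topology-policy annotation, and the CPU request is an integer equal to the limit so that the static CPU management policy applies.

  kind: Deployment
  apiVersion: apps/v1
  metadata:
    name: numa-tset
  spec:
    replicas: 1
    selector:
      matchLabels:
        app: numa-tset
    template:
      metadata:
        labels:
          app: numa-tset
        annotations:
          volcano.sh/numa-topology-policy: single-numa-node    # Configure the topology policy.
      spec:
        schedulerName: volcano                                 # Use the Volcano scheduler.
        containers:
          - name: container-1
            image: nginx:alpine                                # Placeholder image.
            resources:
              requests:
                cpu: 2                                         # Integer value, equal to the limit.
                memory: 2048Mi
              limits:
                cpu: 2
                memory: 2048Mi
        imagePullSecrets:
          - name: default-secret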

                                2. Create a Volcano job and use NUMA affinity.

                                  apiVersion: batch.volcano.sh/v1alpha1
                                  +
                                3. Example 2: Create a Volcano job and enable NUMA affinity for it.
                                  apiVersion: batch.volcano.sh/v1alpha1
                                   kind: Job
                                   metadata:
                                     name: vj-test
                                  @@ -241,18 +316,66 @@ spec:
                                                     cpu: 20
                                                     memory: "100Mi"
                                             restartPolicy: OnFailure
                                  -
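Likewise, a self-contained sketch of a Volcano job that enables NUMA affinity is shown below. The image and task name are placeholders; topologyPolicy sets the NUMA topology policy for the task.

  apiVersion: batch.volcano.sh/v1alpha1
  kind: Job
  metadata:
    name: vj-test
  spec:
    schedulerName: volcano
    minAvailable: 1
    tasks:
      - replicas: 1
        name: "test"
        topologyPolicy: best-effort        # NUMA topology policy for this task.
        template:
          spec:
            containers:
              - image: alpine              # Placeholder image.
                imagePullPolicy: IfNotPresent
                name: running
                command: ["sleep", "1000"]
                resources:
                  requests:
                    cpu: 20
                    memory: "100Mi"
            restartPolicy: OnFailure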

                                4. Check the NUMA usage.

# Check the CPU usage of the current node.
lscpu

                              2. Analyze NUMA scheduling.

                                The following table shows example NUMA nodes.

Worker Node | Topology Manager Policy | Allocatable CPU Cores on NUMA Node 0 | Allocatable CPU Cores on NUMA Node 1
Node 1      | single-numa-node        | 16                                   | 16
Node 2      | best-effort             | 16                                   | 16
Node 3      | best-effort             | 20                                   | 20

In the preceding examples:

• In example 1, the pod requests 2 CPU cores and uses the single-numa-node topology policy. The pod is therefore scheduled to node 1, which uses the same policy.
• In example 2, the pod requests 20 CPU cores and uses the best-effort topology policy. The pod is scheduled to node 3 because node 3 can provide all 20 requested CPU cores on a single NUMA node, whereas node 2 can only provide them across two NUMA nodes.
                                +

                              +
                              +

                              Checking NUMA Node Usage

                              Run the lscpu command to check the CPU usage of the current node.

# Check the CPU usage of the current node.
lscpu
                               ...
                               CPU(s):              32
                               NUMA node(s):        2
                               NUMA node0 CPU(s):   0-15
NUMA node1 CPU(s):   16-31

                            Then, check the NUMA node usage.

# Check the CPU allocation of the current node.
cat /var/lib/kubelet/cpu_manager_state
{"policyName":"static","defaultCpuSet":"0,10-15,25-31","entries":{"777870b5-c64f-42f5-9296-688b9dc212ba":{"container-1":"16-24"},"fb15e10a-b6a5-4aaa-8fcd-76c1aa64e6fd":{"container-1":"1-9"}},"checksum":318470969}

                            The preceding example shows that two containers are running on the node. One container uses CPU cores 1 to 9 of NUMA node 0, and the other container uses CPU cores 16 to 24 of NUMA node 1.
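If the single-line JSON is hard to read, it can be pretty-printed on the node, for example with the Python JSON tool (this assumes python3 is available on the node):

  cat /var/lib/kubelet/cpu_manager_state | python3 -m json.tool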

                            diff --git a/docs/cce/umn/cce_10_0430.html b/docs/cce/umn/cce_10_0430.html index 6e9261fa..69a94625 100644 --- a/docs/cce/umn/cce_10_0430.html +++ b/docs/cce/umn/cce_10_0430.html @@ -8,7 +8,7 @@

                            When you create a cluster, select a proper CIDR block for each network. Ensure that the CIDR blocks do not conflict with each other and have sufficient available IP addresses. You cannot change the container network model after the cluster is created. Plan the container network model properly in advance.

                            You are advised to learn about the cluster network and container network models before creating a cluster. For details, see Container Network Models.

Master Nodes and Cluster Scale

When you create a cluster on CCE, you can have one or three master nodes. Three master nodes will be deployed in a cluster for HA.

                            The master node specifications decide the number of nodes that can be managed by a cluster. You can select the cluster management scale, for example, 50 or 200 nodes.

                            Cluster Lifecycle

                            @@ -43,6 +43,11 @@

                            A cluster is being upgraded.

                            Resizing

                            +

                            The cluster flavor is being changed.

                            +

                            Unavailable

                            A cluster is unavailable.

                            diff --git a/docs/cce/umn/cce_10_0431.html b/docs/cce/umn/cce_10_0431.html index 1ae9a2a4..638b4de9 100644 --- a/docs/cce/umn/cce_10_0431.html +++ b/docs/cce/umn/cce_10_0431.html @@ -6,35 +6,40 @@

                            Solution

                            1. The node is unavailable. Preferentially recover the node.

                              If a node is unavailable, log in to the CCE console and click the cluster name to access the cluster console. Then, choose Nodes in the navigation pane and click the Nodes tab. Ensure that the node is in the Running state. A node in the Installing or Deleting state cannot be upgraded.

                              If a node is unavailable, recover the node and retry the check task.

                              -
                            2. The container engine of the node does not support the upgrade.

                              This issue typically occurs when a cluster of an earlier version is upgraded to v1.27 or later. Clusters of v1.27 or later support only the containerd runtime. If your node runtime is not containerd, the upgrade cannot be performed. In this case, reset the node and change the node runtime to containerd.

                            3. The node OS does not support the upgrade.

                              The following table lists the node OSs that support the upgrade. You can reset the node OS to an available OS in the list.

                              -
                              - + + + + diff --git a/docs/cce/umn/cce_10_0442.html b/docs/cce/umn/cce_10_0442.html index 2887b2d8..a53049d1 100644 --- a/docs/cce/umn/cce_10_0442.html +++ b/docs/cce/umn/cce_10_0442.html @@ -1,6 +1,6 @@ -

                              Node CCE Agent Versions

                              +

                              CCE Agent Versions

                              Check Items

                              Check whether cce-agent on the current node is of the latest version.

                              Solution

                              • Scenario 1: The error message "you cce-agent no update, please restart it" is displayed.

                                cce-agent does not need to be updated but is not restarted. In this case, log in to the node and manually restart cce-agent.

                                @@ -11,7 +11,7 @@

                                Solution

                                1. Log in to a node where the check succeeded, obtain the path of the cce-agent configuration file, and obtain the OBS address.
                                  cat `ps aux | grep cce-agent | grep -v grep | awk -F '-f ' '{print $2}'`

                                  The OBS configuration address field in the configuration file is packageFrom.addr.

Figure 1 OBS address
2. Log in to a node where the check failed, obtain the OBS address again by referring to the previous step, and check whether the OBS addresses are the same. If they are different, change the OBS address of the abnormal node to the correct address.
                                3. Run the following commands to download the latest binary file:
                                  • x86
                                    curl -k "https://{OBS address you have obtained}/cluster-versions/base/cce-agent" > /tmp/cce-agent
                                  • Arm
                                    curl -k "https://{OBS address you have obtained}/cluster-versions/base/cce-agent-arm" > /tmp/cce-agent-arm
                                  diff --git a/docs/cce/umn/cce_10_0445.html b/docs/cce/umn/cce_10_0445.html index aa790e53..e06a4291 100644 --- a/docs/cce/umn/cce_10_0445.html +++ b/docs/cce/umn/cce_10_0445.html @@ -7,7 +7,7 @@

                                  Solution

                                  During the node upgrade, the key disks store the upgrade component package, and the /tmp directory stores temporary files.

                                  • Scenario 1: Master node disks fail to meet the upgrade requirements.

                                    Contact technical support.

                                    -
                                  • Scenario 2: Worker node disks fail to meet the upgrade requirements.

Run the following command to check the usage of each key disk. After ensuring that the available space meets the requirements, check again.

                                    +
                                  • Scenario 2: Worker node disks fail to meet the upgrade requirements.

                                    Check the usage of each key disk. After ensuring that the available space meets the requirements, check again.

                                    • Disk partition of Docker: at least 1 GB of available space
                                      df -h /var/lib/docker
                                    • Disk partition of containerd: at least 1 GB of available space
                                      df -h /var/lib/containerd
                                    • Disk partition of kubelet: at least 1 GB of available space
                                      df -h /mnt/paas/kubernetes/kubelet
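The checks above can also be run as a single command so that the available space of every key partition is visible at a glance. The paths are the same as those listed above, plus /tmp because the upgrade writes temporary files there; df simply reports a path as missing if that component is not installed on the node.

  df -h /var/lib/docker /var/lib/containerd /mnt/paas/kubernetes/kubelet /tmp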
                                      diff --git a/docs/cce/umn/cce_10_0448.html b/docs/cce/umn/cce_10_0448.html index f946a8ff..b80d4a1c 100644 --- a/docs/cce/umn/cce_10_0448.html +++ b/docs/cce/umn/cce_10_0448.html @@ -3,7 +3,7 @@

                                      Kubelet

                                      Check Items

                                      Check whether the kubelet on the node is running properly.

                                      -

                                      Solution

                                      • Scenario 1: The kubelet status is abnormal.

                                        If the kubelet malfunctions, the node is unavailable. Restore the node and check again. For details, see

                                        +

                                        Solution

                                        • Scenario 1: The kubelet status is abnormal.

                                          If the kubelet malfunctions, the node is unavailable. Restore the node and check again. For details, see What Should I Do If a Cluster Is Available But Some Nodes Are Unavailable?

                                        • Scenario 2: The cce-pause version is incorrect.

                                          The version of the pause container image on which kubelet depends is not cce-pause:3.1. If you continue the upgrade, pods will restart in batches. Currently, the upgrade is not supported. Contact technical support.

                                        diff --git a/docs/cce/umn/cce_10_0450.html b/docs/cce/umn/cce_10_0450.html index f67fea13..52f585a9 100644 --- a/docs/cce/umn/cce_10_0450.html +++ b/docs/cce/umn/cce_10_0450.html @@ -5,11 +5,11 @@

                                      Solution

                                      • Scenario 1: ntpd is running abnormally.

                                        Log in to the node and run the systemctl status ntpd command to obtain the running status of ntpd. If the command output is abnormal, run the systemctl restart ntpd command and obtain the status again.

                                        The normal command output is as follows:

                                        -
Figure 1 Running status of ntpd

                                        If the problem persists after ntpd is restarted, contact technical support.

                                      • Scenario 2: chronyd is running abnormally.

                                        Log in to the node and run the systemctl status chronyd command to obtain the running status of chronyd. If the command output is abnormal, run the systemctl restart chronyd command and obtain the status again.

                                        The normal command output is as follows:

                                        -
Figure 2 Running status of chronyd

                                        If the problem persists after chronyd is restarted, contact technical support.

                                      diff --git a/docs/cce/umn/cce_10_0451.html b/docs/cce/umn/cce_10_0451.html index b790c648..53d7dd1f 100644 --- a/docs/cce/umn/cce_10_0451.html +++ b/docs/cce/umn/cce_10_0451.html @@ -3,8 +3,12 @@

                                      Node OS

                                      Check Items

                                      Check whether the OS kernel version of the node is supported by CCE.

                                      -

                                      Solution

                                      CCE nodes run depending on the initial standard kernel version when they are created. CCE has performed comprehensive compatibility tests based on this kernel version. A non-standard kernel version may cause unexpected compatibility issues during a node upgrade and the upgrade may fail. For details, see High-Risk Operations and Solutions.

                                      -

                                      This type of nodes should not be upgraded. Reset the node to the standard kernel version before the upgrade by following the instructions in Resetting a Node.

                                      +

                                      Solution

                                      • Case 1: The node image is not a standard CCE image.

                                        CCE nodes run depending on the initial standard kernel version specified when they are created. CCE has performed comprehensive compatibility tests based on this kernel version. A non-standard kernel version may lead to unexpected compatibility issues during a node upgrade and the upgrade may fail. For details, see High-Risk Operations and Solutions.

                                        +

                                        Do not directly upgrade this type of nodes. Instead, reset the nodes to a standard kernel version and then upgrade the nodes.

                                        +
                                      +
                                      • Case 2: An image of a special version is defective.

An EulerOS release 2.8 (Arm) image of v1.17 is used in the source version. Such an image is defective because the docker exec command malfunctions after Docker is restarted. When the cluster version is upgraded, the Docker version will be updated and Docker will be restarted. To resolve this issue, do as follows:

                                        +
1. Drain and isolate the affected nodes before upgrading the cluster.
                                        2. Upgrade the version to v1.19 or later and reset the nodes to replace the image with one of a later version, for example, EulerOS release 2.9.
                                        +
                                      diff --git a/docs/cce/umn/cce_10_0456.html b/docs/cce/umn/cce_10_0456.html index 623de719..ff764316 100644 --- a/docs/cce/umn/cce_10_0456.html +++ b/docs/cce/umn/cce_10_0456.html @@ -5,7 +5,7 @@

                                      Solution

                                      Log in to the node and run the systemctl is-active systemd-journald command to obtain the running status of journald. If the command output is abnormal, run the systemctl restart systemd-journald command and obtain the status again.

                                      The normal command output is as follows:

                                      -
Figure 1 Running status of journald

                                      If the problem persists after journald is restarted, contact technical support.

                                      diff --git a/docs/cce/umn/cce_10_0459.html b/docs/cce/umn/cce_10_0459.html index 602069be..e55e41e2 100644 --- a/docs/cce/umn/cce_10_0459.html +++ b/docs/cce/umn/cce_10_0459.html @@ -4,7 +4,7 @@

                                      Check Items

                                      Check whether inaccessible mount points exist on the node.

                                      Solution

                                      Scenario: There are inaccessible mount points on the node.

                                      -

                                      If NFS (such as OBS parallel file systems and SFS) is used by the node and the node is disconnected with the NFS server, the mount point would be inaccessible and all processes that access this mount point are in D state.

                                      +

                                      If NFS (such as obsfs or SFS) is used by the node and the node is disconnected from the NFS server, the mount point would be inaccessible and all processes that access this mount point are in D state.

                                      1. Log in to the node.
                                      2. Run the following commands on the node in sequence:

                                        - df -h
                                         - for dir in `df -h | grep -v "Mounted on" | awk "{print \\$NF}"`;do cd $dir; done && echo "ok"

                                      3. If ok is returned, no problem occurs.

                                        Otherwise, start another terminal and run the following command to check whether the previous command is in the D state:
                                        - ps aux | grep "D "
                                        diff --git a/docs/cce/umn/cce_10_0460.html b/docs/cce/umn/cce_10_0460.html index 17ef3f4e..82123e88 100644 --- a/docs/cce/umn/cce_10_0460.html +++ b/docs/cce/umn/cce_10_0460.html @@ -19,7 +19,7 @@

                                      Solution

                                      Scenario 1: The node is skipped during the cluster upgrade.

                                      -
                                      1. Configure the kubectl command. For details, see Connecting to a Cluster Using kubectl.
                                      2. Check the kubelet version of the corresponding node. The following information is expected:

                                        Figure 1 kubelet version
                                        +
                                        1. Configure the kubectl command. For details, see Connecting to a Cluster Using kubectl.
                                        2. Check the kubelet version of the corresponding node. The following information is expected:

                                          Figure 1 kubelet version

                                          If the version of the node is different from that of other nodes, the node is skipped during the upgrade. Reset the node and upgrade the cluster again. For details about how to reset a node, see Resetting a Node.

                                          Resetting a node will reset all node labels, which may affect workload scheduling. Before resetting a node, check and retain the labels that you have manually added to the node.

                                          diff --git a/docs/cce/umn/cce_10_0462.html b/docs/cce/umn/cce_10_0462.html index 9bd06739..3dd6e8a5 100644 --- a/docs/cce/umn/cce_10_0462.html +++ b/docs/cce/umn/cce_10_0462.html @@ -23,7 +23,7 @@
                              - @@ -60,104 +60,107 @@
Table 1 OSs that support the upgrade

OS          | Constraint
EulerOS 2.x | If the target version is earlier than v1.27, there are no constraints. If the target version is v1.27 or later, only EulerOS 2.9 and EulerOS 2.10 support the upgrade.
Ubuntu      | If the check result shows that the upgrade is not supported due to regional restrictions, contact technical support. NOTE: If the target version is v1.27 or later, only Ubuntu 22.04 supports the upgrade.
HCE OS 2.0  | If the check result shows that the upgrade is not supported due to regional restrictions, contact technical support.
                              -
                            4. The affected node belongs to the default node pool but it is configured with a non-default node pool label, which will affect the upgrade.

                              If a node is migrated from a node pool to the default node pool, the node pool label cce.cloud.com/cce-nodepool is retained, affecting the cluster upgrade. Check whether load scheduling on the node depends on the label.

                              +
                            5. The affected node belongs to the default node pool but it is configured with a non-default node pool label, which will affect the upgrade.

                              If a node is migrated from a common node pool to the default node pool, the cce.cloud.com/cce-nodepool label will affect the cluster upgrade. Check whether load scheduling on the node depends on the label.

                              • If no, delete the label.
                              • If yes, modify the load balancing policy, remove the dependency, and then delete the label.
                            6. The node is marked with a CNIProblem taint. Preferentially recover the node.

                              The node contains a taint whose key is node.cloudprovider.kubernetes.io/cni-problem, and the effect is NoSchedule. The taint is added by the NPD add-on. Upgrade the NPD add-on to the latest version and check again. If the problem persists, contact technical support.

                            7. The Kubernetes node corresponding to the affected node does not exist.

                              It is possible that the node is being deleted. Check again later.

                              +
8. The OS running on the master node is EulerOS 2.5, which does not support upgrading the cluster to v1.27.5-r0.

                              You can upgrade the cluster to v1.25 or v1.28. If necessary, contact technical support.

                            9. diff --git a/docs/cce/umn/cce_10_0433.html b/docs/cce/umn/cce_10_0433.html index 93b6bec7..47485908 100644 --- a/docs/cce/umn/cce_10_0433.html +++ b/docs/cce/umn/cce_10_0433.html @@ -4,12 +4,12 @@

                              Check Items

                              Check the following items:

                              • Check whether the add-on status is normal.
• Check whether the add-on supports the target version.
                              -

                              Solution

                              • Scenario 1: The add-on malfunctions.

                                Log in to the CCE console and click the cluster name to access the cluster console. Choose Add-ons in the navigation pane and obtain add-ons. Then, handle malfunctional add-ons.

                                -
                              • Scenario 2: The target cluster version does not support the current add-on version.

                                The add-on cannot be automatically upgraded with the cluster due to compatibility issues. In this case, log in to the CCE console and click the cluster name to access the cluster console. Choose Add-ons in the navigation pane and manually upgrade the add-on.

                                -
                              • Scenario 3: After the add-on is upgraded to the latest version, it is still not supported by the target cluster version.

                                Log in to the CCE console and click the cluster name to access the cluster console. Choose Add-ons in the navigation pane and manually uninstall the add-on. For details about the supported add-on versions and substitutions, see the Help document.

                                +

                                Solution

                                • Scenario 1: The add-on malfunctions.

                                  Log in to the CCE console and click the cluster name to access the cluster console. Choose Add-ons in the navigation pane and obtain add-ons. Then, handle malfunctional add-ons.

                                  +
                                • Scenario 2: The target cluster version does not support the current add-on version.

                                  The add-on cannot be automatically upgraded with the cluster due to compatibility issues. In this case, log in to the CCE console and click the cluster name to access the cluster console. Choose Add-ons in the navigation pane and manually upgrade the add-on.

                                  +
                                • Scenario 3: After the add-on is upgraded to the latest version, it is still not supported by the target cluster version.

                                  Log in to the CCE console and click the cluster name to access the cluster console. Choose Add-ons in the navigation pane and manually uninstall the add-on. For details about the supported add-on versions and substitutions, see the Help document.

                                • Scenario 4: The add-on configuration does not meet the upgrade requirements. Upgrade the add-on and try again.

                                  The following error information is displayed during the pre-upgrade check:

                                  please upgrade addon [ ] in the page of addon managecheck and try again
                                  -

                                  In this case, log in to the CCE console and click the cluster name to access the cluster console. Choose Add-ons in the navigation pane and manually upgrade the add-on.

                                  +

                                  In this case, log in to the CCE console and click the cluster name to access the cluster console. Choose Add-ons in the navigation pane and manually upgrade the add-on.

                              diff --git a/docs/cce/umn/cce_10_0436.html b/docs/cce/umn/cce_10_0436.html index f4ea6e20..45d2683f 100644 --- a/docs/cce/umn/cce_10_0436.html +++ b/docs/cce/umn/cce_10_0436.html @@ -3,7 +3,10 @@

                              Node Pools

                              Check Items

                              • Check the node pool status.
                              • Check whether the node pool OS or container runtime is supported after the upgrade.
                              -

                              Solution

                              • Scenario: The node pool malfunctions.

                                Log in to the CCE console and click the cluster name to access the cluster console. Choose Nodes in the navigation pane, click the Node Pools tab, locate the row containing the target node pool, and view its statuses. If the node pool is being scaled, wait until the node pool scaling is complete.

                                +

                                Solution

                                • Scenario: The node pool malfunctions.

                                  Log in to the CCE console and click the cluster name to access the cluster console. Choose Nodes in the navigation pane and view the status of the affected node pool on the Node Pools tab. If the node pool is being scaled, wait until the node pool scaling is complete.

                                  +
                                • Scenario: The node pool OS is not supported.

                                  The runtime and OS vary depending on the cluster version. This issue typically occurs when a cluster of an earlier version is upgraded to v1.27 or later.

                                  +

                                  Log in to the CCE console and click the cluster name to access the cluster console. Choose Nodes in the navigation pane, view the status of the affected node pool on the Node Pools tab, and click Upgrade. Change the supported OSs based on the pre-upgrade check result, and click OK.

                                  +

                                  If there are nodes in the affected node pool, choose More > Synchronize in the operation column to synchronize the OS of the existing nodes. For details, see Synchronizing Node Pools.

                              diff --git a/docs/cce/umn/cce_10_0437.html b/docs/cce/umn/cce_10_0437.html index 07632b88..eea8d853 100644 --- a/docs/cce/umn/cce_10_0437.html +++ b/docs/cce/umn/cce_10_0437.html @@ -9,7 +9,6 @@
                              • The security group name is cluster name-node-xxx. This security group is associated with the worker nodes.
                              • The security group name is cluster name-control-xxx. This security group is associated with the master nodes.

                              Click the node security group and ensure that the following rules are configured to allow the master node to access the node using ICMP.

                              -

                              If the preceding security group rule is unavailable, add the rule with the following configurations to the node security group: Set Protocol & Port to Protocols/ICMP and All, and Source to Security group and the master security group.

                              diff --git a/docs/cce/umn/cce_10_0441.html b/docs/cce/umn/cce_10_0441.html index d640082d..a31f832d 100644 --- a/docs/cce/umn/cce_10_0441.html +++ b/docs/cce/umn/cce_10_0441.html @@ -12,7 +12,15 @@

                              v1.23 to v1.25

                              +

                              v1.23 or v1.25

                              +

                              Upgraded to v1.27

                              +

                              Docker is no longer recommended. Use containerd instead. For details, see Container Engine.

                              +

                              This item has been included in the pre-upgrade check.

                              +

                              v1.23 to v1.25

                              Since Kubernetes v1.25, PodSecurityPolicy has been replaced by pod Security Admission. For details, see Configuring Pod Security Admission.

                              Command

                              crictl

                              +

                              crictl/ctr

                              docker

                            -

                            Mapping between Node OSs and Container Engines

                            -
                            Table 2 Node OSs and container engines in CCE clusters

                            OS

                            +

                            Mapping between Node OSs and Container Engines

                            • VPC network clusters of v1.23 or later versions support containerd. Tunnel network clusters of v1.23.2-r0 or later versions support containerd.
                            +
                            + +
                            - - - - - - - - - - - - - - - - - - - - - - - -
                            Table 2 Node OSs and container engines in CCE clusters

                            OS

                            Kernel Version

                            +

                            Kernel Version

                            Container Engine

                            +

                            Container Engine

                            Container Storage Rootfs

                            +

                            Container Storage Rootfs

                            Container Runtime

                            +

                            Container Runtime

                            EulerOS 2.5

                            +

                            EulerOS 2.5

                            3.x

                            +

                            3.x

                            Docker

                            +

                            Docker

                            Device Mapper

                            +

                            Device Mapper

                            runC

                            +

                            runC

                            EulerOS 2.9

                            +

                            EulerOS 2.9

                            4.x

                            +

                            4.x

                            Docker

                            +

                            Docker

                            Clusters of v1.23 and later support containerd.

                            OverlayFS

                            +

                            OverlayFS

                            runC

                            +

                            runC

                            Ubuntu 22.04

                            +

                            Ubuntu 22.04

                            4.x

                            +

                            4.x

                            Docker

                            +

                            Docker

                            Clusters of v1.23 and later support containerd.

                            OverlayFS

                            +

                            OverlayFS

                            runC

                            +

                            runC

                            HCE OS 2.0

                            +

                            HCE OS 2.0

                            5.x

                            +

                            5.x

                            Docker

                            +

                            Docker

                            containerd

                            OverlayFS

                            +

                            OverlayFS

                            runC

                            +

                            runC

                            -
                            Table 3 Node OSs and container engines in CCE Turbo clusters

                            Node Type

                            +
                            - - - - - - - - - - - - - @@ -166,240 +169,255 @@

                            Common Commands of containerd and Docker

                            containerd does not support Docker APIs and Docker CLI, but you can run crictl commands to implement similar functions.

                            -
                            Table 3 Node OSs and container engines in CCE Turbo clusters

                            Node Type

                            OS

                            +

                            OS

                            Kernel Version

                            +

                            Kernel Version

                            Container Engine

                            +

                            Container Engine

                            Container Storage Rootfs

                            +

                            Container Storage Rootfs

                            Container Runtime

                            +

                            Container Runtime

                            ECS (VM)

                            -

                            +

                            ECS (VM)

                            +

                            EulerOS 2.9

                            +

                            EulerOS 2.9

                            4.x

                            +

                            4.x

                            Docker

                            -

                            containerd

                            +

                            Docker

                            +

                            containerd

                            +

                            OverlayFS

                            -

                            +

                            OverlayFS

                            +

                            runC

                            -

                            +

                            runC

                            +

                            HCE OS 2.0

                            +

                            HCE OS 2.0

                            5.x

                            +

                            5.x

                            Table 4 Image-related commands

                            No.

                            +
                            - - - + + + - - - - - - - - - - - - - - - - - - - -
                            Table 4 Image-related commands

                            Operation

                            Docker Command

                            +

                            Docker Command

                            containerd Command

                            +

                            containerd Command

                            Remarks

                            +

                            docker

                            +

                            crictl

                            +

                            ctr

                            1

                            +

                            List local images.

                            docker images [Option] [Image name[:Tag]]

                            +

                            docker images

                            crictl images [Option] [Image name[:Tag]]

                            +

                            crictl images

                            List local images.

                            +

                            ctr -n k8s.io i ls

                            2

                            +

                            Pull images.

                            docker pull [Option] Image name[:Tag|@DIGEST]

                            +

                            docker pull

                            crictl pull [Option] Image name[:Tag|@DIGEST]

                            +

                            crictl pull

                            Pull images.

                            +

                            ctr -n k8s.io i pull

                            3

                            +

                            Push images.

                            docker push

                            +

                            docker push

                            None

                            +

                            None

                            Pushing images.

                            +

                            ctr -n k8s.io i push

                            4

                            +

                            Delete a local image.

                            docker rmi [Option] Image...

                            +

                            docker rmi

                            crictl rmi [Option] Image ID...

                            +

                            crictl rmi

                            Delete a local image.

                            +

                            ctr -n k8s.io i rm

                            5

                            +

                            Check images.

                            docker inspect Image ID

                            +

                            docker inspect

                            crictl inspecti Image ID

                            +

                            crictl inspecti

                            Check images.

                            +

                            None

                            -
                            Table 5 Container-related commands

                            No.

                            +
                            - - - + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                            Table 5 Container-related commands

                            Operation

                            Docker Command

                            +

                            Docker Command

                            containerd Command

                            +

                            containerd Command

                            Remarks

                            +

                            docker

                            +

                            crictl

                            +

                            ctr

                            1

                            +

                            List containers.

                            docker ps [Option]

                            +

                            docker ps

                            crictl ps [Option]

                            +

                            crictl ps

                            List containers.

                            +

                            ctr -n k8s.io c ls

                            2

                            +

                            Create a container.

                            docker create [Option]

                            +

                            docker create

                            crictl create [Option]

                            +

                            crictl create

                            Create a container.

                            +

                            ctr -n k8s.io c create

                            3

                            +

                            Start a container.

                            docker start [Option] Container ID...

                            +

                            docker start

                            crictl start [Option] Container ID...

                            +

                            crictl start

                            Start a container.

                            +

                            ctr -n k8s.io run

                            4

                            +

                            Stop a container.

                            docker stop [Option] Container ID...

                            +

                            docker stop

                            crictl stop [Option] Container ID...

                            +

                            crictl stop

                            Stop a container.

                            +

                            None

                            5

                            +

                            Delete a container.

                            docker rm [Option] Container ID...

                            +

                            docker rm

                            crictl rm [Option] Container ID...

                            +

                            crictl rm

                            Delete a container.

                            +

                            ctr -n k8s.io c del

                            6

                            +

                            Connect to a container.

                            docker attach [Option] Container ID

                            +

                            docker attach

                            crictl attach [Option] Container ID

                            +

                            crictl attach

                            Connect to a container.

                            +

                            None

                            7

                            +

                            Access the container.

                            docker exec [Option] Container ID Startup command [Parameter...]

                            +

                            docker exec

                            crictl exec [Option] Container ID Startup command [Parameter...]

                            +

                            crictl exec

                            Access the container.

                            +

                            None

                            8

                            +

                            Query container details.

                            docker inspect [Option] Container name|ID...

                            +

                            docker inspect

                            crictl inspect [Option] Container ID...

                            +

                            crictl inspect

                            Query container details.

                            +

                            ctr -n k8s.io c info

                            9

                            +

                            View container logs.

                            docker logs [Option] Container ID

                            +

                            docker logs

                            crictl logs [Option] Container ID

                            +

                            crictl logs

                            View container logs.

                            +

                            None

                            10

                            +

                            Check the resource usage of the container.

                            docker stats [Option] Container ID...

                            +

                            docker stats

                            crictl stats [Option] Container ID

                            +

                            crictl stats

                            Check the resource usage of the container.

                            +

                            None

                            11

                            +

                            Update container resource limits.

                            docker update [Option] Container ID...

                            +

                            docker update

                            crictl update [Option] Container ID...

                            +

                            crictl update

                            Update container resource limits.

                            +

                            None

                            -
                            Table 6 Pod-related commands

                            No.

                            +
                            - - - + + + - - - - - - - - - - - - - - - - - - - - - - - - @@ -408,13 +426,13 @@

                            Containers created and started by containerd are immediately deleted by kubelet. containerd does not support suspending, resuming, restarting, renaming, and waiting for containers, nor Docker image build, import, export, comparison, push, search, and labeling. containerd does not support file copy. You can log in to the image repository by modifying the configuration file of containerd.


                              Differences in Tracing

                              • Docker (Kubernetes 1.23 and earlier):

                                kubelet --> docker shim (in the kubelet process) --> docker --> containerd

                              • Docker (community solution for Kubernetes v1.24 or later):

kubelet --> cri-dockerd (kubelet uses CRI to connect to cri-dockerd) --> docker --> containerd

                              • containerd:

                                kubelet --> cri plugin (in the containerd process) --> containerd

Although Docker provides additional functions such as Swarm clusters, docker build, and the Docker APIs, these extra features also introduce more potential bugs. Compared with containerd, Docker has one more layer of calls, so containerd is more resource-efficient and secure.


                                Container Engine Versions

                                • Docker
                                  • EulerOS/CentOS: docker-engine 18.9.0, a Docker version customized for CCE. Security vulnerabilities will be fixed in a timely manner.
                                • containerd: 1.6.14
                              diff --git a/docs/cce/umn/cce_10_0463.html b/docs/cce/umn/cce_10_0463.html index ddd06679..193947a4 100644 --- a/docs/cce/umn/cce_10_0463.html +++ b/docs/cce/umn/cce_10_0463.html @@ -14,9 +14,10 @@
                            - - - @@ -400,23 +400,65 @@ - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/cce/umn/cce_10_0550.html b/docs/cce/umn/cce_10_0550.html index ba980a3d..e9b9020d 100644 --- a/docs/cce/umn/cce_10_0550.html +++ b/docs/cce/umn/cce_10_0550.html @@ -27,7 +27,7 @@ - @@ -117,11 +117,23 @@ - - - + + + + + + diff --git a/docs/cce/umn/cce_10_0552.html b/docs/cce/umn/cce_10_0552.html index c2eb119a..b5d5f051 100644 --- a/docs/cce/umn/cce_10_0552.html +++ b/docs/cce/umn/cce_10_0552.html @@ -17,13 +17,13 @@ spec: memory: "200Mi" cpu: "1"

This feature is built on the optimized CPU scheduling in the HCE OS 2.0 kernel. When the usage of a CPU that a container preferentially uses exceeds 85%, the container is automatically allocated to other CPUs with low usage to keep applications responsive.

• When the enhanced CPU policy is enabled, application performance is better than with the none policy but worse than with the static policy.
• The CPU is not exclusively used by burstable pods; it remains in the shared CPU pool. When the burstable pods are idle, other pods can share this CPU.

                            Constraints

                            To use this feature, the following conditions must be met:

• The cluster version must be v1.23 or later.
• The node OS is HCE OS 2.0.
• The CPU management policy cannot take effect on physical cloud server nodes.


                            Procedure

                            1. Log in to the CCE console.
                            2. Click the cluster name to access the cluster console. Choose Nodes in the navigation pane and click the Node Pools tab on the right.
                            3. Select a node pool whose OS is HCE OS 2.0 and click Manage in the Operation column.
                            4. In the Manage Components window that is displayed, change the cpu-manager-policy value of the kubelet component to enhanced-static.
                            5. Click OK.

                            Verification

                            Take a node with 8 vCPUs and 32 GB memory as an example. Deploy a workload whose CPU request is 1 and limit is 2 in the cluster in advance.

                            1. Log in to a node in the node pool and view the /var/lib/kubelet/cpu_manager_state output.

                              cat /var/lib/kubelet/cpu_manager_state

                            2. Check the cgroup setting of cpuset.preferred_cpus of the container. The output is the ID of the CPU that is preferentially used.

cat /sys/fs/cgroup/cpuset/kubepods/burstable/pod{pod uid}/{Container ID}/cpuset.preferred_cpus
                              • {pod uid} indicates the pod UID, which can be obtained by running the following command on the host that has been connected to the cluster using kubectl:
                                kubectl get po {pod name} -n {namespace} -ojsonpath='{.metadata.uid}{"\n"}'

                                In the preceding command, {pod name} and {namespace} indicate the pod name and the namespace to which the pod belongs.

• {Container id} must be a complete container ID. You can run the following command on the node where the container is running to obtain the container ID:
  Docker node pool: In the command, {pod name} indicates the pod name.
  docker ps --no-trunc | grep {pod name} | grep -v cce-pause | awk '{print $1}'

  containerd node pool: In the command, {pod name} indicates the pod name, {pod id} indicates the pod ID, and {container name} indicates the container name.
  # Obtain the pod ID.
  crictl pods | grep {pod name} | awk '{print $1}'
  # Obtain the complete container ID.
  crictl ps --no-trunc | grep {pod id} | grep {container name} | awk '{print $1}'

                              A complete example is as follows:

                              cat /sys/fs/cgroup/cpuset/kubepods/burstable/pod6739f6f2-ebe5-48ae-945a-986d5d8919b9/5ba5603434b95fd22d36fba6a5f1c44eba83c18c2e1de9b52ac9b52e93547a13/cpuset.preferred_cpus
                              diff --git a/docs/cce/umn/cce_10_0553.html b/docs/cce/umn/cce_10_0553.html index e7245c8b..737b9b8f 100644 --- a/docs/cce/umn/cce_10_0553.html +++ b/docs/cce/umn/cce_10_0553.html @@ -6,7 +6,7 @@ diff --git a/docs/cce/umn/cce_10_0557.html b/docs/cce/umn/cce_10_0557.html index 7792f796..3735e7e5 100644 --- a/docs/cce/umn/cce_10_0557.html +++ b/docs/cce/umn/cce_10_0557.html @@ -1,8 +1,8 @@

                              Overview

                              Kubernetes logs allow you to locate and rectify faults. This section describes how you can manage Kubernetes logs:

Table 6 Pod-related commands

1. List pods.
  Docker command: None
  containerd command (crictl): crictl pods [Option]
  containerd command (ctr): None

2. View pod details.
  Docker command: None
  containerd command (crictl): crictl inspectp [Option] Pod ID...
  containerd command (ctr): None

3. Start a pod.
  Docker command: None
  containerd command (crictl): crictl start [Option] Pod ID...
  containerd command (ctr): None

4. Run a pod.
  Docker command: None
  containerd command (crictl): crictl runp [Option] Pod ID...
  containerd command (ctr): None

5. Stop a pod.
  Docker command: None
  containerd command (crictl): crictl stopp [Option] Pod ID...
  containerd command (ctr): None

6. Delete a pod.
  Docker command: None
  containerd command (crictl): crictl rmp [Option] Pod ID...
  containerd command (ctr): None
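Because pods are a concept of the container runtime interface rather than of Docker, only crictl provides pod-level operations. A quick inspection sequence on a containerd node might look like the following, where the pod ID is taken from the first command.

  # List the pod sandboxes on the node.
  crictl pods
  # View the details of one pod sandbox.
  crictl inspectp <pod ID>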

                            Node type used to run containers

ECS (PM)

ECS (VM)

ECS (PM)

                            Container engine

                            diff --git a/docs/cce/umn/cce_10_0476.html b/docs/cce/umn/cce_10_0476.html new file mode 100644 index 00000000..90628a30 --- /dev/null +++ b/docs/cce/umn/cce_10_0476.html @@ -0,0 +1,221 @@ + + +

                            Node OS


                            This section describes the mappings between released cluster versions and OS versions.


                            Node OSs and Cluster Versions

Table 1 Mapping between node OS versions and cluster versions

OS                   Cluster Version   VPC Network (CCE Standard)   Tunnel Network (CCE Standard)       Cloud Native 2.0 Network (CCE Turbo)   Latest Kernel
HCE OS 2.0           v1.28             √                            √                                   √                                      5.10.0-60.18.0.50.r865_35.hce2.x86_64
                     v1.27             √                            Supported in v1.27.3-r0 or later.   √                                      5.10.0-60.18.0.50.r865_35.hce2.x86_64
                     v1.25             √                            Supported in v1.25.6-r0 or later.   √                                      5.10.0-60.18.0.50.r865_35.hce2.x86_64
Ubuntu 22.04         v1.28             √                            x                                   √                                      5.15.0-53-generic
                     v1.27             √                            x                                   √                                      5.15.0-53-generic
                     v1.25             √                            x                                   √                                      5.15.0-53-generic
EulerOS release 2.9  v1.28             √                            √                                   √                                      4.18.0-147.5.1.6.h998.eulerosv2r9.x86_64
                     v1.27             √                            √                                   √                                      4.18.0-147.5.1.6.h998.eulerosv2r9.x86_64
                     v1.25             √                            √                                   √                                      4.18.0-147.5.1.6.h998.eulerosv2r9.x86_64
                     v1.23             √                            √                                   √                                      4.18.0-147.5.1.6.h998.eulerosv2r9.x86_64
                     v1.21             √                            √                                   √                                      4.18.0-147.5.1.6.h998.eulerosv2r9.x86_64
                     v1.19             √                            √                                   √                                      4.18.0-147.5.1.6.h998.eulerosv2r9.x86_64
EulerOS release 2.5  v1.25             √                            √                                   √                                      3.10.0-862.14.1.5.h687.eulerosv2r7.x86_64
                     v1.23             √                            √                                   √                                      3.10.0-862.14.1.5.h687.eulerosv2r7.x86_64
                     v1.21             √                            √                                   √                                      3.10.0-862.14.1.5.h687.eulerosv2r7.x86_64
                     v1.19             √                            √                                   √                                      3.10.0-862.14.1.5.h687.eulerosv2r7.x86_64
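To compare your own nodes against this table, you can query the OS image, kernel, and container runtime reported by each node. For example:

  kubectl get nodes -o custom-columns=NAME:.metadata.name,OS:.status.nodeInfo.osImage,KERNEL:.status.nodeInfo.kernelVersion,RUNTIME:.status.nodeInfo.containerRuntimeVersion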

                            + diff --git a/docs/cce/umn/cce_10_0477.html b/docs/cce/umn/cce_10_0477.html index 06bbf6ae..50abc311 100644 --- a/docs/cce/umn/cce_10_0477.html +++ b/docs/cce/umn/cce_10_0477.html @@ -11,7 +11,7 @@

                            Diagnosis

                            Perform the following steps to check your CCE clusters of v1.21 or later:

                            1. Use kubectl to connect to the cluster and run the kubectl get --raw "/metrics" | grep stale command to obtain the metrics. Check the metric named serviceaccount_stale_tokens_total.

                              If the value is greater than 0, some workloads in the cluster may be using an earlier client-go version. In this case, check whether this problem occurs in your deployed applications. If yes, upgrade client-go to the version specified by the community as soon as possible. The version must be at least two major versions of the CCE cluster. For example, if your cluster version is 1.23, the Kubernetes dependency library version must be at least 1.19.
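For example, the check in the preceding step can be run as follows. A value greater than 0 indicates that stale service account tokens are still in use.

  kubectl get --raw "/metrics" | grep serviceaccount_stale_tokens_total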


                            diff --git a/docs/cce/umn/cce_10_0487.html b/docs/cce/umn/cce_10_0487.html index e08c55be..0607b7c1 100644 --- a/docs/cce/umn/cce_10_0487.html +++ b/docs/cce/umn/cce_10_0487.html @@ -9,7 +9,7 @@

The check result shows that your cluster calls a deprecated API of the target cluster version using kubectl or other applications. Rectify this before the upgrade; otherwise, these API calls will be intercepted by kube-apiserver after the upgrade. For details about each deprecated API, see Deprecated APIs.

                            Case Study

                            Ingresses of the extensions/v1beta1 and networking.k8s.io/v1beta1 APIs are deprecated in Kubernetes v1.22. If you upgrade a cluster from v1.19 or v1.21 to v1.23, existing resources are not affected, but the v1beta1 API may be intercepted in the creation and editing scenarios.


                            For details about the YAML configuration structure changes, see Using kubectl to Create a LoadBalancer Ingress.
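As a quick check that your tooling already works against the v1 API, you can request Ingresses through the fully qualified v1 resource name. Clients that are still hard-coded to the v1beta1 API will fail against a v1.23 cluster.

  # List Ingresses in all namespaces via networking.k8s.io/v1.
  kubectl get ingresses.v1.networking.k8s.io -A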

                            diff --git a/docs/cce/umn/cce_10_0493.html b/docs/cce/umn/cce_10_0493.html index bbe20430..2ed84628 100644 --- a/docs/cce/umn/cce_10_0493.html +++ b/docs/cce/umn/cce_10_0493.html @@ -22,7 +22,7 @@ exit(1); " > corefile_record.txt cat corefile_record.txt

                          7. Compare the output differences between 2 and 3.

                            diff corefile_now.txt corefile_record.txt -y;
Figure 1 Viewing output differences

                          8. Return to the CCE console and click the cluster name to access the cluster console. Choose Add-ons in the navigation pane, select CoreDNS, and click Upgrade.

                            To retain custom configurations, use either of the following methods:
                            • (Recommended) Set parameterSyncStrategy to inherit. In this case, custom settings are automatically inherited. The system automatically parses, identifies, and inherits custom parameters.
                            • Set parameterSyncStrategy to force. Manually enter the differential configuration. For details, see CoreDNS.

                          9. Click OK. After the add-on upgrade is complete, check whether all CoreDNS instances are available and whether Corefile meets the expectation.

                            kubectl get cm -nkube-system coredns -o jsonpath='{.data.Corefile}'
                            diff --git a/docs/cce/umn/cce_10_0495.html b/docs/cce/umn/cce_10_0495.html index 005a8ccf..77d943d8 100644 --- a/docs/cce/umn/cce_10_0495.html +++ b/docs/cce/umn/cce_10_0495.html @@ -3,7 +3,7 @@

                            Key Commands of Nodes

                            Check Items

Check whether the key commands that the node upgrade depends on are working on the node.


                              Solution

                              • Scenario 1: Executing the package manager command failed.

                                Executing the rpm or dpkg command failed. In this case, log in to the affected node and check whether the following commands are available:

                                • rpm:
                                  rpm -qa
                                • dpkg:
                                  dpkg -l
                                @@ -12,7 +12,9 @@
                                systemctl status kubelet
                              • Scenario 3: Executing the Python command failed.
                                Check whether the command can be executed on the node.
                                /usr/bin/python --version 
                          10. Solution

                            How Do I Check Whether a Disk Is Shared?

                              1. Log in to the target node based on the check information.
                              2. Run the lsblk command to check whether vgpaas-share is mounted to /mnt/paas. If yes, a shared disk is used.

                                Figure 1 Checking whether a shared disk is used
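For example, the check can be performed directly in the node shell. The volume group and mount point names are the ones described above.

  # If vgpaas-share appears and is mounted on /mnt/paas, the node uses a shared disk.
  lsblk | grep -i vgpaas
  findmnt /mnt/paas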

                              What Can I Do If an Error Occurred in a Node Mounting Check?

                              1. Cancel the manually modified mount point.
                              2. Cancel the modification on the default soft link.
                              diff --git a/docs/cce/umn/cce_10_0500.html b/docs/cce/umn/cce_10_0500.html index 5093010b..e61b3f47 100644 --- a/docs/cce/umn/cce_10_0500.html +++ b/docs/cce/umn/cce_10_0500.html @@ -6,7 +6,7 @@

                              Solution

                              Solution 1: Delete the Service that is associated with a load balancer without a private IPv4 address.

                              Solution 2: Bind a private IP address to the load balancer without a private IPv4 address. The procedure is as follows:

                              1. Obtain the load balancer associated with the target Service.

                                • Method 1: Obtain the load balancer ID based on the pre-upgrade check log. Go to the ELB console and filter load balancers by load balancer ID.
                                  elbs (ids: [*****]) without ipv4 private ip, please bind private ip tothese elbs and try again
                              2. Method 2: Log in to the CCE console and click the cluster name to access the cluster console. Then, choose Services & Ingresses in the navigation pane and click the name of the target load balancer to go to the ELB page.
                              3. Check whether the load balancer has a private IPv4 address.

                              4. Bind a private IP address to the load balancer without a private IPv4 address.

                                1. Log in to the CCE console and click the name of the target load balancer.
                                2. On the Summary tab, click Bind next to Private IPv4 address.
                                3. Configure the subnet and IPv4 address, and click OK.

                                diff --git a/docs/cce/umn/cce_10_0510.html b/docs/cce/umn/cce_10_0510.html index 2de0bc31..9dd8ab57 100644 --- a/docs/cce/umn/cce_10_0510.html +++ b/docs/cce/umn/cce_10_0510.html @@ -1,9 +1,9 @@ -


                                Check containerd pod restart risk


                                Check Items

Check whether the service containers running on a node that uses containerd may restart when containerd is upgraded on that node.


                                Solution

                                Ensure that the cluster is upgraded when the impact on services is controllable (for example, during off-peak hours) to mitigate the impact of service container restart. If you need help, contact O&M personnel.

                                diff --git a/docs/cce/umn/cce_10_0511.html b/docs/cce/umn/cce_10_0511.html index ced8a56a..acdcf802 100644 --- a/docs/cce/umn/cce_10_0511.html +++ b/docs/cce/umn/cce_10_0511.html @@ -1,12 +1,12 @@ -


                                Key GPU Add-on Parameters


                                Check Items

                                Check whether the configuration of the CCE AI Suite add-on in a cluster has been intrusively modified. If so, upgrading the cluster may fail.


                                Solution

                                1. Use kubectl to access the cluster.
                                2. Run the following command to obtain the add-on instance details:

                                  kubectl get ds nvidia-driver-installer -nkube-system -oyaml

                                3. Check whether the UpdateStrategy value is changed to OnDelete. If so, change it back to RollingUpdate.
                                4. Check whether the NVIDIA_DRIVER_DOWNLOAD_URL value is the same as the add-on IP address on the add-on details page. If no, change the value on the web page.
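A read-only way to review both values from the command line is sketched below. The jsonpath filter assumes that NVIDIA_DRIVER_DOWNLOAD_URL is defined as a container environment variable of the DaemonSet, as described above.

  # Current update strategy of the add-on DaemonSet (expected: RollingUpdate).
  kubectl -n kube-system get ds nvidia-driver-installer -o jsonpath='{.spec.updateStrategy.type}{"\n"}'
  # Configured driver download URL (compare it with the value on the add-on details page).
  kubectl -n kube-system get ds nvidia-driver-installer -o jsonpath='{.spec.template.spec.containers[*].env[?(@.name=="NVIDIA_DRIVER_DOWNLOAD_URL")].value}{"\n"}'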

11  CCE Agent Versions
    Check whether cce-agent on the current node is of the latest version.

56  Check containerd pod restart risk
    Check whether the service containers running on a node that uses containerd may restart when containerd is upgraded on that node.

57  Key GPU Add-on Parameters
    Check whether the configuration of the CCE AI Suite add-on in a cluster has been intrusively modified. If so, upgrading the cluster may fail.

58  GPU or NPU Pod Rebuild Risks
    Check whether GPU or NPU service pods are rebuilt in a cluster when kubelet is restarted during the upgrade of the cluster.

59  ELB Listener Access Control
    Check whether access control has been configured for an ELB listener of a Service in the current cluster using annotations and whether the configuration is correct.

60  Master Node Flavor
    Check whether the flavor of the master nodes in the cluster is the same as the actual flavor of these nodes.

61  Subnet Quota of Master Nodes
    Check whether the number of available IP addresses in the cluster subnet supports a rolling upgrade.

62  Node Runtime
    Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. Do not use Docker in clusters of versions later than v1.27.

63  Node Pool Runtime
    Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. Do not use Docker in clusters of versions later than v1.27.

64  Number of Node Images
    Check the number of images on your node. If there are more than 1,000 images, Docker may start up slowly.


Verify the following access paths. The source for every check is the same: pods inside the cluster, nodes inside the cluster, cloud servers outside the cluster but in the same VPC as the cluster, and clients outside the VPC to which the cluster belongs.

• Public IP address of the Service ELB — destination type: cluster traffic load balancing entry
• Private IP address of the Service ELB — destination type: cluster traffic load balancing entry
• Public IP address of the ingress ELB — destination type: cluster traffic load balancing entry
• Private IP address of the ingress ELB — destination type: cluster traffic load balancing entry
• Public IP address of a NodePort Service — destination type: cluster traffic entry
  Possible fault: the kube-proxy configuration is overwritten. This fault has been rectified in the upgrade process.
• Private IP address of a NodePort Service — destination type: cluster traffic entry
• ClusterIP Service — destination type: Service network plane
• Non-NodePort Service port — destination type: container network
• Cross-node pods — destination type: container network plane
• Pods on the same node — destination type: container network plane
• Service and pod domain names resolved by CoreDNS — destination type: domain name resolution
• External domain names resolved based on the CoreDNS hosts configuration — destination type: domain name resolution
  Possible fault: after CoreDNS is upgraded, the configuration is overwritten. This fault has been rectified in the add-on upgrade process.
• External domain names resolved based on the CoreDNS upstream server — destination type: domain name resolution
  Possible fault: after CoreDNS is upgraded, the configuration is overwritten. This fault has been rectified in the add-on upgrade process.
• External domain names not resolved by CoreDNS — destination type: domain name resolution
                            -
                            - - - - diff --git a/docs/cce/umn/cce_10_0564.html b/docs/cce/umn/cce_10_0564.html deleted file mode 100644 index 3a0927df..00000000 --- a/docs/cce/umn/cce_10_0564.html +++ /dev/null @@ -1,18 +0,0 @@ - - -

                            Node Label and Taint Check


                            Check Items

                            • Check whether custom node labels are lost.
• Check whether any unexpected taints have been newly added to the node, which would affect workload scheduling.

                            Procedure

                            1. Log in to the CCE console and click the cluster name to access the cluster console.
                            2. In the navigation pane, choose Nodes. On the displayed page, click the Nodes tab, select all nodes, and click Labels and Taints to view the labels and taints of the current node.

                            Solution

                            Custom labels will not be changed during a cluster upgrade. If you find that labels are lost or added unexpectedly, contact technical support.


                            If you find a new taint (node.kubernetes.io/upgrade) on a node, the node may be skipped during the upgrade. For details, see Node Skipping Check.


                            If you find that other taints are added to the node, contact technical support.

                            - - diff --git a/docs/cce/umn/cce_10_0566.html b/docs/cce/umn/cce_10_0566.html index 4fee0f1e..ec81b1bd 100644 --- a/docs/cce/umn/cce_10_0566.html +++ b/docs/cce/umn/cce_10_0566.html @@ -5,7 +5,7 @@

                            Procedure

                            After creating a node based on New Node Check, create a DaemonSet workload to create pods on each node.

                            1. Log in to the CCE console and click the cluster name to access the cluster console.
                            2. In the navigation pane, choose Workloads. On the displayed page, click Create Workload or Create from YAML in the upper right corner. For details about how to create a DaemonSet, see Creating a DaemonSet.

It is a good practice to use the image that you routinely use for tests as the base image. You can deploy a minimal pod for the application by referring to the following YAML file.

In this test, the YAML file deploys a DaemonSet in the default namespace, uses nginx:perl as the base image, requests 10m vCPUs and 10 MiB of memory, and limits resource usage to 100m vCPUs and 50 MiB of memory.

                              apiVersion: apps/v1
                               kind: DaemonSet
                              diff --git a/docs/cce/umn/cce_10_0601.html b/docs/cce/umn/cce_10_0601.html
                              index b3d5e933..5e326932 100644
                              --- a/docs/cce/umn/cce_10_0601.html
                              +++ b/docs/cce/umn/cce_10_0601.html
                              @@ -1,7 +1,7 @@
                               
                               
                               

                              Migrating Nodes from Docker to containerd

Context

Kubernetes removed dockershim in v1.24 and no longer supports Docker by default. CCE is going to stop supporting Docker. Change the node container engine from Docker to containerd.

                              Prerequisites

                              @@ -13,7 +13,7 @@

                              Migrating a Node Pool

                              You can copy a node pool, set the container engine of the new node pool to containerd, and keep other configurations the same as those of the original Docker node pool.

                              1. Log in to the CCE console and click the cluster name to access the cluster console.
                              2. In the navigation pane, choose Nodes. On the Node Pools tab page, locate the Docker node pool to be copied and choose More > Copy in the Operation column.


                              4. On the Compute Settings area, set Container Engine to containerd and modify other parameters as required.

5. Scale the new containerd node pool out to the same number of nodes as the original Docker node pool, and delete nodes from the Docker node pool one by one.

                                Rolling migration is preferred. That is, add some containerd nodes and then delete some Docker nodes until the number of nodes in the new containerd node pool is the same as that in the original Docker node pool.

                                If you have set node affinity for the workloads deployed on the original Docker nodes or node pool, set affinity policies for the workloads to run on the new containerd nodes or node pool.
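During and after the migration, you can confirm which container engine each node is actually running. For example:

  # The CONTAINER-RUNTIME column shows docker://... or containerd://... for each node.
  kubectl get nodes -o wide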

                                diff --git a/docs/cce/umn/cce_10_0602.html b/docs/cce/umn/cce_10_0602.html index 0c0fcb1f..d76b7b13 100644 --- a/docs/cce/umn/cce_10_0602.html +++ b/docs/cce/umn/cce_10_0602.html @@ -10,7 +10,7 @@

                                Method 2: Enabling it in an existing cluster

                                1. Log in to the CCE console and click the name of an existing cluster whose version is v1.23 or later.
                                2. On the Overview page, check the master node information. If overload control is not enabled, a message will be displayed. You can click Enable to enable the function.

                              Disabling Cluster Overload Control

                              1. Log in to the CCE console and go to an existing cluster whose version is v1.23 or later.
                              2. In the navigation pane, choose Settings.
                              3. On the Cluster Access tab page, disable overload control.
                              4. Click OK.
                              diff --git a/docs/cce/umn/cce_10_0603.html b/docs/cce/umn/cce_10_0603.html new file mode 100644 index 00000000..f5a3068b --- /dev/null +++ b/docs/cce/umn/cce_10_0603.html @@ -0,0 +1,96 @@ + + +

                              Configuring a Static IP Address for a Pod

                              +

                              Application Scenarios

In Cloud Native Network 2.0, each pod is associated with an ENI (container ENI), which makes it possible to provide static IP addresses for StatefulSet pods. Static IP addresses are commonly required for access control, service registration, service discovery, and log audit.

                              +

                              For example, if a StatefulSet service needs to control the access of a cloud database, you can fix the pod IP address of the service and configure the security group of the cloud database to allow only the service IP address to access the database.


                              Constraints

                              • You can configure a static IP address for a pod only in CCE Turbo clusters of the following versions:
                                • v1.25: v1.25.3-r0 or later
                                • v1.25 or later
                                +
                              • Currently, only StatefulSet pods or pods without ownerReferences can be configured with static IP addresses. Deployments, DaemonSets, and other types of workloads cannot be configured with static IP addresses. In addition, pods with HostNetwork configured cannot be configured with static IP addresses.
                              • Do not configure static IP addresses for services that do not have specific requirements on pod IP addresses. Otherwise, the pod rebuilding takes a longer time and the IP address usage decreases.
                              • The annotations of the static IP address of the pod object cannot be directly modified. Otherwise, the modification does not take effect in the background. To modify the annotations, modify the annotations configuration in the spec.template field of the corresponding StatefulSet workload.
                              • If there are no ENIs left on the node where the pod with a static IP address is rebuilt and scheduled (the pre-bound ENIs also occupy the ENI quota), the static IP address ENIs preempt the pre-bound ENIs. In this case, the pod starts slightly slowly. If a node uses a static IP address, properly configure the dynamic pre-binding policy for the node to ensure that not all ENIs are pre-bound.

                              Using the CCE Console

                              When creating a workload on the console, you can set the static IP address for a pod in the Advanced Settings > Network Configuration area.

                              +
                              • Whether to enable fixed IP addresses: After the function is enabled, the pod IP address does not change each time the pod is restarted.
                              • Recycling Interval: Retention period of related IP addresses after the pod is deleted. During this interval, the original pod IP address cannot be used by other pods.
                              +


                              Using kubectl

                              You can add annotations to a StatefulSet to enable or disable the static IP address function of the pod.

                              +
                              apiVersion: apps/v1
                              +kind: StatefulSet
                              +metadata:
                              +  name: nginx
                              +spec:
                              +  serviceName: nginx
                              +  replicas: 3
                              +  selector:
                              +    matchLabels:
                              +      app: nginx
                              +  template:
                              +    metadata:
                              +      labels:
                              +        app: nginx
                              +      annotations:
                              +        pod.alpha.kubernetes.io/initialized: 'true'
                              +        yangtse.io/static-ip: 'true'
                              +        yangtse.io/static-ip-expire-no-cascading: 'false'
                              +        yangtse.io/static-ip-expire-duration: 5m
                              +    spec:
                              +      containers:
                              +        - name: container-0
                              +          image: nginx:alpine
                              +          resources:
                              +            limits:
                              +              cpu: 100m
                              +              memory: 200Mi
                              +            requests:
                              +              cpu: 100m
                              +              memory: 200Mi
                              +      imagePullSecrets:
                              +        - name: default-secret
                              + +
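To verify the behavior, you can save a manifest like the one above (the file name nginx-statefulset.yaml is used here only for illustration), apply it, and check that a rebuilt pod keeps its IP address:

  kubectl apply -f nginx-statefulset.yaml
  # Record the current pod IP address.
  kubectl get pod nginx-0 -o wide
  # Delete the pod. The StatefulSet controller rebuilds it.
  kubectl delete pod nginx-0
  # The rebuilt pod should come back with the same IP address while the static IP is retained.
  kubectl get pod nginx-0 -o wide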
Table 1 Annotations of the pod's static IP address

yangtse.io/static-ip
  Default value: false
  Value range: false or true
  Description: Specifies whether to enable the static IP address of a pod. This function is supported only for StatefulSet pods or pods without ownerReferences. This function is disabled by default.

yangtse.io/static-ip-expire-duration
  Default value: 5m
  Value range: Go time format, for example, 1h30m or 5m. For details, see the Go time type.
  Description: Specifies the interval for reclaiming the expired ENI of the static IP address after the pod with a static IP address is deleted.

yangtse.io/static-ip-expire-no-cascading
  Default value: false
  Value range: false or true
  Description: Specifies whether to disable cascading reclamation of StatefulSet workloads. The default value is false, indicating that the corresponding static IP address ENI will be deleted with the StatefulSet workload. If you want to retain the static IP address for a new StatefulSet with the same name during the interval for reclaiming the expired ENI, set the value to true.

                              +
                              + +
                              + diff --git a/docs/cce/umn/cce_10_0604.html b/docs/cce/umn/cce_10_0604.html new file mode 100644 index 00000000..bedcdf0c --- /dev/null +++ b/docs/cce/umn/cce_10_0604.html @@ -0,0 +1,32 @@ + + +

                              Configuring Shared Bandwidth for a Pod with IPv6 Dual-Stack ENIs

                              +

                              Application Scenarios

                              By default, pods with IPv6 dual-stack ENIs can access only the IPv6 private network. To access the public network, configure shared bandwidth for such pods.

                              +
                              +

                              Constraints

• This function is available only in CCE Turbo clusters that meet the following requirements:
  • IPv6 dual stack has been enabled for the cluster.
  • The cluster version is v1.23.8-r0, v1.25.3-r0, or later.
• The number of IPv6 ENIs that can be added to a shared bandwidth is limited by the tenant quota. The default quota is 20.
• Pods using the host network (hostNetwork) are not supported.
• All workload types are supported. When configuring an IPv6 shared bandwidth for workloads with the replicas attribute, such as Deployments and StatefulSets, ensure that both the number of replicas and the maximum number of pods during an upgrade are less than the remaining quota of IPv6 ENIs that can be added to the shared bandwidth.
• For an IPv6 dual-stack pod configured with a shared bandwidth: when the pod is created, the CNI returns a success message only after the IPv6 dual-stack ENI has been added to the shared bandwidth; when the pod is deleted, the IPv6 dual-stack ENI is removed from the shared bandwidth after the pod is completely deleted or has stayed in the deleting state for 30 seconds.
• If the IPv6 dual-stack ENI corresponding to the pod fails to be added to the shared bandwidth (for example, because the quota is exceeded or flow control is triggered), an alarm event FailedIPv6InsertBandwidth is generated for the pod. Rectify the fault based on the alarm event.
• On the Shared Bandwidths page of the EIP console, open the details page of the target shared bandwidth and click the IPv6 Addresses tab. The IPv6 dual-stack ENIs whose Associated Instance is CCE are displayed. Do not remove these ENIs directly on that page or by using VPC APIs. Otherwise, your services may be affected.
                              +
                              +

                              Using the CCE Console

When creating a workload, you can configure the IPv6 shared bandwidth in the Advanced Settings > Network Configuration area.

                              +

                              +

                              +
                              +

                              Using kubectl

You can add an annotation to a Deployment to specify the shared bandwidth to which the IPv6 dual-stack ENIs of its pods will be added. The following is an example:

                              +
                              ...
                              +spec:
                              +  selector:
                              +    matchLabels:
                              +      app: demo
                              +      version: v1
                              +  template:
                              +    metadata:
                              +      annotations:
                              +        yangtse.io/ipv6-bandwidth-id: "xxx"
                              +
                              • yangtse.io/ipv6-bandwidth-id: specifies the ID of the shared bandwidth. The IPv6 dual-stack ENIs corresponding to the pod will be added to the shared bandwidth. You can query the ID on the Shared Bandwidths page on the EIP console.
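For orientation, a complete Deployment carrying this annotation could look like the sketch below. Only the selector labels (app: demo, version: v1) and the annotation come from the fragment above; the workload name, image, and resource values are illustrative assumptions.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: demo                                   # Illustrative workload name
spec:
  replicas: 1
  selector:
    matchLabels:
      app: demo
      version: v1
  template:
    metadata:
      labels:
        app: demo
        version: v1
      annotations:
        yangtse.io/ipv6-bandwidth-id: "xxx"    # Replace with the ID of your shared bandwidth
    spec:
      containers:
        - name: container-0
          image: nginx:alpine                  # Illustrative image
          resources:
            requests:
              cpu: 100m
              memory: 200Mi
            limits:
              cpu: 100m
              memory: 200Mi
      imagePullSecrets:
        - name: default-secret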
                              +
                              +
                              +
                              + +
diff --git a/docs/cce/umn/cce_10_0605.html b/docs/cce/umn/cce_10_0605.html
index ebdff2b7..0987cbc6 100644
--- a/docs/cce/umn/cce_10_0605.html
+++ b/docs/cce/umn/cce_10_0605.html
@@ -1,8 +1,8 @@

                              Draining a Node

                              -

                              Scenario

                              After you enable the nodal drainage function on the console, the system sets the node to be non-schedulable and securely evicts all pods that comply with Nodal Drainage Rules on the node. Subsequent new pods will not be scheduled to the node.

                              -

                              When a node is faulty, this function helps you quickly isolate the faulty node. The evicted pods will be transferred from the workload controller to another node that can be scheduled properly.

                              +

                              Scenario

                              After you enable nodal drainage on the console, CCE configures the node to be non-schedulable and securely evicts all pods that comply with Nodal Drainage Rules on the node. Subsequent new pods will not be scheduled to this node.

                              +

                              When a node becomes faulty, nodal drainage quickly isolates the faulty node. The pods evicted from the faulty node will be scheduled by the workload controller to other nodes that are running properly.

                              Constraints

                              • Only clusters of the following versions support the nodal drainage function:
                                • v1.21: v1.21.10-r0 or later
                                • v1.23: v1.23.8-r0 or later
                                • v1.25: v1.25.3-r0 or later
  • Versions later than v1.25
                              • To use the nodal drainage function, an IAM user must have at least one of the following permissions. For details, see Namespace Permissions (Kubernetes RBAC-based).
                                • cluster-admin (administrator): read and write permissions on all resources in all namespaces.
                                • drainage-editor: drain a node.
                                • drainage-viewer: view the nodal drainage status but cannot drain a node.
diff --git a/docs/cce/umn/cce_10_0614.html b/docs/cce/umn/cce_10_0614.html
index 3ae8a4a9..37b92af5 100644
--- a/docs/cce/umn/cce_10_0614.html
+++ b/docs/cce/umn/cce_10_0614.html
@@ -1,14 +1,14 @@

                                Using an Existing EVS Disk Through a Static PV

                                -

                                CCE allows you to create a PV using an existing EVS disk. After the PV is created, you can create a PVC and bind it to the PV. This mode applies to scenarios where the underlying storage is available.

                                -

                                Prerequisites

                                • You have created a cluster and installed the CCE Container Storage (Everest) add-on in the cluster.
                                • You have created an EVS disk that meets the following requirements:
                                  • The existing EVS disk cannot be a system disk, DSS disk, or shared disk.
                                  • The device type of the EVS disk must be SCSI (the default device type is VBD when you purchase an EVS disk).
                                  • The EVS disk must be available and not used by other resources.
                                  • The AZ of the EVS disk must be the same as that of the cluster node. Otherwise, the EVS disk cannot be mounted and the pod cannot start.
                                  • If the EVS disk is encrypted, the key must be available.
                                  • EVS disks that have been partitioned are not supported.
                                  -
                                • If you want to create a cluster using commands, use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
                                +

                                CCE allows you to create a PV using an existing EVS disk. After the PV is created, you can create a PVC and bind it to the PV. This mode applies if the underlying storage is available.

                                +

                                Prerequisites

                                • You have created a cluster and installed the CCE Container Storage (Everest) add-on in the cluster.
                                • You have created an EVS disk that meets the following requirements:
                                  • The existing EVS disk cannot be a system disk, DSS disk, or shared disk.
                                  • The EVS disk must be of the SCSI type (the default disk type is VBD when you create an EVS disk).
                                  • The EVS disk must be available and not used by other resources.
                                  • The AZ of the EVS disk must be the same as that of the cluster node. Otherwise, the EVS disk cannot be mounted and the pod cannot start.
                                  • If the EVS disk is encrypted, the key must be available.
                                  • EVS disks that have been partitioned are not supported.
                                  +
• If you want to create the storage resources using commands, ensure that kubectl can be used to access the cluster. For details, see Connecting to a Cluster Using kubectl.
                                -

                                Constraints

• EVS disks cannot be attached across AZs and cannot be used by multiple workloads, multiple pods of the same workload, or multiple tasks. Data sharing of a shared disk is not supported between nodes in a CCE cluster. If an EVS disk is attached to multiple nodes, I/O conflicts and data cache conflicts may occur. Therefore, create only one pod when creating a Deployment that uses EVS disks.
                                • For clusters earlier than v1.19.10, if an HPA policy is used to scale out a workload with EVS disks attached, the existing pods cannot be read or written when a new pod is scheduled to another node.

                                  For clusters of v1.19.10 and later, if an HPA policy is used to scale out a workload with EVS disks attached, a new pod cannot be started because EVS disks cannot be attached.

                                  +

                                  Constraints

• EVS disks cannot be attached across AZs and cannot be used by multiple workloads, multiple pods of the same workload, or multiple tasks. Data sharing of a shared disk is not supported between nodes in a CCE cluster. If an EVS disk is attached to multiple nodes, I/O conflicts and data cache conflicts may occur. Therefore, create only one pod when creating a Deployment that uses EVS disks.
                                  • For clusters earlier than v1.19.10, if an HPA policy is used to scale out a workload with EVS volumes mounted, the existing pods cannot be read or written when a new pod is scheduled to another node.

                                    For clusters of v1.19.10 and later, if an HPA policy is used to scale out a workload with EVS volumes mounted, a new pod cannot be started because EVS disks cannot be attached.

                                  -

                                  Using an Existing EVS Disk on the Console

                                  1. Log in to the CCE console and click the cluster name to access the cluster console.
                                  2. Statically create a PVC and PV.

                                    1. Choose Storage in the navigation pane and click the PersistentVolumeClaims (PVCs) tab. Click Create PVC in the upper right corner. In the dialog box displayed, configure the PVC parameters. +

                                      Using an Existing EVS Disk on the Console

                                      1. Log in to the CCE console and click the cluster name to access the cluster console.
                                      2. Statically create a PVC and PV.

                                        1. Choose Storage in the navigation pane and click the PVCs tab. Click Create PVC in the upper right corner. In the dialog box displayed, configure the PVC parameters.
                                          - @@ -63,7 +63,7 @@

                                          a: The parameter is available when Creation Method is set to Use existing.

                                          b: The parameter is available when Creation Method is set to Create new.

                                          -
                                        2. Click Create to create a PVC and a PV.

                                          You can choose Storage in the navigation pane and view the created PVC and PV on the PersistentVolumeClaims (PVCs) and PersistentVolumes (PVs) tab pages, respectively.

                                          +
                                        3. Click Create to create a PVC and a PV.

                                          You can choose Storage in the navigation pane and view the created PVC and PV on the PVCs and PVs tab pages, respectively.

                                        4. Create an application.

                                          1. In the navigation pane on the left, click Workloads. In the right pane, click the StatefulSets tab.
                                          2. Click Create Workload in the upper right corner. On the displayed page, click Data Storage in the Container Settings area and click Add Volume to select PVC.
                                            Mount and use storage volumes, as shown in Table 1. For details about other parameters, see Workloads.
                                        5. Parameter

                                          Description

                                          @@ -33,7 +33,7 @@

                                          PVa

                                          Select an existing PV in the cluster. Create a PV in advance. For details, see "Creating a storage volume" in Related Operations.

                                          +

                                          Select an existing PV volume in the cluster. Create a PV in advance. For details, see "Creating a storage volume" in Related Operations.

                                          You do not need to specify this parameter in this example.

@@ -217,7 +217,7 @@ spec:
@@ -232,7 +232,7 @@
 metadata:
   namespace: default
   annotations:
     everest.io/disk-volume-type: SAS    # EVS disk type.
-    everest.io/crypt-key-id: <your_key_id>    # (Optional) Encryption key ID. Mandatory for an encrypted disk.
+    everest.io/crypt-key-id: <your_key_id>    # (Optional) Encryption key ID. Mandatory for an encrypted disk.
   labels:
     failure-domain.beta.kubernetes.io/region: <your_region>    # Region of the node where the application is to be deployed.
@@ -243,7 +243,7 @@ spec:
   resources:
     requests:
       storage: 10Gi    # EVS disk capacity, ranging from 1 to 32768. The value must be the same as the storage size of the existing PV.
-  storageClassName: csi-disk    # Storage class type for EVS disks.
+  storageClassName: csi-disk    # The storage class is EVS.
   volumeName: pv-evs    # PV name.
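For orientation, a minimal sketch of the full PVC that binds the existing disk is shown below. The accessModes value and the PVC name pvc-evs are assumptions here; the annotation, labels, capacity, storage class, and volumeName come from the fragments above, and the placeholders must be replaced with real values.

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-evs                      # Assumed PVC name
  namespace: default
  annotations:
    everest.io/disk-volume-type: SAS # EVS disk type
  labels:
    failure-domain.beta.kubernetes.io/region: <your_region>
    failure-domain.beta.kubernetes.io/zone: <your_zone>
spec:
  accessModes:
    - ReadWriteOnce                  # EVS disks support only ReadWriteOnce
  resources:
    requests:
      storage: 10Gi                  # Must match the storage size of the existing PV
  storageClassName: csi-disk
  volumeName: pv-evs                 # Name of the PV to bind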
                                          Table 1 Mounting a storage volume

                                          Parameter

                                          @@ -108,7 +108,7 @@

                                          -

                                          (kubectl) Using an Existing EVS Disk

                                          1. Use kubectl to connect to the cluster.
                                          2. Create a PV. If a PV has been created in your cluster, skip this step.

                                            1. Create the pv-evs.yaml file.
                                              apiVersion: v1
                                              +

                                              (kubectl) Using an Existing EVS Disk

                                              1. Use kubectl to access the cluster.
                                              2. Create a PV. If a PV has been created in your cluster, skip this step.

                                                1. Create the pv-evs.yaml file.
                                                  apiVersion: v1
                                                   kind: PersistentVolume
                                                   metadata:
                                                     annotations:
                                                  @@ -122,7 +122,7 @@ spec:
                                                     accessModes:
                                                       - ReadWriteOnce     # Access mode. The value must be ReadWriteOnce for EVS disks.
                                                     capacity:
                                                  -    storage: 10Gi       # EVS disk capacity, in the unit of Gi. The value ranges from 1 to 32768.
                                                  +    storage: 10Gi       # EVS disk capacity, in the unit of GiB. The value ranges from 1 to 32768.
                                                     csi:
                                                       driver: disk.csi.everest.io     # Dependent storage driver for the mounting.
                                                       fsType: ext4    # Must be the same as that of the original file system of the disk.
                                                  @@ -131,7 +131,7 @@ spec:
                                                         everest.io/disk-mode: SCSI           # Device type of the EVS disk. Only SCSI is supported.
                                                         everest.io/disk-volume-type: SAS     # EVS disk type.
                                                         storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
                                                  -      everest.io/crypt-key-id: <your_key_id>    # (Optional) Encryption key ID. Mandatory for an encrypted disk.
                                                  +      everest.io/crypt-key-id: <your_key_id>    # (Optional) Encryption key ID. Mandatory for an encrypted disk.
                                                   
                                                     persistentVolumeReclaimPolicy: Delete    # Reclaim policy.
                                                     storageClassName: csi-disk              # Storage class name. The value must be csi-disk for EVS disks.
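For orientation, a minimal sketch of what the complete pv-evs.yaml could look like is given below. The volumeHandle field (set to the EVS disk ID) and the provisioned-by annotation are assumptions based on the standard CSI PV layout; the other fields come from the fragments above.

apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-evs
  annotations:
    pv.kubernetes.io/provisioned-by: everest-csi-provisioner   # Assumed annotation
spec:
  accessModes:
    - ReadWriteOnce                   # The value must be ReadWriteOnce for EVS disks
  capacity:
    storage: 10Gi                     # EVS disk capacity, in GiB
  csi:
    driver: disk.csi.everest.io       # Dependent storage driver for the mounting
    fsType: ext4                      # Must be the same as the original file system of the disk
    volumeHandle: <your_volume_id>    # Assumed field: ID of the existing EVS disk
    volumeAttributes:
      everest.io/disk-mode: SCSI      # Device type of the EVS disk. Only SCSI is supported
      everest.io/disk-volume-type: SAS
      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
  persistentVolumeReclaimPolicy: Delete   # Reclaim policy
  storageClassName: csi-disk              # The value must be csi-disk for EVS disks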
                                                  @@ -158,7 +158,7 @@ spec:

                                          Yes

                                          Region where the cluster is located.

                                          -

                                          For details about the value of region, see Regions and Endpoints.

                                          +

                                          For details about the value of region, see Regions and Endpoints.

                                          failure-domain.beta.kubernetes.io/zone

                                          @@ -166,7 +166,7 @@ spec:

                                          Yes

                                          AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

                                          -

                                          For details about the value of zone, see Regions and Endpoints.

                                          +

                                          For details about the value of zone, see Regions and Endpoints.

                                          fsType

                                          @@ -174,7 +174,7 @@ spec:

                                          Yes

                                          Configure the file system type. The value defaults to ext4.

                                          -
                                          The value can be ext4 or xfs. The restrictions on using xfs are as follows:
                                          • The nodes should run CentOS 7 or Ubuntu 22.04, and the Everest version in the cluster should be 2.3.2 or later.
                                          • Only common containers are supported.
                                          +
                                          The value can be ext4 or xfs. The restrictions on using xfs are as follows:
                                          • The nodes must run CentOS 7 or Ubuntu 22.04, and the Everest version in the cluster must be 2.3.2 or later.
                                          • Only common containers are supported.

                                          Yes

                                          The storage class name for EVS disks is csi-disk.

                                          +

                                          The storage class for EVS disks is csi-disk.

                                          @@ -388,7 +388,7 @@ spec: - @@ -396,21 +396,21 @@ spec: - - - diff --git a/docs/cce/umn/cce_10_0615.html b/docs/cce/umn/cce_10_0615.html index 406e6f2c..f5ef0c34 100644 --- a/docs/cce/umn/cce_10_0615.html +++ b/docs/cce/umn/cce_10_0615.html @@ -2,12 +2,12 @@

                                          Using an EVS Disk Through a Dynamic PV

                                          CCE allows you to specify a StorageClass to automatically create an EVS disk and the corresponding PV. This function is applicable when no underlying storage volume is available.

                                          -

                                          Prerequisites

                                          +

                                          Prerequisites

                                          -

                                          Constraints

• EVS disks cannot be attached across AZs and cannot be used by multiple workloads, multiple pods of the same workload, or multiple tasks. Data sharing of a shared disk is not supported between nodes in a CCE cluster. If an EVS disk is attached to multiple nodes, I/O conflicts and data cache conflicts may occur. Therefore, create only one pod when creating a Deployment that uses EVS disks.
                                          • For clusters earlier than v1.19.10, if an HPA policy is used to scale out a workload with EVS disks attached, the existing pods cannot be read or written when a new pod is scheduled to another node.

                                            For clusters of v1.19.10 and later, if an HPA policy is used to scale out a workload with EVS disks attached, a new pod cannot be started because EVS disks cannot be attached.

                                            +

                                            Constraints

• EVS disks cannot be attached across AZs and cannot be used by multiple workloads, multiple pods of the same workload, or multiple tasks. Data sharing of a shared disk is not supported between nodes in a CCE cluster. If an EVS disk is attached to multiple nodes, I/O conflicts and data cache conflicts may occur. Therefore, create only one pod when creating a Deployment that uses EVS disks.
                                            • For clusters earlier than v1.19.10, if an HPA policy is used to scale out a workload with EVS volumes mounted, the existing pods cannot be read or written when a new pod is scheduled to another node.

                                              For clusters of v1.19.10 and later, if an HPA policy is used to scale out a workload with EVS volumes mounted, a new pod cannot be started because EVS disks cannot be attached.

                                            -

                                            (Console) Automatically Creating an EVS Disk

                                            1. Log in to the CCE console and click the cluster name to access the cluster console.
                                            2. Dynamically create a PVC and PV.

                                              1. Choose Storage in the navigation pane and click the PersistentVolumeClaims (PVCs) tab. Click Create PVC in the upper right corner. In the dialog box displayed, configure the PVC parameters. +

                                                (Console) Automatically Creating an EVS Disk

                                                1. Log in to the CCE console and click the cluster name to access the cluster console.
                                                2. Dynamically create a PVC and PV.

                                                  1. Choose Storage in the navigation pane and click the PVCs tab. Click Create PVC in the upper right corner. In the dialog box displayed, configure the PVC parameters.
                                          Table 3 Key parameters

                                          Parameter

                                          @@ -259,7 +259,7 @@ spec:

                                          Yes

                                          Region where the cluster is located.

                                          -

                                          For details about the value of region, see Regions and Endpoints.

                                          +

                                          For details about the value of region, see Regions and Endpoints.

                                          failure-domain.beta.kubernetes.io/zone

                                          @@ -267,7 +267,7 @@ spec:

                                          Yes

                                          AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

                                          -

                                          For details about the value of zone, see Regions and Endpoints.

                                          +

                                          For details about the value of zone, see Regions and Endpoints.

                                          storage

                                          @@ -290,7 +290,7 @@ spec:

                                          Yes

                                          Storage class name, which must be the same as the storage class of the PV in 1.

                                          -

                                          The storage class name of the EVS volumes is csi-disk.

                                          +

                                          The storage class for EVS disks is csi-disk.

                                          Create a PV on the CCE console.

                                          1. Choose Storage in the navigation pane and click the PersistentVolumes (PVs) tab. Click Create Volume in the upper right corner. In the dialog box displayed, configure the parameters.
                                            • Volume Type: Select EVS.
                                            • EVS: Click Select EVS. On the displayed page, select the EVS disk that meets your requirements and click OK.
                                            • PV Name: Enter the PV name, which must be unique in the same cluster.
                                            • Access Mode: EVS disks support only ReadWriteOnce, indicating that a storage volume can be mounted to one node in read/write mode. For details, see Volume Access Modes.
                                            • Reclaim Policy: Delete or Retain. For details, see PV Reclaim Policy.
                                            +
                                          1. Choose Storage in the navigation pane and click the PVs tab. Click Create PersistentVolume in the upper right corner. In the dialog box displayed, configure parameters.
                                            • Volume Type: Select EVS.
                                            • EVS: Click Select EVS. On the displayed page, select the EVS disk that meets your requirements and click OK.
                                            • PV Name: Enter the PV name, which must be unique in the same cluster.
                                            • Access Mode: EVS disks support only ReadWriteOnce, indicating that a storage volume can be mounted to one node in read/write mode. For details, see Volume Access Modes.
                                            • Reclaim Policy: Delete or Retain is supported. For details, see PV Reclaim Policy.
                                          2. Click Create.

                                          Quickly expand the capacity of a mounted EVS disk on the CCE console.

                                          1. Choose Storage in the navigation pane and click the PersistentVolumeClaims (PVCs) tab. Click More in the Operation column of the target PVC and select Scale-out.
                                          2. Enter the capacity to be added and click OK.
                                          +
                                          1. Choose Storage in the navigation pane and click the PVCs tab. Click More in the Operation column of the target PVC and select Scale-out.
                                          2. Enter the capacity to be added and click OK.

                                          Viewing events

                                          You can view event names, event types, number of occurrences, Kubernetes events, first occurrence time, and last occurrence time of the PVC or PV.

                                          1. Choose Storage in the navigation pane and click the PersistentVolumeClaims (PVCs) or PersistentVolumes (PVs) tab.
                                          2. Click View Events in the Operation column of the target PVC or PV to view events generated within one hour (event data is retained for one hour).
                                          +
                                          1. Choose Storage in the navigation pane and click the PVCs or PVs tab.
                                          2. Click View Events in the Operation column of the target PVC or PV to view events generated within one hour (event data is retained for one hour).

                                          Viewing a YAML file

                                          You can view, copy, and download the YAML files of a PVC or PV.

                                          1. Choose Storage in the navigation pane and click the PersistentVolumeClaims (PVCs) or PersistentVolumes (PVs) tab.
                                          2. Click View YAML in the Operation column of the target PVC or PV to view or download the YAML.
                                          +
                                          1. Choose Storage in the navigation pane and click the PVCs or PVs tab.
                                          2. Click View YAML in the Operation column of the target PVC or PV to view or download the YAML.
                                          - @@ -44,7 +44,7 @@ - -

                                          Parameter

                                          Description

                                          @@ -26,7 +26,7 @@

                                          Creation Method

                                          • If no underlying storage is available, select Dynamically provision to create a PVC, PV, and underlying storage on the console in cascading mode.
                                          • If underlying storage is available, create a PV or use an existing PV to statically create a PVC based on whether a PV is available. For details, see Using an Existing EVS Disk Through a Static PV.
                                          +
                                          • If no underlying storage is available, select Dynamically provision to create a PVC, PV, and underlying storage on the console in cascading mode.
                                          • If underlying storage is available, create a storage volume or use an existing storage volume to statically create a PVC based on whether a PV is available. For details, see Using an Existing EVS Disk Through a Static PV.

                                          In this example, select Dynamically provision.

                                          Disk Type

                                          Select an EVS disk type.

                                          +

                                          Select an EVS disk type. EVS disk types vary depending on regions. Obtain the available EVS types on the console.

                                          Access Mode

                                          @@ -59,13 +59,13 @@

                                          Encryption

                                          You can select Encryption and an encryption key to encrypt underlying storage. Before using the encryption function, check whether the region where the EVS disk is located supports disk encryption.

                                          +

                                          Configure whether to encrypt underlying storage. If you select Enabled (key), an encryption key must be configured. Before using encryption, check whether the region where the EVS disk is located supports disk encryption.

                                          Resource Tag

                                          You can add resource tags to classify resources, which is supported only when the Everest version in the cluster is 2.1.39 or later.

                                          -

                                          You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

                                          +

                                          You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

                                          CCE automatically creates system tags CCE-Cluster-ID={Cluster ID}, CCE-Cluster-Name={Cluster name}, and CCE-Namespace={Namespace name}. These tags cannot be modified.

                                          NOTE:

                                          After a dynamic PV of the EVS type is created, the resource tags cannot be updated on the CCE console. To update these resource tags, go to the EVS console.

                                          @@ -74,7 +74,7 @@
                                          -
                                        6. Click Create.

                                          You can choose Storage in the navigation pane and view the created PVC and PV on the PersistentVolumeClaims (PVCs) and PersistentVolumes (PVs) tab pages, respectively.

                                          +
                                        7. Click Create.

                                          You can choose Storage in the navigation pane and view the created PVC and PV on the PVCs and PVs tab pages, respectively.

                                      3. Create an application.

                                        1. In the navigation pane on the left, click Workloads. In the right pane, click the StatefulSets tab.
                                        2. Click Create Workload in the upper right corner. On the displayed page, click Data Storage in the Container Settings area and click Add Volume to select PVC.
                                          Mount and use storage volumes, as shown in Table 1. For details about other parameters, see Workloads.
                                          Table 1 Mounting a storage volume

                                          Parameter

                                          @@ -119,16 +119,16 @@

                                          -

                                          (kubectl) Automatically Creating an EVS Disk

                                          1. Use kubectl to connect to the cluster.
                                          2. Use StorageClass to dynamically create a PVC and PV.

                                            1. Create the pvc-evs-auto.yaml file.
                                              apiVersion: v1
                                              +

                                              (kubectl) Automatically Creating an EVS Disk

                                              1. Use kubectl to access the cluster.
                                              2. Use StorageClass to dynamically create a PVC and PV.

                                                1. Create the pvc-evs-auto.yaml file.
                                                  apiVersion: v1
                                                   kind: PersistentVolumeClaim
                                                   metadata:
                                                     name: pvc-evs-auto
                                                     namespace: default
                                                     annotations:
                                                         everest.io/disk-volume-type: SAS    # EVS disk type.
                                                  -    everest.io/crypt-key-id: <your_key_id>    # (Optional) Encryption key ID. Mandatory for an encrypted disk.
                                                  +    everest.io/crypt-key-id: <your_key_id>    # (Optional) Encryption key ID. Mandatory for an encrypted disk.
                                                   
                                                  -    everest.io/disk-volume-tags: '{"key1":"value1","key2":"value2"}' # (Optional) Custom resource tags
                                                  +    everest.io/disk-volume-tags: '{"key1":"value1","key2":"value2"}' # (Optional) Custom resource tags
                                                       csi.storage.k8s.io/fstype: xfs    # (Optional) Set the file system type to xfs. If it is left blank, ext4 is used by default.
                                                     labels:
                                                       failure-domain.beta.kubernetes.io/region: <your_region>   # Region of the node where the application is to be deployed.
                                                  @@ -139,7 +139,7 @@ spec:
                                                     resources:
                                                       requests:
                                                         storage: 10Gi             # EVS disk capacity, ranging from 1 to 32768.
                                                  -  storageClassName: csi-disk    # Storage class type for EVS disks.
                                                  + storageClassName: csi-disk # The storage class is EVS.
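For orientation, a minimal sketch of the complete pvc-evs-auto.yaml could look as follows. The accessModes value is an assumption (EVS disks support only ReadWriteOnce elsewhere in this guide); the annotations, labels, capacity, and storage class come from the fragments above, and the optional annotations may be omitted.

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-evs-auto
  namespace: default
  annotations:
    everest.io/disk-volume-type: SAS    # EVS disk type
    csi.storage.k8s.io/fstype: ext4     # (Optional) File system type; defaults to ext4
  labels:
    failure-domain.beta.kubernetes.io/region: <your_region>
    failure-domain.beta.kubernetes.io/zone: <your_zone>
spec:
  accessModes:
    - ReadWriteOnce                     # EVS disks support only ReadWriteOnce
  resources:
    requests:
      storage: 10Gi                     # EVS disk capacity, ranging from 1 to 32768
  storageClassName: csi-disk            # The storage class for EVS disks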
@@ -154,7 +154,7 @@ spec:
@@ -211,7 +211,7 @@ spec:
@@ -309,21 +309,21 @@ spec:
diff --git a/docs/cce/umn/cce_10_0616.html b/docs/cce/umn/cce_10_0616.html
index a839dd03..8aecab97 100644
--- a/docs/cce/umn/cce_10_0616.html
+++ b/docs/cce/umn/cce_10_0616.html
@@ -3,7 +3,7 @@

                                              Dynamically Mounting an EVS Disk to a StatefulSet

                                              Application Scenarios

                                              Dynamic mounting is available only for creating a StatefulSet. It is implemented through a volume claim template (volumeClaimTemplates field) and depends on the storage class to dynamically provision PVs. In this mode, each pod in a multi-pod StatefulSet is associated with a unique PVC and PV. After a pod is rescheduled, the original data can still be mounted to it based on the PVC name. In the common mounting mode for a Deployment, if ReadWriteMany is supported, multiple pods of the Deployment will be mounted to the same underlying storage.
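For orientation, the sketch below shows how volumeClaimTemplates ties each StatefulSet pod to its own PVC. The workload name, image, mount path, and disk parameters are illustrative assumptions; the actual example used in this section follows in the kubectl part below.

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: statefulset-evs
spec:
  serviceName: statefulset-evs          # Assumed headless Service name
  replicas: 2
  selector:
    matchLabels:
      app: statefulset-evs
  template:
    metadata:
      labels:
        app: statefulset-evs
    spec:
      containers:
        - name: container-0
          image: nginx:alpine           # Illustrative image
          volumeMounts:
            - name: pvc-disk            # Must match the volume claim template name
              mountPath: /data
  volumeClaimTemplates:                 # One PVC (and PV) is created per pod from this template
    - metadata:
        name: pvc-disk
        annotations:
          everest.io/disk-volume-type: SAS
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 10Gi
        storageClassName: csi-disk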

                                              -

                                              Prerequisites

                                              +

                                              Prerequisites

                                              (Console) Dynamically Mounting an EVS Disk

                                              1. Log in to the CCE console and click the cluster name to access the cluster console.
                                              2. In the navigation pane on the left, click Workloads. In the right pane, click the StatefulSets tab.
3. Click Create Workload in the upper right corner. On the displayed page, click Data Storage in the Container Settings area and click Add Volume to select VolumeClaimTemplate (VCT).
                                              4. Click Create PVC. In the dialog box displayed, configure the PVC parameters.

                                                Click Create.
                                              Table 2 Key parameters

                                              Parameter

                                              Yes

                                              Region where the cluster is located.

                                              -

                                              For details about the value of region, see Regions and Endpoints.

                                              +

                                              For details about the value of region, see Regions and Endpoints.

                                              failure-domain.beta.kubernetes.io/zone

                                              @@ -162,7 +162,7 @@ spec:

                                              Yes

                                              AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

                                              -

                                              For details about the value of zone, see Regions and Endpoints.

                                              +

                                              For details about the value of zone, see Regions and Endpoints.

                                              everest.io/disk-volume-type

                                              @@ -195,8 +195,8 @@ spec:

                                              No

                                              This field is optional. It specifies the file system type. The default value is ext4.

                                              -
                                              The value can be ext4 or xfs. The restrictions on using xfs are as follows:
                                              • The nodes should run CentOS 7 or Ubuntu 22.04, and the Everest version in the cluster should be 2.3.2 or later.
                                              • Only common containers are supported.
                                              +

                                              This field is optional. It specifies the file system type. The default value is ext4.

                                              +
                                              The value can be ext4 or xfs. The restrictions on using xfs are as follows:
                                              • The nodes must run CentOS 7 or Ubuntu 22.04, and the Everest version in the cluster must be 2.3.2 or later.
                                              • Only common containers are supported.

                                              Yes

                                              The storage class name of the EVS volumes is csi-disk.

                                              +

                                              The storage class for EVS disks is csi-disk.

                                              Quickly expand the capacity of a mounted EVS disk on the CCE console.

                                              1. Choose Storage in the navigation pane and click the PersistentVolumeClaims (PVCs) tab. Click More in the Operation column of the target PVC and select Scale-out.
                                              2. Enter the capacity to be added and click OK.
                                              +
                                              1. Choose Storage in the navigation pane and click the PVCs tab. Click More in the Operation column of the target PVC and select Scale-out.
                                              2. Enter the capacity to be added and click OK.

                                              Viewing events

                                              You can view event names, event types, number of occurrences, Kubernetes events, first occurrence time, and last occurrence time of the PVC or PV.

                                              1. Choose Storage in the navigation pane and click the PersistentVolumeClaims (PVCs) or PersistentVolumes (PVs) tab.
                                              2. Click View Events in the Operation column of the target PVC or PV to view events generated within one hour (event data is retained for one hour).
                                              +
                                              1. Choose Storage in the navigation pane and click the PVCs or PVs tab.
                                              2. Click View Events in the Operation column of the target PVC or PV to view events generated within one hour (event data is retained for one hour).

                                              Viewing a YAML file

                                              You can view, copy, and download the YAML files of a PVC or PV.

                                              1. Choose Storage in the navigation pane and click the PersistentVolumeClaims (PVCs) or PersistentVolumes (PVs) tab.
                                              2. Click View YAML in the Operation column of the target PVC or PV to view or download the YAML.
                                              +
                                              1. Choose Storage in the navigation pane and click the PVCs or PVs tab.
                                              2. Click View YAML in the Operation column of the target PVC or PV to view or download the YAML.
                                              - -

                                              Parameter

                                              @@ -41,7 +41,7 @@

                                              Disk Type

                                              Select an EVS disk type.

                                              +

                                              Select an EVS disk type. EVS disk types vary depending on regions. Obtain the available EVS types on the console.

                                              Access Mode

                                              @@ -56,7 +56,7 @@

                                              Encryption

                                              You can select Encryption and an encryption key to encrypt underlying storage. Only EVS disks and SFS file systems support encryption.

                                              +

                                              Configure whether to encrypt underlying storage. If you select Enabled (key), an encryption key must be configured. Before using encryption, check whether the region where the EVS disk is located supports disk encryption.

                                              Resource Tag

                                              @@ -105,7 +105,7 @@

                                            2. Dynamically mount and use storage volumes. For details about other parameters, see Creating a StatefulSet. After the configuration, click Create Workload.

                                              After the workload is created, the data in the container mount directory will be persistently stored. Verify the storage by referring to Verifying Data Persistence.

                                            3. -

                                              Dynamically Mounting an EVS Volume Using kubectl

                                              1. Use kubectl to connect to the cluster.
                                              2. Create a file named statefulset-evs.yaml. In this example, the EVS volume is mounted to the /data path.

                                                apiVersion: apps/v1
                                                +

                                                Dynamically Mounting an EVS Volume Using kubectl

                                                1. Use kubectl to access the cluster.
                                                2. Create a file named statefulset-evs.yaml. In this example, the EVS volume is mounted to the /data path.

                                                  apiVersion: apps/v1
                                                   kind: StatefulSet
                                                   metadata:
                                                     name: statefulset-evs
                                                  @@ -137,9 +137,9 @@ spec:
                                                           namespace: default
                                                           annotations:
                                                             everest.io/disk-volume-type: SAS    # EVS disk type.
                                                  -          everest.io/crypt-key-id: <your_key_id>    #  (Optional) Encryption key ID. Mandatory for an encrypted disk.
                                                  +          everest.io/crypt-key-id: <your_key_id>    # (Optional) Encryption key ID. Mandatory for an encrypted disk.
                                                   
                                                  -          everest.io/disk-volume-tags: '{"key1":"value1","key2":"value2"}' # (Optional) Custom resource tags
                                                  +          everest.io/disk-volume-tags: '{"key1":"value1","key2":"value2"}' # (Optional) Custom resource tags
                                                             csi.storage.k8s.io/fstype: xfs    # (Optional) Set the file system type to xfs. If it is left blank, ext4 is used by default.
                                                           labels:
                                                             failure-domain.beta.kubernetes.io/region: <your_region>   # Region of the node where the application is to be deployed.
                                                  @@ -150,7 +150,7 @@ spec:
                                                           resources:
                                                             requests:
                                                               storage: 10Gi             # EVS disk capacity, ranging from 1 to 32768.
                                                  -        storageClassName: csi-disk    # Storage class type for EVS disks.
                                                  +        storageClassName: csi-disk    # The storage class is EVS.
                                                   ---
                                                   apiVersion: v1
                                                   kind: Service
                                                  @@ -171,76 +171,76 @@ spec:
                                                         protocol: TCP
                                                     type: ClusterIP
                                                  -
                                                  Table 2 Key parameters

                                                  Parameter

                                                  +
                                                  - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -289,21 +289,21 @@ statefulset-evs-1 1/1 Running 0 28s - - - diff --git a/docs/cce/umn/cce_10_0619.html b/docs/cce/umn/cce_10_0619.html index 07fcc1b6..04e4a87a 100644 --- a/docs/cce/umn/cce_10_0619.html +++ b/docs/cce/umn/cce_10_0619.html @@ -1,13 +1,13 @@

                                                  Using an Existing SFS File System Through a Static PV

                                                  -

                                                  SFS is a network-attached storage (NAS) that provides shared, scalable, and high-performance file storage. It applies to large-capacity expansion and cost-sensitive services. This section describes how to use an existing SFS file system to statically create PVs and PVCs and implement data persistence and sharing in workloads.

                                                  -

                                                  Prerequisites

                                                  +

                                                  SFS is a network-attached storage (NAS) that provides shared, scalable, and high-performance file storage. It applies to large-capacity expansion and cost-sensitive services. This section describes how to use an existing SFS file system to statically create PVs and PVCs for data persistence and sharing in workloads.

                                                  +

                                                  Prerequisites

                                                  Constraints

                                                  • Multiple PVs can use the same SFS or SFS Turbo file system with the following restrictions:
  • Do not mount multiple PVCs/PVs that use the same underlying SFS or SFS Turbo file system to a single pod. Otherwise, the pod fails to start because these PVs share the same volumeHandle value and not all of the PVCs can be mounted.
  • You are advised to set persistentVolumeReclaimPolicy of these PVs to Retain. Otherwise, when one PV is deleted, the underlying volume may be deleted as well, and the other PVs associated with that volume will malfunction. A PV sketch illustrating these fields follows this list.
  • When the same underlying volume is reused by multiple PVs, implement isolation and protection for concurrent ReadWriteMany access at the application layer to prevent data from being overwritten or lost.
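The sketch below illustrates the fields discussed above (volumeHandle and persistentVolumeReclaimPolicy) for an SFS-backed PV. The driver name nas.csi.everest.io, the share-export-location attribute, and the csi-nas storage class are assumptions about the Everest add-on's SFS support and should be verified against an existing PV in your cluster; placeholders must be replaced with real values.

apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-sfs                          # Illustrative name
spec:
  accessModes:
    - ReadWriteMany                     # SFS file systems support shared access
  capacity:
    storage: 1Gi
  csi:
    driver: nas.csi.everest.io          # Assumed Everest SFS driver name
    fsType: nfs
    volumeHandle: <your_file_system_id> # PVs that reuse one file system share this value
    volumeAttributes:
      everest.io/share-export-location: <your_export_location>   # Assumed attribute
      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
  persistentVolumeReclaimPolicy: Retain # Recommended when multiple PVs reuse the same file system
  storageClassName: csi-nas             # Assumed storage class for SFS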
                                                  -

                                                  Using an Existing SFS File System on the Console

                                                  1. Log in to the CCE console and click the cluster name to access the cluster console.
                                                  2. Statically create a PVC and PV.

                                                    1. Choose Storage in the navigation pane and click the PersistentVolumeClaims (PVCs) tab. Click Create PVC in the upper right corner. In the dialog box displayed, configure the PVC parameters. +

                                                      Using an Existing SFS File System on the Console

                                                      1. Log in to the CCE console and click the cluster name to access the cluster console.
                                                      2. Statically create a PVC and PV.

                                                        1. Choose Storage in the navigation pane and click the PVCs tab. Click Create PVC in the upper right corner. In the dialog box displayed, configure the PVC parameters.
Table 2 Key parameters

• failure-domain.beta.kubernetes.io/region (Mandatory: Yes): Region where the cluster is located. For details about the value of region, see Regions and Endpoints.
• failure-domain.beta.kubernetes.io/zone (Mandatory: Yes): AZ where the EVS volume is created. It must be the same as the AZ planned for the workload. For details about the value of zone, see Regions and Endpoints.
• everest.io/disk-volume-type (Mandatory: Yes): EVS disk type. All letters are in uppercase.
  • SATA: common I/O
  • SAS: high I/O
  • SSD: ultra-high I/O
• everest.io/crypt-key-id (Mandatory: No): Mandatory when the EVS disk is encrypted. Enter the encryption key ID selected during EVS disk creation. To obtain the encryption key ID, log in to the Cloud Server Console. In the navigation pane, choose Elastic Volume Service > Disks. Click the name of the target EVS disk to go to its details page. On the Summary tab page, copy the value of KMS Key ID in the Configuration Information area.
• everest.io/disk-volume-tags (Mandatory: No): Supported when the Everest version in the cluster is 2.1.39 or later. You can add resource tags to classify resources. You can create predefined tags on the TMS console. Predefined tags are available to all resources that support tags and improve tag creation and resource migration efficiency. CCE automatically creates the system tags CCE-Cluster-ID={Cluster ID}, CCE-Cluster-Name={Cluster name}, and CCE-Namespace={Namespace name}. These tags cannot be modified.
• csi.storage.k8s.io/fstype (Mandatory: No): Supported by nodes running CentOS 7 or Ubuntu 22.04 when the Everest version in the cluster is 2.1.53 or later. Configures the file system type, which can be ext4 or xfs. If it is left blank, the default value ext4 is used.
• storage (Mandatory: Yes): Requested PVC capacity, in Gi. The value ranges from 1 to 32768.
• storageClassName (Mandatory: Yes): The storage class for EVS disks is csi-disk.
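The fields above come together in a PVC manifest roughly as follows. This is a minimal sketch, not a manifest taken from this guide: the PVC name, region, AZ, disk type, and capacity are placeholder choices, and the encryption annotation is shown commented out because it is needed only for encrypted disks.

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-evs-example                        # Placeholder PVC name
  namespace: default
  labels:
    failure-domain.beta.kubernetes.io/region: <your_region>   # Region where the cluster is located
    failure-domain.beta.kubernetes.io/zone: <your_zone>       # AZ planned for the workload
  annotations:
    everest.io/disk-volume-type: SAS           # EVS disk type in uppercase: SATA, SAS, or SSD
    # everest.io/crypt-key-id: <your_key_id>   # Only if the EVS disk is encrypted
spec:
  accessModes:
    - ReadWriteOnce                            # An EVS disk can be mounted to only one node
  resources:
    requests:
      storage: 10Gi                            # 1 to 32768 Gi
  storageClassName: csi-disk                   # Storage class for EVS disks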

                                                  Quickly expand the capacity of a mounted EVS disk on the CCE console.

                                                  1. Choose Storage in the navigation pane and click the PVCs tab. Click More in the Operation column of the target PVC and select Scale-out.
                                                  2. Enter the capacity to be added and click OK.
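If you manage the PVC with kubectl instead, expansion amounts to raising spec.resources.requests.storage on the bound PVC, assuming the storage class allows volume expansion. This is a sketch of a merge patch that could be applied with kubectl patch; the file name, PVC name, and new size are placeholders:

# expand-pvc.yaml: raises the requested capacity of an existing PVC.
# Could be applied with: kubectl patch pvc <pvc-name> -n default --type merge --patch-file expand-pvc.yaml
spec:
  resources:
    requests:
      storage: 20Gi    # New capacity; it must be larger than the current value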

                                                  Viewing events

                                                  You can view event names, event types, number of occurrences, Kubernetes events, first occurrence time, and last occurrence time of the PVC or PV.

                                                  1. Choose Storage in the navigation pane and click the PVCs or PVs tab.
                                                  2. Click View Events in the Operation column of the target PVC or PV to view events generated within one hour (event data is retained for one hour).

                                                  Viewing a YAML file

                                                  You can view, copy, and download the YAML files of a PVC or PV.

                                                  1. Choose Storage in the navigation pane and click the PVCs or PVs tab.
                                                  2. Click View YAML in the Operation column of the target PVC or PV to view or download the YAML.

                                                  a: The parameter is available when Creation Method is set to Use existing.

                                                  b: The parameter is available when Creation Method is set to Create new.

                                                4. Click Create to create a PVC and a PV.

                                                  You can choose Storage in the navigation pane and view the created PVC and PV on the PVCs and PVs tab pages, respectively.

                                                5. Create an application.

                                                  1. In the navigation pane on the left, click Workloads. In the right pane, click the Deployments tab.
                                                  2. Click Create Workload in the upper right corner. On the displayed page, click Data Storage in the Container Settings area and click Add Volume to select PVC.
                                                    Mount and use storage volumes, as shown in Table 1. For details about other parameters, see Workloads.
Creation Method
                                                  • If underlying storage is available, create a storage volume or use an existing storage volume to statically create a PVC based on whether a PV is available.
                                                  • If no underlying storage is available, select Dynamically provision. For details, see Using an SFS File System Through a Dynamic PV.

                                                  In this example, select Create new to create a PV and PVC at the same time on the console.


                                                  Using an SFS File System Through a Dynamic PV

                                                  This section describes how to use storage classes to dynamically create PVs and PVCs and implement data persistence and sharing in workloads.



                                                      (kubectl) Using an Existing SFS File System

                                                      1. Use kubectl to access the cluster.
                                                      2. Create a PV.

                                                        1. Create the pv-sfs.yaml file.
                                                          SFS Capacity-Oriented:
                                                          apiVersion: v1
                                                           kind: PersistentVolume
                                                           metadata:
                                                             annotations:
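A fuller sketch of what pv-sfs.yaml can look like for an existing SFS Capacity-Oriented file system. The volume ID, shared path, storage class, access mode, and reclaim policy follow the parameters described just below; the PV name, capacity, provisioner annotation, and CSI driver name are assumptions based on Everest add-on conventions rather than values from this guide.

apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-sfs                                  # Placeholder PV name
  annotations:
    pv.kubernetes.io/provisioned-by: everest-csi-provisioner   # Assumed Everest provisioner annotation
spec:
  accessModes:
    - ReadWriteMany                             # SFS volumes support only ReadWriteMany
  capacity:
    storage: 1Gi                                # For SFS, the value is used only for verification
  csi:
    driver: nas.csi.everest.io                  # Assumed Everest CSI driver name for SFS
    volumeHandle: <your_volume_id>              # ID of the existing SFS Capacity-Oriented file system
    volumeAttributes:
      everest.io/share-export-location: <your_shared_path>   # Shared path (Mount Address) of the file system
  persistentVolumeReclaimPolicy: Retain         # Retain is recommended when multiple PVs share one file system
  storageClassName: csi-nas                     # Storage class for SFS volumes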

• volumeHandle (Mandatory: Yes): If an SFS Capacity-Oriented volume is used, enter the volume ID. Log in to the console and choose Service List > Storage > Scalable File Service. In the list, click the name of the target SFS file system. On the details page, copy the content following ID.
• everest.io/share-export-location (Mandatory: Yes): Shared path of the file system. For an SFS Capacity-Oriented file system, log in to the console, choose Service List > Storage > Scalable File Service, and obtain the shared path from the Mount Address column.
• mountOptions

                                                  Create a PV on the CCE console.

                                                  1. Choose Storage in the navigation pane and click the PVs tab. Click Create PersistentVolume in the upper right corner. In the dialog box displayed, configure the parameters.
                                                    • Volume Type: Select SFS.
                                                    • SFS: Click Select SFS. On the displayed page, select the SFS file system that meets your requirements and click OK.
                                                    • PV Name: Enter the PV name. The PV name must be unique in the same cluster.
                                                    • Access Mode: SFS volumes support only ReadWriteMany, indicating that a storage volume can be mounted to multiple nodes in read/write mode. For details, see Volume Access Modes.
                                                    • Reclaim Policy: Delete or Retain. For details, see PV Reclaim Policy.
                                                      NOTE:

                                                      If multiple PVs use the same underlying storage volume, use Retain to avoid cascading deletion of underlying volumes.

                                                    • Mount Options: Enter the mounting parameter key-value pairs. For details, see Configuring SFS Volume Mount Options.
                                                  2. Click Create.

Viewing events

You can view event names, event types, number of occurrences, Kubernetes events, first occurrence time, and last occurrence time of the PVC or PV.

                                                  1. Choose Storage in the navigation pane and click the PVCs or PVs tab.
                                                  2. Click View Events in the Operation column of the target PVC or PV to view events generated within one hour (event data is retained for one hour).

                                                  Viewing a YAML file

                                                  You can view, copy, and download the YAML files of a PVC or PV.

                                                  1. Choose Storage in the navigation pane and click the PVCs or PVs tab.
                                                  2. Click View YAML in the Operation column of the target PVC or PV to view or download the YAML.


                                                  Prerequisites


                                                  Automatically Creating an SFS File System on the Console

                                                  1. Log in to the CCE console and click the cluster name to access the cluster console.
                                                  2. Dynamically create a PVC and PV.

1. Choose Storage in the navigation pane and click the PVCs tab. Click Create PVC in the upper right corner. In the dialog box displayed, configure the PVC parameters.

• PVC Type: In this example, select SFS.
• PVC Name: Enter the PVC name, which must be unique in the same namespace.
• Creation Method:
  • If no underlying storage is available, select Dynamically provision to create a PVC, PV, and underlying storage on the console in cascading mode.
  • If underlying storage is available, create a storage volume or use an existing storage volume to statically create a PVC based on whether a PV is available. For details, see Using an Existing SFS File System Through a Static PV.
  In this example, select Dynamically provision.
• Storage Classes: The storage class for SFS volumes is csi-nas.
• Access Mode: SFS volumes support only ReadWriteMany, indicating that a storage volume can be mounted to multiple nodes in read/write mode. For details, see Volume Access Modes.
• Encryption: Configure whether to encrypt underlying storage. If you select Enabled (key), an encryption key must be configured.

                                                    3. Click Create to create a PVC and a PV.

                                                      You can choose Storage in the navigation pane and view the created PVC and PV on the PVCs and PVs tab pages, respectively.

                                                  3. Create an application.

                                                    1. In the navigation pane on the left, click Workloads. In the right pane, click the Deployments tab.
                                                    2. Click Create Workload in the upper right corner. On the displayed page, click Data Storage in the Container Settings area and click Add Volume to select PVC.
Mount and use storage volumes, as shown in Table 1. For details about other parameters, see Workloads.


                                                          (kubectl) Automatically Creating an SFS File System

                                                          1. Use kubectl to access the cluster.
                                                          2. Use StorageClass to dynamically create a PVC and PV.

                                                            1. Create the pvc-sfs-auto.yaml file.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-sfs-auto
  namespace: default
  annotations:
    everest.io/crypt-key-id: <your_key_id>         # (Optional) ID of the key for encrypting file systems
    everest.io/crypt-alias: sfs/default            # (Optional) Key name. Mandatory for encrypting volumes.
    everest.io/crypt-domain-id: <your_domain_id>   # (Optional) ID of the tenant to which an encrypted volume belongs. Mandatory for encrypting volumes.
spec:
  accessModes:
    - ReadWriteMany              # SFS volumes support only ReadWriteMany.
  resources:
    requests:
      storage: 1Gi               # SFS volume capacity.
  storageClassName: csi-nas      # The storage class is SFS.
Table 1 Mounting a storage volume

• PVC: Select an existing SFS volume.
• Mount Path: Enter a mount path, for example, /tmp. This is the container path to which the data volume will be mounted. Do not mount the volume to a system directory such as / or /var/run; otherwise, containers will malfunction. Mount the volume to an empty directory. If the directory is not empty, ensure that there are no files that affect container startup. Otherwise, the files will be replaced, causing container startup failures or workload creation failures.
  NOTICE: If a volume is mounted to a high-risk directory, use an account with minimum permissions to start the container. Otherwise, high-risk files on the host machine may be damaged.
• Subpath: Enter the subpath of the storage volume to mount a path in the storage volume to the container, so that different folders of the same storage volume can be used in a single pod. For example, tmp indicates that data in the container mount path is stored in the tmp folder of the storage volume. If this parameter is left blank, the root path is used by default.
• Permission:
  • Read-only: You can only read the data in the mounted volumes.
  • Read/Write: You can modify the data volumes mounted to the path. Newly written data will not be migrated if the container is migrated, which may cause data loss.
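When the workload is created with kubectl instead of on the console, the same choices map onto volumes and volumeMounts in the pod template. A minimal sketch, assuming the pvc-sfs-auto PVC shown above; the workload name, image, and paths are illustrative:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-sfs-demo                  # Illustrative workload name
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      app: web-sfs-demo
  template:
    metadata:
      labels:
        app: web-sfs-demo
    spec:
      containers:
        - name: container-1
          image: nginx:latest         # Illustrative image
          volumeMounts:
            - name: sfs-volume
              mountPath: /tmp         # Mount Path: do not use a system directory such as / or /var/run
              subPath: tmp            # Subpath: optional folder inside the storage volume
              readOnly: false         # Permission: false corresponds to Read/Write, true to Read-only
      volumes:
        - name: sfs-volume
          persistentVolumeClaim:
            claimName: pvc-sfs-auto   # PVC: the SFS volume created earlier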
Table 2 Key parameters

• storage (Mandatory: Yes): Requested capacity in the PVC, in Gi. For SFS, this field is used only for verification (it cannot be empty or 0). Its value is fixed at 1, and any value you set does not take effect for SFS file systems.
• everest.io/crypt-key-id (Mandatory: No): Mandatory when an SFS file system is encrypted. Enter the encryption key ID selected during SFS file system creation. You can use a custom key or the default key named sfs/default. To obtain a key ID, log in to the DEW console, locate the key to be encrypted, and copy the key ID.
• everest.io/crypt-alias (Mandatory: No): Key name, which is mandatory when you create an encrypted volume. To obtain a key name, log in to the DEW console, locate the key to be encrypted, and copy the key name.
• everest.io/crypt-domain-id (Mandatory: No): ID of the tenant to which the encrypted volume belongs. This parameter is mandatory for creating an encrypted volume. To obtain a tenant ID, hover the cursor over the username in the upper right corner of the ECS console, choose My Credentials, and copy the account ID.
Related Operations

You can also perform the operations listed in Table 3.

                                                      • Standard file protocols: You can mount file systems as volumes to servers, the same as using local directories.
                                                      • Data sharing: The same file system can be mounted to multiple servers, so that data can be shared.
                                                      • Private network: Users can access data only in private networks of data centers.
                                                      • Data isolation: The on-cloud storage service provides exclusive cloud file storage, which delivers data isolation and ensures IOPS performance.
                                                      • Use cases: Deployments/StatefulSets in the ReadWriteMany mode, DaemonSets, and jobs created for high-traffic websites, log storage, DevOps, and enterprise OA applications

                                                      Application Scenarios

                                                      SFS Turbo supports the following mounting modes:

                                                      diff --git a/docs/cce/umn/cce_10_0625.html b/docs/cce/umn/cce_10_0625.html index 6392ae18..43e0a45a 100644 --- a/docs/cce/umn/cce_10_0625.html +++ b/docs/cce/umn/cce_10_0625.html @@ -1,13 +1,13 @@

                                                      Using an Existing SFS Turbo File System Through a Static PV


SFS Turbo is a shared file system with high availability and durability. It is suitable for applications that contain massive numbers of small files and require low latency and high IOPS. This section describes how to use an existing SFS Turbo file system to statically create PVs and PVCs for data persistence and sharing in workloads.


                                                      Prerequisites

                                                      • You have created a cluster and installed the CCE Container Storage (Everest) add-on in the cluster.
• To use commands for the operations in this section, use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
                                                      • You have created an available SFS Turbo file system, and the SFS Turbo file system and the cluster are in the same VPC.

                                                      Constraints

                                                      • Multiple PVs can use the same SFS or SFS Turbo file system with the following restrictions:
  • Do not mount multiple PVCs/PVs that use the same underlying SFS or SFS Turbo file system to one pod. Otherwise, the pod fails to start, because these PVs share the same volumeHandle value and not all of them can be mounted.
  • Set persistentVolumeReclaimPolicy of these PVs to Retain. Otherwise, when one PV is deleted, the underlying volume may be deleted with it, and the other PVs associated with that volume will malfunction.
  • When an underlying volume is shared by multiple PVs in ReadWriteMany mode, enable isolation and protection at the application layer to prevent data from being overwritten or lost.

                                                          Using an Existing SFS Turbo File System on the Console

                                                          1. Log in to the CCE console and click the cluster name to access the cluster console.
                                                          2. Statically create a PVC and PV.

                                                            1. Choose Storage in the navigation pane and click the PVCs tab. Click Create PVC in the upper right corner. In the dialog box displayed, configure the PVC parameters.
Table 3 Related operations

• Viewing events: You can view event names, event types, number of occurrences, Kubernetes events, first occurrence time, and last occurrence time of the PVC or PV.
  1. Choose Storage in the navigation pane and click the PVCs or PVs tab.
  2. Click View Events in the Operation column of the target PVC or PV to view events generated within one hour (event data is retained for one hour).
• Viewing a YAML file: You can view, copy, and download the YAML files of a PVC or PV.
  1. Choose Storage in the navigation pane and click the PVCs or PVs tab.
  2. Click View YAML in the Operation column of the target PVC or PV to view or download the YAML.

                                                      Reclaim Policyb


                                                      Only Retain is available. This indicates that the PV is not deleted when the PVC is deleted. For details, see PV Reclaim Policy.

                                                      Mount Optionsb


                                                      a: The parameter is available when Creation Method is set to Use existing.

                                                      b: The parameter is available when Creation Method is set to Create new.

                                                    4. Click Create to create a PVC and a PV.

                                                      You can choose Storage in the navigation pane and view the created PVC and PV on the PVCs and PVs tab pages, respectively.

                                                    5. Create an application.

                                                      1. In the navigation pane on the left, click Workloads. In the right pane, click the Deployments tab.
                                                      2. Click Create Workload in the upper right corner. On the displayed page, click Data Storage in the Container Settings area and click Add Volume to select PVC.
                                                        Mount and use storage volumes, as shown in Table 1. For details about other parameters, see Workloads.


(kubectl) Using an Existing SFS Turbo File System

                                                            1. Use kubectl to access the cluster.
                                                            2. Create a PV.

                                                              1. Create the pv-sfsturbo.yaml file.
                                                                apiVersion: v1
                                                                 kind: PersistentVolume
                                                                 metadata:
                                                                   annotations:
spec:
                                                                   storageClassName: csi-sfsturbo          # Storage class name of the SFS Turbo file system.
                                                                   mountOptions: []                         # Mount options.
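A fuller sketch of pv-sfsturbo.yaml, together with the PVC that binds to it. The volume ID, shared path, storage class, access mode, and reclaim policy follow the parameters described below; the PV/PVC names, capacity, provisioner annotation, and CSI driver name are assumptions based on Everest add-on conventions rather than values from this guide.

apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-sfsturbo                           # Placeholder PV name
  annotations:
    pv.kubernetes.io/provisioned-by: everest-csi-provisioner   # Assumed Everest provisioner annotation
spec:
  accessModes:
    - ReadWriteMany                           # SFS Turbo volumes are shared file storage
  capacity:
    storage: 500Gi                            # Placeholder capacity of the SFS Turbo file system
  csi:
    driver: sfsturbo.csi.everest.io           # Assumed Everest CSI driver name for SFS Turbo
    volumeHandle: <your_volume_id>            # SFS Turbo volume ID
    volumeAttributes:
      everest.io/share-export-location: <your_shared_path>   # Shared path (Mount Address) of the SFS Turbo volume
  persistentVolumeReclaimPolicy: Retain       # Only Retain is supported
  storageClassName: csi-sfsturbo              # Storage class name of the SFS Turbo file system
  mountOptions: []                            # Mount options
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-sfsturbo                          # Placeholder PVC name
  namespace: default
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 500Gi                          # Same placeholder capacity as the PV
  storageClassName: csi-sfsturbo              # Storage class of the SFS Turbo volume, which must be the same as that of the PV
  volumeName: pv-sfsturbo                     # Name of the PV to bind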
     Table 2 Key parameters

     • volumeHandle (mandatory): SFS Turbo volume ID.
       How to obtain: Log in to the CCE console, choose Service List > Storage > Scalable File Service, and select SFS Turbo. In the list, click the name of the target SFS Turbo volume. On the details page, copy the content following ID.
     • everest.io/share-export-location (mandatory): Shared path of the SFS Turbo volume.
       Log in to the CCE console, choose Service List > Storage > Scalable File Service, and select SFS Turbo. You can obtain the shared path of the file system from the Mount Address column.
     • mountOptions (optional): Mount options. If not specified, the default configurations are used. For details, see Configuring SFS Turbo Mount Options.
     • persistentVolumeReclaimPolicy (mandatory): A reclaim policy is supported when the cluster version is 1.19.10 or later and the Everest version is 1.2.9 or later.
       Only the Retain reclaim policy is supported. For details, see PV Reclaim Policy.
       Retain: When a PVC is deleted, the PV and underlying storage resources are not deleted. Instead, you must manually delete these resources. After that, the PV is in the Released status and cannot be bound to the PVC again.
     • storage (mandatory): Requested capacity in the PVC, in Gi.
     • storageClassName (mandatory): The storage class name of SFS Turbo volumes is csi-sfsturbo.
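     Putting the parameters in Table 2 together, a complete pv-sfsturbo.yaml might look like the following sketch. The capacity, volume ID, and shared path are placeholders, and the CSI driver and annotation names follow the Everest conventions used elsewhere in this guide; verify them against the full example for your cluster version.

       apiVersion: v1
       kind: PersistentVolume
       metadata:
         name: pv-sfsturbo                                      # PV name.
         annotations:
           pv.kubernetes.io/provisioned-by: everest-csi-provisioner
       spec:
         accessModes:
         - ReadWriteMany                                        # SFS Turbo volumes support only ReadWriteMany.
         capacity:
           storage: 500Gi                                       # Placeholder capacity.
         csi:
           driver: sfsturbo.csi.everest.io                      # Assumed Everest SFS Turbo driver name.
           fsType: nfs
           volumeHandle: <your_sfsturbo_volume_id>              # SFS Turbo volume ID (placeholder).
           volumeAttributes:
             everest.io/share-export-location: <mount_address>  # Shared path of the SFS Turbo volume (placeholder).
         persistentVolumeReclaimPolicy: Retain                  # Only Retain is supported.
         storageClassName: csi-sfsturbo                         # Storage class name of the SFS Turbo file system.
         mountOptions: []                                       # Mount options.

     Apply it with kubectl apply -f pv-sfsturbo.yaml before creating the PVC.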

3. Create a PVC that is bound to the PV. Table 3 (below) describes the key parameters.
4. Create a workload.

  1. Create a web-demo.yaml file that mounts the PVC to the workload. Example (excerpt):

       spec:
         ...
           image: nginx:latest
           volumeMounts:
           - name: pvc-sfsturbo-volume        # Volume name, which must be the same as the volume name in the volumes field.
             mountPath: /data                 # Location where the storage volume is mounted.
         imagePullSecrets:
         - name: default-secret
         volumes:
         - name: pvc-sfsturbo-volume          # Volume name, which can be customized.
           persistentVolumeClaim:
             claimName: pvc-sfsturbo          # Name of the created PVC.
                                                              2. Run the following command to create a workload to which the SFS Turbo volume is mounted:
                                                                kubectl apply -f web-demo.yaml

                                                                After the workload is created, you can try Verifying Data Persistence and Sharing.
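     The web-demo.yaml excerpt above omits the workload boilerplate. A full manifest would roughly follow this sketch; the workload name, label, and replica count are assumptions for illustration only.

       apiVersion: apps/v1
       kind: Deployment
       metadata:
         name: web-demo                            # Hypothetical workload name.
         namespace: default
       spec:
         replicas: 2
         selector:
           matchLabels:
             app: web-demo
         template:
           metadata:
             labels:
               app: web-demo
           spec:
             containers:
             - name: container-1
               image: nginx:latest
               volumeMounts:
               - name: pvc-sfsturbo-volume         # Volume name, same as in the volumes field.
                 mountPath: /data                  # Location where the storage volume is mounted.
             imagePullSecrets:
             - name: default-secret
             volumes:
             - name: pvc-sfsturbo-volume           # Volume name, which can be customized.
               persistentVolumeClaim:
                 claimName: pvc-sfsturbo           # Name of the created PVC.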

Configuring SFS Turbo Mount Options

                                                                This section describes how to configure SFS Turbo mount options. For SFS Turbo, you can only set mount options in a PV and bind the PV by creating a PVC.

                                                                Prerequisites

                                                                The CCE Container Storage (Everest) add-on version must be 1.2.8 or later. This add-on identifies the mount options and transfers them to the underlying storage resources. The parameter settings take effect only if the underlying storage resources support the specified options.

Constraints

• Mount options cannot be configured for Kata containers.
• Due to the restrictions of the NFS protocol, if an SFS volume is mounted to a node multiple times, link-related mount parameters (such as timeo) take effect only for the first mount by default. For example, if the same SFS file system is mounted to multiple pods running on a node, a mount parameter set later does not overwrite the existing parameter value. To configure different mount parameters in this scenario, additionally configure the nosharecache parameter.

                                                                SFS Turbo Mount Options

                                                                The Everest add-on in CCE presets the options described in Table 1 for mounting SFS Turbo volumes.

                                                                @@ -25,7 +25,7 @@
                                                                - @@ -39,20 +39,29 @@ - + + + +
Table 3 Key parameters

• storage (mandatory): Requested capacity in the PVC, in Gi. The value must be the same as the storage size of the existing PV.
• storageClassName (mandatory): Storage class name, which must be the same as the storage class of the PV in 1. The storage class name of SFS Turbo volumes is csi-sfsturbo.
• volumeName (mandatory): PV name, which must be the same as the PV name in 1.
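Based on Table 3, a matching pvc-sfsturbo.yaml could look like the sketch below; the namespace and capacity are placeholders.

  apiVersion: v1
  kind: PersistentVolumeClaim
  metadata:
    name: pvc-sfsturbo
    namespace: default
  spec:
    accessModes:
    - ReadWriteMany                    # SFS Turbo volumes support only ReadWriteMany.
    resources:
      requests:
        storage: 500Gi                 # Must be the same as the storage size of the existing PV.
    storageClassName: csi-sfsturbo     # Must be the same as the storage class of the PV.
    volumeName: pv-sfsturbo            # Name of the PV created in the previous step.

Create it with kubectl apply -f pvc-sfsturbo.yaml; once bound, the PVC can be referenced from the web-demo.yaml workload shown earlier.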

Create a PV on the CCE console.

1. Choose Storage in the navigation pane and click the PVs tab. Click Create PersistentVolume in the upper right corner. In the dialog box displayed, configure the parameters.
  • Volume Type: Select SFS Turbo.
  • SFS Turbo: Click Select SFS Turbo. On the page displayed, select the SFS Turbo volume that meets the requirements and click OK.
  • PV Name: Enter the PV name, which must be unique in the same cluster.
  • Access Mode: SFS volumes support only ReadWriteMany, indicating that a storage volume can be mounted to multiple nodes in read/write mode. For details, see Volume Access Modes.
  • Reclaim Policy: Only Retain is supported. For details, see PV Reclaim Policy.
  • Mount Options: Enter the mounting parameter key-value pairs. For details, see Configuring SFS Turbo Mount Options.
2. Click Create.

Quickly expand the capacity of a mounted SFS Turbo volume on the CCE console.

1. Choose Storage in the navigation pane and click the PVCs tab. Click More in the Operation column of the target PVC and select Scale-out.
2. Enter the capacity to be added and click OK.

Viewing events

You can view event names, event types, number of occurrences, Kubernetes events, first occurrence time, and last occurrence time of the PVC or PV.

1. Choose Storage in the navigation pane and click the PVCs or PVs tab.
2. Click View Events in the Operation column of the target PVC or PV to view events generated within one hour (event data is retained for one hour).

Viewing a YAML file

You can view, copy, and download the YAML files of a PVC or PV.

1. Choose Storage in the navigation pane and click the PVCs or PVs tab.
2. Click View YAML in the Operation column of the target PVC or PV to view or download the YAML.

Table 1 SFS Turbo mount options

• nolock
  Value: Blank.
  Whether to lock files on the server using the NLM protocol. If nolock is selected, the lock is valid for applications on one host. For applications on another host, the lock is invalid.
• hard/soft
  Value: Blank.
  Mount mode.
  - hard: If the NFS request times out, the client keeps resending the request until the request is successful.
  - soft: If the NFS request times out, the client returns an error to the invoking program.
  The default value is hard.
• sharecache/nosharecache
  Value: Blank.
  How the data cache and attribute cache are shared when one file system is concurrently mounted to different clients. If this parameter is set to sharecache, the caches are shared. If it is set to nosharecache, the caches are not shared, and one cache is configured for each client mount. The default value is sharecache.
  NOTE: The nosharecache setting affects performance. The mount information must be obtained for each mount, which increases the communication overhead with the NFS server and the memory consumption of the NFS clients. In addition, the nosharecache setting on the NFS clients may lead to inconsistent caches. Determine whether to use nosharecache based on site requirements.

                                                                You can set other mount options if needed. For details, see Mounting an NFS File System to ECSs (Linux).

                                                                Configuring Mount Options in a PV

                                                                You can use the mountOptions field to configure mount options in a PV. The options you can configure in mountOptions are listed in SFS Turbo Mount Options.

1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
2. Configure mount options in a PV. Example (excerpt):

     apiVersion: v1
     kind: PersistentVolume
     metadata:
       annotations:
       ...
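   The example above is truncated here; the key point is that mountOptions sits directly under spec, next to storageClassName. A minimal sketch of the relevant part of the PV spec, using options from Table 1, might look like this (the rest of the spec is the same as in the earlier PV example):

     spec:
       ...
       persistentVolumeReclaimPolicy: Retain
       storageClassName: csi-sfsturbo
       mountOptions:                  # Options listed in SFS Turbo Mount Options, for example:
       - hard                         # Keep resending a timed-out NFS request until it succeeds.
       - nolock                       # Do not lock files on the server using the NLM protocol.
       - nosharecache                 # Use a separate cache for each client mount (may affect performance).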
                                                                     
                                                                     

                                                                    Using an OBS Bucket Through a Dynamic PV

                                                                    This section describes how to automatically create an OBS bucket. It is applicable when no underlying storage volume is available.

Constraints

• If OBS volumes are used, the owner group and permission of the mount point cannot be modified.
• CCE allows parallel file systems to be mounted using OBS SDKs or PVCs. If PVC mounting is used, the obsfs tool provided by OBS must be used. An obsfs resident process is generated each time an object storage volume generated from the parallel file system is mounted to a node, as shown in the following figure.
  Figure 1 obsfs resident process

  Reserve 1 GiB of memory for each obsfs process. For example, for a node with 4 vCPUs and 8 GiB of memory, an obsfs parallel file system should be mounted to no more than eight pods.

  • An obsfs resident process runs on a node. If the consumed memory exceeds the upper limit of the node, the node malfunctions. On a node with 4 vCPUs and 8 GiB of memory, if more than 100 pods are mounted to a parallel file system, the node will be unavailable. Control the number of pods mounted to a parallel file system on a single node.
• Kata containers do not support OBS volumes.
• OBS allows a single user to create a maximum of 100 buckets. If a large number of dynamic PVCs are created, the number of buckets may exceed the upper limit, and no more OBS buckets can be created. In this case, use OBS by calling its API or SDK and do not mount OBS buckets to workloads.

Automatically Creating an OBS Volume on the Console

1. Log in to the CCE console and click the cluster name to access the cluster console.
2. Dynamically create a PVC and PV.

  1. Choose Storage in the navigation pane and click the PVCs tab. Click Create PVC in the upper right corner. In the dialog box displayed, configure the PVC parameters.

  2. Click Create to create a PVC and a PV.

     You can choose Storage in the navigation pane and view the created PVC and PV on the PVCs and PVs tab pages, respectively.

                                                                          3. Create an application.

                                                                            1. In the navigation pane on the left, click Workloads. In the right pane, click the Deployments tab.
                                                                            2. Click Create Workload in the upper right corner. On the displayed page, click Data Storage in the Container Settings area and click Add Volume to select PVC.
     Mount and use storage volumes, as shown in Table 1. For details about other parameters, see Workloads.

     Table 1 Mounting a storage volume

     • PVC: Select an existing object storage volume.
     • Mount Path: Enter a mount path, for example, /tmp.
       This parameter indicates the container path to which a data volume will be mounted. Do not mount the volume to a system directory such as / or /var/run; otherwise, containers will malfunction. Mount the volume to an empty directory. If the directory is not empty, ensure that there are no files that affect container startup; otherwise, the files will be replaced, causing container startup failures or workload creation failures.
       NOTICE: If a volume is mounted to a high-risk directory, use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.
     • Subpath: Enter the subpath of the storage volume to mount a path in the storage volume to the container. In this way, different folders of the same storage volume can be used in a single pod. For example, tmp indicates that data in the mount path of the container is stored in the tmp folder of the storage volume. If this parameter is left blank, the root path is used by default.
     • Permission:
       - Read-only: You can only read the data in the mounted volumes.
       - Read/Write: You can modify the data volumes mounted to the path. Newly written data will not be migrated if the container is migrated, which may cause data loss.

     In this example, the disk is mounted to the /data path of the container. The container data generated in this path is stored in the OBS volume.

  3. After the configuration, click Create Workload.

     After the workload is created, the data in the container mount directory will be persistently stored. Verify the storage by referring to Verifying Data Persistence and Sharing.

To do the same with kubectl:

5. Create a PVC, for example pvc-obs-auto.yaml, so that the PV and OBS volume are provisioned automatically. Example (excerpt):

     spec:
       ...
           storage: 1Gi              # OBS volume capacity.
       storageClassName: csi-obs     # The storage class type is OBS.

   Table 2 (below) describes the key parameters.
   Then create a web-demo.yaml file that mounts the PVC to a workload. Example (excerpt):

     spec:
       ...
         image: nginx:latest
         volumeMounts:
         - name: pvc-obs-volume             # Volume name, which must be the same as the volume name in the volumes field.
           mountPath: /data                 # Location where the storage volume is mounted.
       imagePullSecrets:
       - name: default-secret
       volumes:
       - name: pvc-obs-volume               # Volume name, which can be customized.
         persistentVolumeClaim:
           claimName: pvc-obs-auto          # Name of the created PVC.
                                                                            6. Run the following command to create a workload to which the OBS volume is mounted:
                                                                              kubectl apply -f web-demo.yaml

                                                                              After the workload is created, you can try Verifying Data Persistence and Sharing.


                                                                              OBS Mount Options

                                                                              When mounting an OBS volume, the Everest add-on presets the options described in Table 1 and Table 2 by default. The options in Table 1 are mandatory.

Table 2 Key parameters

• everest.io/obs-volume-type (mandatory): OBS storage class.
  - If fsType is set to s3fs, STANDARD (standard bucket) and WARM (infrequent access bucket) are supported.
  - This parameter is invalid when fsType is set to obsfs.
• csi.storage.k8s.io/fstype (mandatory): Instance type. The value can be obsfs or s3fs.
  - obsfs: Parallel file system, which is mounted using obsfs (recommended).
  - s3fs: Object bucket, which is mounted using s3fs.
• csi.storage.k8s.io/node-publish-secret-name (optional): Custom secret name.
  (Recommended) Select this option if you want to assign different user permissions to different OBS storage devices. For details, see Using a Custom Access Key (AK/SK) to Mount an OBS Volume.
• csi.storage.k8s.io/node-publish-secret-namespace (optional): Namespace of a custom secret.
• storage (mandatory): Requested capacity in the PVC, in Gi.
  For OBS, this field is used only for verification (cannot be empty or 0). Its value is fixed at 1, and any value you set does not take effect for OBS.
• storageClassName (mandatory): Storage class name. The storage class name of OBS volumes is csi-obs.

Update the access key of object storage on the CCE console.

1. Choose Storage in the navigation pane and click the PVCs tab. Click More in the Operation column of the target PVC and select Update Access Key.
2. Upload a key file in .csv format. For details, see Obtaining an Access Key. Click OK.
   NOTE:
   After a global access key is updated, all pods mounted with the object storage that uses this access key can be accessed only after being restarted.

Viewing events

You can view event names, event types, number of occurrences, Kubernetes events, first occurrence time, and last occurrence time of the PVC or PV.

1. Choose Storage in the navigation pane and click the PVCs or PVs tab.
2. Click View Events in the Operation column of the target PVC or PV to view events generated within one hour (event data is retained for one hour).

Viewing a YAML file

You can view, copy, and download the YAML files of a PVC or PV.

1. Choose Storage in the navigation pane and click the PVCs or PVs tab.
2. Click View YAML in the Operation column of the target PVC or PV to view or download the YAML.
Table 1 Mandatory mount options configured by default

• use_ino
  Value: Blank.
  If enabled, obsfs allocates the inode number. Enabled by default in read/write mode.
• big_writes
  Value: Blank.
  If configured, the maximum size of the cache can be modified.
• nonempty
  Value: Blank.
  Allows non-empty mount paths.
• allow_other
  Value: Blank.
  Allows other users to access the parallel file system.
• no_check_certificate
  Value: Blank.
  Disables server certificate verification.
• enable_noobj_cache
  Value: Blank.
  Enables cache entries for objects that do not exist, which can improve performance. Enabled by default in object bucket read/write mode.
  This option is no longer configured by default since Everest 1.2.40.
• sigv2
  Value: Blank.
  Specifies the signature version. Used by default in object buckets.
• public_bucket
  Value: 1
  If this parameter is set to 1, public buckets are mounted anonymously. Enabled by default in object bucket read-only mode.


                                                                              Dynamically Mounting a Local PV to a StatefulSet

                                                                              Application Scenarios

                                                                              Dynamic mounting is available only for creating a StatefulSet. It is implemented through a volume claim template (volumeClaimTemplates field) and depends on the storage class to dynamically provision PVs. In this mode, each pod in a multi-pod StatefulSet is associated with a unique PVC and PV. After a pod is rescheduled, the original data can still be mounted to it based on the PVC name. In the common mounting mode for a Deployment, if ReadWriteMany is supported, multiple pods of the Deployment will be mounted to the same underlying storage.

Prerequisites

Dynamically Mounting a Local PV on the Console

1. Log in to the CCE console and click the cluster name to access the cluster console.
2. In the navigation pane on the left, click Workloads. In the right pane, click the StatefulSets tab.
3. Click Create Workload in the upper right corner. On the displayed page, click Data Storage in the Container Settings area and click Add Volume to select VolumeClaimTemplate.
4. Click Create PVC. In the dialog box displayed, configure the volume claim template parameters.

   Click Create.
PV excerpt with OBS mount options configured:

  spec:
    persistentVolumeReclaimPolicy: Retain    # Reclaim policy.
    storageClassName: csi-obs                # Storage class name.
    mountOptions:                            # Mount options.
    - umask=027

8. After a PV is created, you can create a PVC and bind it to the PV, and then mount the PV to the container in the workload. For details, see Using an Existing OBS Bucket Through a Static PV.
9. Check whether the mount options take effect.

   In this example, the PVC is mounted to the workload that uses the nginx:latest image. Log in to the node where the pod with the OBS volume mounted is running and view the process details.

   Run the following command:
   • Object bucket: ps -ef | grep s3fs
     root     22142     1  0 Jun03 ?        00:00:00 /usr/bin/s3fs {your_obs_name} /mnt/paas/kubernetes/kubelet/pods/{pod_uid}/volumes/kubernetes.io~csi/{your_pv_name}/mount -o url=https://{endpoint}:443 -o endpoint={region} -o passwd_file=/opt/everest-host-connector/***_obstmpcred/{your_obs_name} -o nonempty -o big_writes -o sigv2 -o allow_other -o no_check_certificate -o ssl_verify_hostname=0 -o umask=027 -o max_write=131072 -o multipart_size=20
   • Parallel file system: ps -ef | grep obsfs
     root      1355     1  0 Jun03 ?        00:03:16 /usr/bin/obsfs {your_obs_name} /mnt/paas/kubernetes/kubelet/pods/{pod_uid}/volumes/kubernetes.io~csi/{your_pv_name}/mount -o url=https://{endpoint}:443 -o endpoint={region} -o passwd_file=/opt/everest-host-connector/***_obstmpcred/{your_obs_name} -o allow_other -o nonempty -o big_writes -o use_ino -o no_check_certificate -o ssl_verify_hostname=0 -o max_background=100 -o umask=027 -o max_write=131072

• StorageClass excerpt with mount options configured:

    volumeBindingMode: Immediate
    mountOptions:                             # Mount options.
    - umask=027

• After the StorageClass is configured, you can use it to create a PVC. By default, the dynamically created PVs inherit the mount options configured in the StorageClass. For details, see Using an OBS Bucket Through a Dynamic PV.
• Check whether the mount options take effect.

  In this example, the PVC is mounted to the workload that uses the nginx:latest image. Log in to the node where the pod with the OBS volume mounted is running and view the process details.

  Run the following command:
  • Object bucket: ps -ef | grep s3fs
    root     22142     1  0 Jun03 ?        00:00:00 /usr/bin/s3fs {your_obs_name} /mnt/paas/kubernetes/kubelet/pods/{pod_uid}/volumes/kubernetes.io~csi/{your_pv_name}/mount -o url=https://{endpoint}:443 -o endpoint={region} -o passwd_file=/opt/everest-host-connector/***_obstmpcred/{your_obs_name} -o nonempty -o big_writes -o sigv2 -o allow_other -o no_check_certificate -o ssl_verify_hostname=0 -o umask=027 -o max_write=131072 -o multipart_size=20
  • Parallel file system: ps -ef | grep obsfs
    root      1355     1  0 Jun03 ?        00:03:16 /usr/bin/obsfs {your_obs_name} /mnt/paas/kubernetes/kubelet/pods/{pod_uid}/volumes/kubernetes.io~csi/{your_pv_name}/mount -o url=https://{endpoint}:443 -o endpoint={region} -o passwd_file=/opt/everest-host-connector/***_obstmpcred/{your_obs_name} -o allow_other -o nonempty -o big_writes -o use_ino -o no_check_certificate -o ssl_verify_hostname=0 -o max_background=100 -o umask=027 -o max_write=131072

diff --git a/docs/cce/umn/cce_10_0634.html b/docs/cce/umn/cce_10_0634.html

                                                                                  Using a Local PV Through a Dynamic PV

Prerequisites

                                                                                  Constraints

                                                                                  • Local PVs are supported only when the cluster version is v1.21.2-r0 or later and the Everest add-on version is 2.1.23 or later. Version 2.1.23 or later is recommended.
                                                                                  • Deleting, removing, resetting, or scaling in a node will cause the PVC/PV data of the local PV associated with the node to be lost, which cannot be restored or used again. For details, see Removing a Node, Deleting a Node, Resetting a Node, and Scaling a Node. In these scenarios, the pod that uses the local PV is evicted from the node. A new pod will be created and stay in the pending state. This is because the PVC used by the pod has a node label, due to which the pod cannot be scheduled. After the node is reset, the pod may be scheduled to the reset node. In this case, the pod remains in the creating state because the underlying logical volume corresponding to the PVC does not exist.
                                                                                  • Do not manually delete the corresponding storage pool or detach data disks from the node. Otherwise, exceptions such as data loss may occur.
                                                                                  • Local PVs are in non-shared mode and cannot be mounted to multiple workloads or tasks concurrently. Additionally, local PVs cannot be mounted to multiple pods of a workload concurrently.
Automatically Creating a Local PV on the Console

1. Log in to the CCE console and click the cluster name to access the cluster console.
2. Dynamically create a PVC and PV.

   1. Choose Storage in the navigation pane and click the PVCs tab. Click Create PVC in the upper right corner. In the dialog box displayed, configure the PVC parameters.
Table 2 Optional mount options configured by default

Parameter: max_write
Value: 131072
Description: This parameter is valid only when big_writes is configured. The recommended value is 128 KB.

Parameter: ssl_verify_hostname
Value: 0
Description: Disables SSL certificate verification based on the host name.

Parameter: max_background
Value: 100
Description: Allows setting the maximum number of waiting requests in the background. Used by default in parallel file systems.

Parameter: public_bucket
Value: 1
Description: If this parameter is set to 1, public buckets are mounted anonymously. Enabled by default in object bucket read/write mode.

Parameter: umask
Value: A three-digit octal number
Description: Mask of the configuration file permission. For example, if the umask value is 022, the directory permission (the maximum permission is 777) is 755 (777 - 022 = 755, rwxr-xr-x).
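As a rough illustration of where these options go, the sketch below adds them to the mountOptions field of an OBS PV. The PV name is a placeholder and the CSI block is abbreviated (see Using an Existing OBS Bucket Through a Static PV for a complete PV definition), so treat this as an outline under those assumptions rather than a ready-to-use manifest:

  apiVersion: v1
  kind: PersistentVolume
  metadata:
    name: pv-obs-example                      # Placeholder name for illustration.
  spec:
    accessModes:
      - ReadWriteMany
    capacity:
      storage: 1Gi
    csi:
      driver: obs.csi.everest.io              # Assumption: the Everest OBS CSI driver.
      volumeHandle: "{your_obs_name}"         # OBS bucket name placeholder, as in the examples above.
    persistentVolumeReclaimPolicy: Retain     # Reclaim policy.
    storageClassName: csi-obs                 # Storage class name.
    mountOptions:                             # Optional mount options from Table 2.
    - max_write=131072
    - ssl_verify_hostname=0
    - max_background=100
    - umask=027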

Click Create to create a PVC and a PV.

You can choose Storage in the navigation pane and view the created PVC and PV on the PVCs and PVs tab pages, respectively.

                                                                              The volume binding mode of the local storage class (named csi-local-topology) is late binding (that is, the value of volumeBindingMode is WaitForFirstConsumer). In this mode, PV creation and binding are delayed. The corresponding PV is created and bound only when the PVC is used during workload creation.
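To confirm the late-binding behavior on your own cluster, you can query the storage class directly (a standard kubectl command, shown here as an optional check):

  kubectl get storageclass csi-local-topology -o jsonpath='{.volumeBindingMode}'
  # Expected output: WaitForFirstConsumer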


(kubectl) Automatically Creating a Local PV

1. Use kubectl to access the cluster.
2. Use StorageClass to dynamically create a PVC and PV.

   1. Create the pvc-local.yaml file.

      apiVersion: v1
      kind: PersistentVolumeClaim
      metadata:
        name: pvc-local
        namespace: default
      spec:
        accessModes:
          - ReadWriteOnce             # The value must be ReadWriteOnce for local PVs.
        resources:
          requests:
            storage: 10Gi             # Size of the local PV.
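A brief follow-up sketch (standard kubectl usage, not part of the excerpt above) for creating the PVC and checking its status:

  kubectl apply -f pvc-local.yaml
  kubectl get pvc pvc-local -n default    # Stays Pending until a pod that uses it is scheduled (WaitForFirstConsumer).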
                                                                                       
                                                                                       

                                                                              You can view event names, event types, number of occurrences, Kubernetes events, first occurrence time, and last occurrence time of the PVC or PV.

1. Choose Storage in the navigation pane and click the PVCs or PVs tab.
2. Click View Events in the Operation column of the target PVC or PV to view events generated within one hour (event data is retained for one hour).

                                                                              Viewing a YAML file

                                                                              You can view, copy, and download the YAML files of a PVC or PV.

1. Choose Storage in the navigation pane and click the PVCs or PVs tab.
2. Click View YAML in the Operation column of the target PVC or PV to view or download the YAML.
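If you prefer the CLI, roughly equivalent checks (standard kubectl commands; pvc-local is the example PVC name used above) are:

  kubectl get events -n default --field-selector involvedObject.name=pvc-local   # Events related to the PVC.
  kubectl get pvc pvc-local -n default -o yaml                                   # View the PVC's YAML.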
diff --git a/docs/cce/umn/cce_10_0637.html b/docs/cce/umn/cce_10_0637.html

                                                                              Overview

                                                                              Introduction

Some applications require additional storage, but whether the data remains available after a restart does not matter. For example, cache services are limited by memory size, but they can move infrequently used data to storage that is slower than memory without significantly affecting overall performance. Other applications require read-only data injected as files, such as configuration data or secrets.

                                                                              Ephemeral volumes (EVs) in Kubernetes are designed for the above scenario. EVs are created and deleted together with pods following the pod lifecycle.

Common EVs in Kubernetes:
• emptyDir: empty at pod startup, with storage coming locally from the kubelet base directory (usually the root disk) or memory. emptyDir volumes are allocated from the ephemeral storage of the node. If data from other sources (such as log files or image tiering data) occupies the ephemeral storage, the storage capacity may be insufficient.
• ConfigMap: Kubernetes data of the ConfigMap type is mounted to pods as data volumes.
• Secret: Kubernetes data of the Secret type is mounted to pods as data volumes.
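As a rough illustration, the pod, ConfigMap, and mount paths below are made-up examples (not taken from this document) showing how two of these EV kinds are declared in a pod spec; a Secret is mounted analogously by declaring a secret volume with secretName:

  apiVersion: v1
  kind: Pod
  metadata:
    name: ev-demo                     # Hypothetical pod name for illustration.
  spec:
    containers:
    - name: app
      image: nginx:latest             # Image reused from the examples above.
      volumeMounts:
      - name: cache                   # emptyDir: scratch space tied to the pod lifecycle.
        mountPath: /cache
      - name: config                  # ConfigMap: read-only configuration injected as files.
        mountPath: /etc/app
        readOnly: true
    volumes:
    - name: cache
      emptyDir: {}
    - name: config
      configMap:
        name: app-config              # Hypothetical ConfigMap name.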

                                                                              emptyDir Types

                                                                              CCE provides the following emptyDir types:
                                                                              • Using a Temporary Path: Kubernetes-native emptyDir type. Its lifecycle is the same as that of a pod. Memory can be specified as the storage medium. When the pod is deleted, the emptyDir volume is deleted and its data is lost.
• Using a Local EV: Local data disks in a node form a storage pool (VolumeGroup) through LVM. LVs are created as the storage medium of emptyDir and mounted to containers. LVs deliver better performance than the default storage medium of emptyDir.

diff --git a/docs/cce/umn/cce_10_0638.html b/docs/cce/umn/cce_10_0638.html

                                                                              Using a Temporary Path

                                                                              A temporary path is of the Kubernetes-native emptyDir type. Its lifecycle is the same as that of a pod. Memory can be specified as the storage medium. When the pod is deleted, the emptyDir volume is deleted and its data is lost.

Using the Console to Use a Temporary Path

1. Log in to the CCE console and click the cluster name to access the cluster console.
2. In the navigation pane on the left, click Workloads. In the right pane, click the Deployments tab.
3. Click Create Workload in the upper right corner of the page. In the Container Settings area, click the Data Storage tab and click Add Volume > EmptyDir.
4. Mount and use storage volumes, as shown in Table 1. For details about other parameters, see Workloads.
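For reference, a hedged YAML sketch of the equivalent configuration with memory as the storage medium; the workload name, mount path, and size limit are illustrative, not values from this document:

  apiVersion: apps/v1
  kind: Deployment
  metadata:
    name: emptydir-demo                 # Hypothetical workload name.
  spec:
    replicas: 1
    selector:
      matchLabels:
        app: emptydir-demo
    template:
      metadata:
        labels:
          app: emptydir-demo
      spec:
        containers:
        - name: container-1
          image: nginx:latest
          volumeMounts:
          - name: scratch
            mountPath: /tmp/scratch     # Container path where the temporary volume is mounted.
        volumes:
        - name: scratch
          emptyDir:
            medium: Memory              # Use memory as the storage medium; omit for disk-backed emptyDir.
            sizeLimit: 1Gi              # Illustrative size limit.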


Dynamically mount and use storage volumes. For details about other parameters, see Creating a StatefulSet. After the configuration, click Create Workload.

After the workload is created, the data in the container mount directory will be persistently stored. Verify the storage by referring to Verifying Data Persistence.

Dynamically Mounting a Local PV Using kubectl

1. Use kubectl to access the cluster.
2. Create a file named statefulset-local.yaml. In this example, the local PV is mounted to the /data path.

   apiVersion: apps/v1
   kind: StatefulSet
   metadata:
     name: statefulset-local
   ...                                       # Intermediate fields are omitted in this excerpt.
         namespace: default
       spec:
         accessModes:
           - ReadWriteOnce               # The value must be ReadWriteOnce for local PVs.
         resources:
           requests:
             storage: 10Gi               # Storage volume capacity.
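A short follow-up sketch (standard kubectl commands, not taken verbatim from this diff) for creating the StatefulSet and confirming that its pods and the per-pod PVCs are created:

  kubectl apply -f statefulset-local.yaml
  kubectl get pod | grep statefulset-local     # Each replica, for example statefulset-local-0, should reach the Running state.
  kubectl get pvc | grep statefulset-local     # One PVC is created from the volume claim template for each pod.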

                                                                              You can view event names, event types, number of occurrences, Kubernetes events, first occurrence time, and last occurrence time of the PVC or PV.

1. Choose Storage in the navigation pane and click the PVCs or PVs tab.
2. Click View Events in the Operation column of the target PVC or PV to view events generated within one hour (event data is retained for one hour).

                                                                              Viewing a YAML file

                                                                              You can view, copy, and download the YAML files of a PVC or PV.

1. Choose Storage in the navigation pane and click the PVCs or PVs tab.
2. Click View YAML in the Operation column of the target PVC or PV to view or download the YAML.
                                                                              Table 1 Mounting an EV

                                                                              Parameter

                                                                              Description

diff --git a/docs/cce/umn/cce_10_0652.html b/docs/cce/umn/cce_10_0652.html

                                                                              Constraints

                                                                              The default node pool DefaultPool does not support the following management operations.

                                                                              Configuration Management

CCE allows you to customize the Kubernetes parameter settings of core components in a cluster. For more information, see kubelet.

This function is supported only in clusters of v1.15 and later. It is not displayed for versions earlier than v1.15.

1. Log in to the CCE console.
2. Click the cluster name to access the cluster console. Choose Nodes in the navigation pane and click the Node Pools tab on the right.
3. Click Manage in the Operation column of the target node pool.
4. On the Manage Components page on the right, change the values of the Kubernetes parameters described in Table 1.
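After the change is delivered to the nodes in the node pool, one generic way to double-check the effective kubelet settings (standard Kubernetes tooling rather than a CCE-specific command; the node name below is a placeholder) is to read the kubelet configz endpoint through the API server:

  # Replace 192.168.0.10 with the name of a node in the node pool (see kubectl get node).
  kubectl get --raw "/api/v1/nodes/192.168.0.10/proxy/configz" | python3 -m json.tool | grep -E '"maxPods"|"podPidsLimit"|"eventRecordQPS"'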

Table 1 kubelet

Item: CPU management policy
Parameter: cpu-manager-policy
Description: CPU management policy configuration. For details, see CPU Scheduling.
• none: disables pods from exclusively occupying CPUs. Select this value if you want a large pool of shareable CPU cores.
• static: enables pods to exclusively occupy CPUs. Select this value if your workload is sensitive to latency in CPU cache and scheduling.
• enhanced-static: allows burstable pods to preferentially use CPU cores. Select this value if your workload has huge peak-trough difference and is in the trough state most of the time.
Value: Default: none
Modification: None

Item: QPS for requests to kube-apiserver
Parameter: kube-api-qps
Description: Number of queries per second for communication with the API server.
Value: Default: 100
Modification: None

Item: Burst for requests to kube-apiserver
Parameter: kube-api-burst
Description: Maximum number of burst requests sent to the API server per second.
Value: Default: 100
Modification: None

Item: Limit on the pods managed by kubelet
Parameter: max-pods
Description: Maximum number of pods that can run on a node.
Value: None
Modification: None

Item: Limited number of processes in a pod
Parameter: pod-pids-limit
Description: Maximum number of PIDs that can be used in each pod.
Value: Default: -1, which indicates that the number of PIDs is not limited
Modification: None

Item: Whether to use a local IP address as a node's ClusterDNS
Parameter: with-local-dns
Description: The default ENI IP address of the node will be automatically added to the node's kubelet configuration as the preferred DNS address.
Value: Default: false
Modification: None

Item: QPS limit on creating events
Parameter: event-qps
Description: Number of events that can be generated per second.
Value: Default: 5
Modification: None

Item: Allowed unsafe sysctls
Parameter: allowed-unsafe-sysctls
Description: Insecure system configuration allowed. Starting from v1.17.17, CCE enables pod security policies for kube-apiserver. Add corresponding configurations to allowedUnsafeSysctls of a pod security policy to make the policy take effect. (This configuration is not required for clusters earlier than v1.17.17.) For details, see Example of Enabling Unsafe Sysctls in Pod Security Policy.
Value: Default: []
Modification: None

Item: Node oversubscription
Parameter: over-subscription-resource
Description: Whether to enable node oversubscription. If this parameter is set to true, node oversubscription is enabled on nodes. For details, see Dynamic Resource Oversubscription.
Value:
• For clusters of versions earlier than v1.23.9-r0 or v1.25.4-r0: enabled (true) by default
• Disabled by default if the cluster version is v1.23.9-r0, v1.25.4-r0, v1.27-r0, or later
Modification: None

Item: Hybrid deployment
Parameter: colocation
Description: Whether to enable hybrid deployment on nodes. If this parameter is set to true, hybrid deployment is enabled on nodes. For details, see Dynamic Resource Oversubscription.
Value:
• For clusters of versions earlier than v1.23.9-r0 or v1.25.4-r0: enabled (true) by default
• Disabled by default if the cluster version is v1.23.9-r0, v1.25.4-r0, v1.27-r0, or later
Modification: None

kube-reserved-mem

system-reserved-mem

                                                                                  +

                                                                                  Topology management policy

                                                                                  Reserved node memory.

                                                                                  +

                                                                                  topology-manager-policy

                                                                                  Depends on node specifications. For details, see Node Resource Reservation Policy.

                                                                                  -

                                                                                  None

                                                                                  -

                                                                                  The sum of kube-reserved-mem and system-reserved-mem is less than half of the memory.

                                                                                  -

                                                                                  topology-manager-policy

                                                                                  -

                                                                                  Set the topology management policy.

                                                                                  +

                                                                                  Set the topology management policy.

                                                                                  Valid values are as follows:

                                                                                  • restricted: kubelet accepts only pods that achieve optimal NUMA alignment on the requested resources.
                                                                                  • best-effort: kubelet preferentially selects pods that implement NUMA alignment on CPU and device resources.
                                                                                  • none (default): The topology management policy is disabled.
                                                                                  • single-numa-node: kubelet allows only pods that are aligned to the same NUMA node in terms of CPU and device resources.

                                                                                  none

                                                                                  +

                                                                                  Default: none

                                                                                  None

                                                                                  -
                                                                                  NOTICE:

                                                                                  Modifying topology-manager-policy and topology-manager-scope will restart kubelet, and the resource allocation of pods will be recalculated based on the modified policy. In this case, running pods may restart or even fail to receive any resources.

                                                                                  +
                                                                                  NOTICE:

                                                                                  Modifying topology-manager-policy and topology-manager-scope will restart kubelet, and the resource allocation of pods will be recalculated based on the modified policy. In this case, running pods may restart or even fail to receive any resources.

                                                                                  topology-manager-scope

                                                                                  +

                                                                                  Topology management scope

                                                                                  Set the resource alignment granularity of the topology management policy. Valid values are as follows:

                                                                                  +

                                                                                  topology-manager-scope

                                                                                  +

                                                                                  Configure the resource alignment granularity of the topology management policy. Valid values are as follows:

                                                                                  • container (default)
                                                                                  • pod

                                                                                  container

                                                                                  +

                                                                                  Default: container

                                                                                  resolv-conf

                                                                                  +

                                                                                  Specified DNS configuration file

                                                                                  DNS resolution configuration file specified by the container

                                                                                  +

                                                                                  resolv-conf

                                                                                  Null

                                                                                  +

                                                                                  DNS resolution configuration file specified by the container

                                                                                  None

                                                                                  +

                                                                                  Default: null

                                                                                  None

                                                                                  +

                                                                                  None

                                                                                  runtime-request-timeout

                                                                                  +

                                                                                  Timeout for all runtime requests except long-running requests

                                                                                  Timeout interval of all runtime requests except long-running requests (pull, logs, exec, and attach).

                                                                                  +

                                                                                  runtime-request-timeout

                                                                                  2m0s

                                                                                  +

                                                                                  Timeout interval of all runtime requests except long-running requests (pull, logs, exec, and attach).

                                                                                  None

                                                                                  +

                                                                                  Default: 2m0s

                                                                                  This parameter is available only in clusters v1.21.10-r0, v1.23.8-r0, v1.25.3-r0 and later versions.

                                                                                  +

                                                                                  This parameter is available only in clusters of v1.21.10-r0, v1.23.8-r0, v1.25.3-r0, or later versions.

                                                                                  registry-pull-qps

                                                                                  +

                                                                                  Whether to allow kubelet to pull only one image at a time

                                                                                  Maximum number of image pulls per second.

                                                                                  +

                                                                                  serialize-image-pulls

                                                                                  5

                                                                                  +

                                                                                  Whether to pull images in serial mode (one at a time).

                                                                                  +
                                                                                  • false (recommended): images are pulled in parallel, which speeds up pod startup.
                                                                                  • true: images are pulled one at a time (serial mode).

                                                                                  The value ranges from 1 to 50.

                                                                                  +
                                                                                  • Enabled by default if the cluster version is earlier than v1.21.12-r0, v1.23.11-r0, v1.25.6-r0, or v1.27.3-r0
                                                                                  • Disabled by default if the cluster version is v1.21.12-r0, v1.23.11-r0, v1.25.6-r0, v1.27.3-r0, or later

                                                                                  This parameter is available only in clusters v1.21.10-r0, v1.23.8-r0, v1.25.3-r0 and later versions.

                                                                                  +

                                                                                  This parameter is available only in clusters of v1.21.10-r0, v1.23.8-r0, v1.25.3-r0, or later versions.

                                                                                  registry-burst

                                                                                  +

                                                                                  Image repository pull limit per second

                                                                                  Maximum number of burst image pulls.

                                                                                  +

                                                                                  registry-pull-qps

                                                                                  10

                                                                                  +

                                                                                  QPS upper limit of an image repository.

                                                                                  The value ranges from 1 to 100 and must be greater than or equal to the value of registry-pull-qps.

                                                                                  +

                                                                                  Default: 5

                                                                                  +

                                                                                  The value ranges from 1 to 50.

                                                                                  This parameter is available only in clusters v1.21.10-r0, v1.23.8-r0, v1.25.3-r0 and later versions.

                                                                                  +

                                                                                  This parameter is available only in clusters of v1.21.10-r0, v1.23.8-r0, v1.25.3-r0, or later versions.

                                                                                  serialize-image-pulls

                                                                                  +

                                                                                  Upper limit of burst image pull

                                                                                  When this function is enabled, kubelet is notified to pull only one image at a time.

                                                                                  +

                                                                                  registry-burst

                                                                                  true

                                                                                  +

                                                                                  Maximum number of burst image pulls.

                                                                                  None

                                                                                  +

                                                                                  Default: 10

                                                                                  +

                                                                                  The value ranges from 1 to 100 and must be greater than or equal to the value of registry-pull-qps.

                                                                                  This parameter is available only in clusters v1.21.10-r0, v1.23.8-r0, v1.25.3-r0 and later versions.

                                                                                  +

                                                                                  This parameter is available only in clusters of v1.21.10-r0, v1.23.8-r0, v1.25.3-r0, or later versions.

                                                                                  evictionHard: memory.available

                                                                                  +

                                                                                  Node memory reservation

                                                                                  A hard eviction signal. The threshold is memory.available.

                                                                                  +

                                                                                  system-reserved-mem

                                                                                  The value is fixed at 100 MiB.

                                                                                  +

                                                                                  System memory reservation reserves memory resources for OS daemons such as sshd and udev.

                                                                                  None

                                                                                  +

                                                                                  Default: automatically calculated; the value varies depending on the node flavor. For details, see Node Resource Reservation Policy.

                                                                                  For details, see Node-pressure Eviction.

                                                                                  -
                                                                                  NOTICE:

                                                                                  Exercise caution when modifying the eviction threshold configuration. Improper configuration may cause pods to be frequently evicted or fail to be evicted when the node is overloaded.

                                                                                  +

                                                                                  The sum of kube-reserved-mem and system-reserved-mem must be less than 50% of the minimum memory of nodes in the node pool.

                                                                                  +

                                                                                  +

                                                                                  kube-reserved-mem

                                                                                  +

                                                                                  Kubernetes memory reservation reserves memory resources for Kubernetes daemons such as kubelet and the container runtime.

                                                                                  +

                                                                                  Hard eviction

                                                                                  +


                                                                                  memory.available

                                                                                  +

                                                                                  Available memory on a node.

                                                                                  +

                                                                                  The value is fixed at 100 MiB.

                                                                                  +

                                                                                  For details, see Node-pressure Eviction.

                                                                                  +
                                                                                  NOTICE:

                                                                                  Exercise caution when modifying an eviction configuration item. Improper configuration may cause pods to be frequently evicted or fail to be evicted when the node is overloaded.

                                                                                  -

                                                                                  nodefs and imagefs correspond to the file system partitions used by kubelet and container engines, respectively.

                                                                                  +

                                                                                  kubelet recognizes the following file system identifiers:

                                                                                  +
                                                                                  • nodefs: the node's main file system, used for local disk volumes, emptyDir volumes that are not backed by memory, and log storage. For example, nodefs contains /var/lib/kubelet/.
                                                                                  • imagefs: file system partition used by the container engine to store resources such as images.
                                                                                  +


                                                                                  evictionHard: nodefs.available

                                                                                  +

                                                                                  nodefs.available

                                                                                  A hard eviction signal. The threshold is nodefs.available.

                                                                                  +

                                                                                  Percentage of the available capacity in the filesystem used by kubelet.

                                                                                  10%

                                                                                  -

                                                                                  The value ranges from 1% to 99%.

                                                                                  +

                                                                                  Default: 10%

                                                                                  +

                                                                                  Value range: 1% to 99%

                                                                                  evictionHard: nodefs.inodesFree

                                                                                  +

                                                                                  nodefs.inodesFree

                                                                                  A hard eviction signal. The threshold is nodefs.inodesFree.

                                                                                  +

                                                                                  Percentage of available inodes in the filesystem used by kubelet.

                                                                                  5%

                                                                                  -

                                                                                  The value ranges from 1% to 99%.

                                                                                  +

                                                                                  Default: 5%

                                                                                  +

                                                                                  Value range: 1% to 99%

                                                                                  evictionHard: imagefs.available

                                                                                  +

                                                                                  imagefs.available

                                                                                  A hard eviction signal. The threshold is imagefs.available.

                                                                                  +

                                                                                  Percentage of the available capacity in the filesystem used by container runtimes to store resources such as images.

                                                                                  10%

                                                                                  -

                                                                                  The value ranges from 1% to 99%.

                                                                                  +

                                                                                  Default: 10%

                                                                                  +

                                                                                  Value range: 1% to 99%

                                                                                  evictionHard: imagefs.inodesFree

                                                                                  +

                                                                                  imagefs.inodesFree

                                                                                  A hard eviction signal. The threshold is imagefs.inodesFree.

                                                                                  +

                                                                                  Percentage of available inodes in the filesystem used by container runtimes to store resources such as images.

                                                                                  This parameter is left blank by default.

                                                                                  -

                                                                                  The value ranges from 1% to 99%.

                                                                                  +

                                                                                  Value range: 1% to 99%

                                                                                  evictionHard: pid.available

                                                                                  +

                                                                                  pid.available

                                                                                  A hard eviction signal. The threshold is pid.available.

                                                                                  +

                                                                                  Percentage of allocatable PIDs reserved for pods.

                                                                                  10%

                                                                                  -

                                                                                  The value ranges from 1% to 99%.

                                                                                  +

                                                                                  Default: 10%

                                                                                  +

                                                                                  Value range: 1% to 99%

                                                                                  evictionSoft: memory.available

                                                                                  +

                                                                                  Soft eviction

                                                                                  +

                                                                                  +

                                                                                  A soft eviction signal. The threshold is memory.available.

                                                                                  +

                                                                                  memory.available

                                                                                  This parameter is left blank by default.

                                                                                  +

                                                                                  Available memory on a node.

                                                                                  +

                                                                                  The value must be greater than the hard eviction value of the same parameter, and the eviction grace period (evictionSoftGracePeriod) must be configured accordingly.

                                                                                  The value ranges from 100 MiB to 1,000,000 MiB. Configure evictionSoftGracePeriod of the corresponding eviction signal to configure the eviction grace period. This value must be greater than the threshold of the corresponding hard eviction signal.

                                                                                  +

                                                                                  This parameter is left blank by default.

                                                                                  +

                                                                                  Value range: 100 MiB to 1,000,000 MiB

                                                                                  evictionSoft: nodefs.available

                                                                                  +

                                                                                  nodefs.available

                                                                                  A soft eviction signal. The threshold is nodefs.available.

                                                                                  +

                                                                                  Percentage of the available capacity in the filesystem used by kubelet.

                                                                                  +

                                                                                  The value must be greater than the hard eviction value of the same parameter, and the eviction grace period (evictionSoftGracePeriod) must be configured accordingly.

                                                                                  This parameter is left blank by default.

                                                                                  -

                                                                                  The value ranges from 1% to 99%. Configure evictionSoftGracePeriod of the corresponding eviction signal to configure the eviction grace period. This value must be greater than the threshold of the corresponding hard eviction signal.

                                                                                  +

                                                                                  This parameter is left blank by default.

                                                                                  +

                                                                                  Value range: 1% to 99%

                                                                                  evictionSoft: nodefs.inodesFree

                                                                                  +

                                                                                  nodefs.inodesFree

                                                                                  A soft eviction signal. The threshold is nodefs.inodesFree.

                                                                                  +

                                                                                  Percentage of available inodes in the filesystem used by kubelet.

                                                                                  +

                                                                                  The value must be greater than the hard eviction value of the same parameter, and the eviction grace period (evictionSoftGracePeriod) must be configured accordingly.

                                                                                  This parameter is left blank by default.

                                                                                  -

                                                                                  The value ranges from 1% to 99%. Configure evictionSoftGracePeriod of the corresponding eviction signal to configure the eviction grace period. This value must be greater than the threshold of the corresponding hard eviction signal.

                                                                                  +

                                                                                  Value range: 1% to 99%

                                                                                  evictionSoft: imagefs.available

                                                                                  +

                                                                                  imagefs.available

                                                                                  A soft eviction signal. The threshold is imagefs.available.

                                                                                  +

                                                                                  Percentage of the available capacity in the filesystem used by container runtimes to store resources such as images.

                                                                                  +

                                                                                  The value must be greater than the hard eviction value of the same parameter, and the eviction grace period (evictionSoftGracePeriod) must be configured accordingly.

                                                                                  This parameter is left blank by default.

                                                                                  -

                                                                                  The value ranges from 1% to 99%. Configure evictionSoftGracePeriod of the corresponding eviction signal to configure the eviction grace period. This value must be greater than the threshold of the corresponding hard eviction signal.

                                                                                  +

                                                                                  Value range: 1% to 99%

                                                                                  evictionSoft: imagefs.inodesFree

                                                                                  +

                                                                                  imagefs.inodesFree

                                                                                  A soft eviction signal. The threshold is imagefs.inodesFree.

                                                                                  +

                                                                                  Percentage of available inodes in the filesystem used by container runtimes to store resources such as images.

                                                                                  +

                                                                                  The value must be greater than the hard eviction value of the same parameter, and the eviction grace period (evictionSoftGracePeriod) must be configured accordingly.

                                                                                  This parameter is left blank by default.

                                                                                  -

                                                                                  The value ranges from 1% to 99%. Configure evictionSoftGracePeriod of the corresponding eviction signal to configure the eviction grace period. This value must be greater than the threshold of the corresponding hard eviction signal.

                                                                                  +

                                                                                  This parameter is left blank by default.

                                                                                  +

                                                                                  Value range: 1% to 99%

                                                                                  evictionSoft: pid.available

                                                                                  +

                                                                                  pid.available

                                                                                  A soft eviction signal. The threshold is pid.available.

                                                                                  +

                                                                                  Percentage of allocatable PIDs reserved for pods.

                                                                                  +

                                                                                  The value must be greater than the hard eviction value of the same parameter, and the eviction grace period (evictionSoftGracePeriod) must be configured accordingly.

                                                                                  This parameter is left blank by default.

                                                                                  -

                                                                                  The value ranges from 1% to 99%. Configure evictionSoftGracePeriod of the corresponding eviction signal to configure the eviction grace period. This value must be greater than the threshold of the corresponding hard eviction signal.

                                                                                  +

                                                                                  Value range: 1% to 99%
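
                                                                                  The kubelet settings listed above can be checked against what is actually in effect on a node. The following is a minimal sketch, assuming kubectl access with permission to call the node proxy API; <node-name> is a placeholder. It reads the kubelet configz endpoint and filters the fields that correspond to the parameters in this table.

                                                                                  # List the nodes in the cluster and pick one (<node-name> below is a placeholder).
                                                                                  kubectl get nodes

                                                                                  # Read the kubelet configuration in effect on that node through the kubelet
                                                                                  # configz endpoint, then filter the fields that correspond to the parameters
                                                                                  # in this table (evictionHard and evictionSoft are objects, so drop the grep
                                                                                  # to inspect them in full).
                                                                                  kubectl get --raw "/api/v1/nodes/<node-name>/proxy/configz" \
                                                                                    | python3 -m json.tool \
                                                                                    | grep -iE 'eventRecordQPS|allowedUnsafeSysctls|topologyManager|resolvConf|runtimeRequestTimeout|registryPullQPS|registryBurst|serializeImagePulls|evictionHard|evictionSoft'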

                                                                                  -
                                                                                  Table 2 kube-proxy

                                                                                  Parameter

                                                                                  +
                                                                                  Table 2 kube-proxy

                                                                                  Item

                                                                                  Description

                                                                                  +

                                                                                  Parameter

                                                                                  Default Value

                                                                                  +

                                                                                  Description

                                                                                  Modification

                                                                                  +

                                                                                  Value

                                                                                  +

                                                                                  Modification

                                                                                  conntrack-min

                                                                                  +

                                                                                  Maximum number of connection tracking entries

                                                                                  Maximum number of connection tracking entries

                                                                                  +

                                                                                  conntrack-min

                                                                                  +

                                                                                  Maximum number of connection tracking entries

                                                                                  To obtain the value, run the following command (see also the check commands after this table):

                                                                                  sysctl net.nf_conntrack_max

                                                                                  131072

                                                                                  +

                                                                                  Default: 131072

                                                                                  None

                                                                                  +

                                                                                  None

                                                                                  conntrack-tcp-timeout-close-wait

                                                                                  +

                                                                                  Wait time of a closed TCP connection

                                                                                  Wait time of a closed TCP connection

                                                                                  +

                                                                                  conntrack-tcp-timeout-close-wait

                                                                                  +

                                                                                  Wait time of TCP connections in the CLOSE_WAIT state.

                                                                                  To obtain the value, run the following command:

                                                                                  sysctl net.netfilter.nf_conntrack_tcp_timeout_close_wait

                                                                                  1h0m0s

                                                                                  +

                                                                                  Default: 1h0m0s

                                                                                  None

                                                                                  +

                                                                                  None
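
                                                                                  Both kube-proxy parameters above map directly to kernel conntrack settings, so their effect can be verified on a node. A minimal sketch, assuming a shell on the node and that the nf_conntrack module is loaded:

                                                                                  # Maximum number of connection tracking entries currently configured.
                                                                                  sysctl net.nf_conntrack_max

                                                                                  # Number of entries currently in use. If this regularly approaches the
                                                                                  # maximum, consider increasing conntrack-min.
                                                                                  sysctl net.netfilter.nf_conntrack_count

                                                                                  # Timeout applied to TCP connections in the CLOSE_WAIT state.
                                                                                  sysctl net.netfilter.nf_conntrack_tcp_timeout_close_wait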

                                                                                  -
                                                                                  Table 3 Network components (available only for CCE Turbo clusters)

                                                                                  Parameter

                                                                                  +
                                                                                  Table 3 Network components (available only for CCE Turbo clusters)

                                                                                  Item

                                                                                  Description

                                                                                  +

                                                                                  Parameter

                                                                                  Default Value

                                                                                  +

                                                                                  Description

                                                                                  Modification

                                                                                  +

                                                                                  Value

                                                                                  +

                                                                                  Modification

                                                                                  nic-threshold

                                                                                  +

                                                                                  Node pool ENI pre-binding

                                                                                  Low threshold of the number of bound ENIs: High threshold of the number of bound ENIs

                                                                                  +

                                                                                  enable-node-nic-configuration

                                                                                  0:0

                                                                                  +

                                                                                  Whether to enable ENI pre-binding in a node pool.

                                                                                  NOTE:

                                                                                  This parameter is being discarded. Use the dynamic pre-binding parameters of the other four ENIs.

                                                                                  +

                                                                                  Default: false

                                                                                  +

                                                                                  If network component configuration is disabled for a node pool, the node pool uses the cluster-level dynamic container ENI pre-binding settings.

                                                                                  +

                                                                                  ENI threshold

                                                                                  +

                                                                                  nic-threshold

                                                                                  +

                                                                                  Low threshold and high threshold of the number of bound ENIs, in the format low:high.

                                                                                  +

                                                                                  Default: 0:0

                                                                                  +
                                                                                  NOTE:

                                                                                  This parameter is being deprecated. Use the other four dynamic ENI pre-binding parameters instead.

                                                                                  nic-minimum-target

                                                                                  +

                                                                                  Minimum number of ENIs bound to a node in a node pool

                                                                                  Minimum number of ENIs bound to the nodes in the node pool

                                                                                  +

                                                                                  nic-minimum-target

                                                                                  10

                                                                                  +

                                                                                  Minimum number of container ENIs bound to a node.

                                                                                  +

                                                                                  The parameter value must be a positive integer. The value 10 indicates that at least 10 container ENIs must be bound to a node. If the number you specified exceeds the container ENI quota of the node, the ENI quota will be used.

                                                                                  None

                                                                                  +

                                                                                  Default: 10

                                                                                  +

                                                                                  Configure these parameters based on the number of pods typically running on most nodes.

nic-maximum-target (Maximum number of ENIs pre-bound to a node at the node pool level)

Description: After the number of ENIs bound to a node exceeds the nic-maximum-target value, CCE does not proactively pre-bind ENIs. Checking the upper limit of pre-bound container ENIs takes effect only when the value of this parameter is greater than or equal to the minimum number of container ENIs (nic-minimum-target) bound to a node. The value must be 0 or a positive integer. The value 0 indicates that checking the upper limit of pre-bound container ENIs is disabled. If the number you specify exceeds the container ENI quota of the node, the ENI quota is used.

Default value: 0

Modification: Configure this parameter based on the maximum number of pods running on most nodes.

nic-warm-target (Number of ENIs pre-bound to a node at the node pool level)

Description: Extra ENIs will be pre-bound after the nic-minimum-target ENIs are used up on a node. The value can only be a number. When the sum of the nic-warm-target value and the number of ENIs currently bound to the node is greater than the nic-maximum-target value, CCE pre-binds only the difference between the nic-maximum-target value and the current number of bound ENIs.

Default value: 2

Modification: Set this parameter to the number of pods that can be scaled out instantaneously within 10 seconds on most nodes.

nic-max-above-warm-target (Threshold for reclaiming the ENIs pre-bound to a node at the node pool level)

Description: Pre-bound ENIs are unbound and reclaimed only when the difference between the number of idle ENIs on a node and the nic-warm-target value is greater than this threshold. The value can only be a number.
• A larger value accelerates pod startup but slows down the unbinding of idle container ENIs and decreases the IP address usage. Exercise caution when increasing this value.
• A smaller value speeds up the unbinding of idle container ENIs and increases the IP address usage but slows down pod startup, especially when a large number of pods are created instantaneously.

Default value: 2

Modification: Set this parameter to the difference between the number of pods that are frequently scaled on most nodes within minutes and the number of pods that are scaled out instantaneously on most nodes within 10 seconds. An illustrative sketch of how the four pre-binding parameters interact is provided after this table.
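The following shell sketch illustrates how the four dynamic pre-binding parameters interact. It is not a CCE tool: the variable names and the example node state (bound and idle ENI counts) are assumptions used only to restate the arithmetic described above.

#!/bin/bash
# Example node pool settings (defaults shown above)
nic_minimum_target=10
nic_maximum_target=0        # 0 disables the upper-limit check
nic_warm_target=2
nic_max_above_warm_target=2

bound=12   # example: ENIs currently bound to the node
idle=5     # example: idle pre-bound ENIs on the node

# Pre-binding: keep nic-warm-target ENIs warm. If bound + nic-warm-target would
# exceed nic-maximum-target (and the upper-limit check is enabled), only the
# difference nic-maximum-target - bound is pre-bound.
prebind=$nic_warm_target
if [ "$nic_maximum_target" -gt 0 ] && [ "$nic_maximum_target" -ge "$nic_minimum_target" ] \
   && [ $((bound + nic_warm_target)) -gt "$nic_maximum_target" ]; then
  prebind=$((nic_maximum_target - bound))
  [ "$prebind" -lt 0 ] && prebind=0
fi
echo "ENIs to pre-bind: $prebind"

# Reclaiming: idle pre-bound ENIs are unbound only when the number of idle ENIs
# exceeds nic-warm-target by more than nic-max-above-warm-target.
if [ $((idle - nic_warm_target)) -gt "$nic_max_above_warm_target" ]; then
  echo "Idle ENIs exceed the threshold; pre-bound ENIs will be reclaimed."
else
  echo "No reclamation is triggered."
fi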

Table 4 Pod security group in a node pool (available only for CCE Turbo clusters)

security_groups_for_nodepool (Default security group used by pods in a node pool)

Description: Enter a security group ID. If this parameter is not configured, the default security group of the cluster container network is used. A maximum of five security group IDs can be specified at a time, separated by semicolons (;). The priority of this security group is lower than that of the security group configured for SecurityGroups.

Default value: None

Modification: None

Table 5 Docker (available only for node pools that use Docker)

native-umask (Container umask; corresponds to --exec-opt native.umask)

Description: The default value normal indicates that the umask value of a started container is 0022.

Default value: normal

Modification: The parameter value cannot be changed.

docker-base-size (Available data space for a single container; corresponds to --storage-opts dm.basesize)

Description: Maximum data space that can be used by each container.

Default value: 0

Modification: The parameter value cannot be changed.

insecure-registry (Address of an insecure image registry)

Description: Whether an insecure image registry address can be used.

Default value: false

Modification: The parameter value cannot be changed.

limitcore (Maximum size of a core file in a container, in bytes)

Description: Maximum size of a core file in a container. If not specified, the value is infinity.

Default value: 5368709120

Modification: None

default-ulimit-nofile (Limit on the number of handles in a container)

Description: Maximum number of handles that can be used in a container. The value cannot exceed the value of the kernel parameter nr_open and cannot be a negative number.

Default value: {soft}:{hard}

Modification: You can run the following command to obtain the kernel parameter nr_open:

sysctl -a | grep nr_open
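For example, the kernel upper bound and the limit that actually applies inside a container can be checked as follows. The image name is a placeholder, and these commands are only an illustrative verification, not part of the configuration procedure.

# Print only the kernel upper bound for open file handles
sysctl -n fs.nr_open

# Check the effective nofile limit inside a container (replace <image> with any
# image available on the node)
docker run --rm <image> sh -c 'ulimit -n'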

image-pull-progress-timeout (Image pull timeout)

Description: If an image fails to be pulled before the timeout expires, the image pull is canceled.

Default value: 1m0s

Modification: This parameter is supported in v1.25.3-r0 and later.

Status-related

For status-related check items, when a problem occurs, NPD reports an event to the API server and changes the node status synchronously. This function can be used together with Node-problem-controller fault isolation to isolate nodes.

If the check period is not specified in the following check items, the default period is 30 seconds.


Updating a Node Pool

Constraints
• The modification of resource tags of a node pool takes effect only on new nodes. To synchronize the modification onto existing nodes, manually reset the existing nodes.
• Changes to Kubernetes labels/taints in a node pool will be automatically synchronized to existing nodes. You do not need to reset these nodes.

Updating a Node Pool
1. Log in to the CCE console.
2. Click the cluster name to access the cluster console. Choose Nodes in the navigation pane and click the Node Pools tab on the right.
3. Click Update next to the name of the node pool you will edit. Configure the parameters on the displayed Update Node Pool page.
Table 6 containerd (available only for node pools that use containerd)

devmapper-base-size (Available data space for a single container)

Description: Maximum data space that can be used by each container.

Default value: 0

Modification: The parameter value cannot be changed.

limitcore (Maximum size of a core file in a container, in bytes)

Description: Maximum size of a core file in a container. If not specified, the value is infinity.

Default value: 5368709120

Modification: None

default-ulimit-nofile (Limit on the number of handles in a container)

Description: Maximum number of handles that can be used in a container. The value cannot exceed the value of the kernel parameter nr_open and cannot be a negative number.

Default value: 1048576

Modification: You can run the following command to obtain the kernel parameter nr_open:

sysctl -a | grep nr_open

image-pull-progress-timeout (Image pull timeout)

Description: If an image fails to be pulled before the timeout expires, the image pull is canceled.

Default value: 1m0s

Modification: This parameter is supported in v1.25.3-r0 and later.

insecure_skip_verify (Whether to skip repository certificate verification)

Description: Whether to skip the certificate verification of the image repository.

Default value: false

Modification: The parameter value cannot be changed.

Basic Settings

Table 1 Basic settings

Node Pool Name

Name of the node pool.

Expected Nodes

Change the number of nodes based on service requirements.

Resource Tag

You can add resource tags to classify resources.

You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve tag creation and resource migration efficiency.

CCE will automatically create the "CCE-Dynamic-Provisioning-Node=node id" tag.

NOTE:
Modified resource tags automatically take effect on new nodes.

Kubernetes Label

A key-value pair added to a Kubernetes object (such as a pod). After specifying a label, click Add. A maximum of 20 labels can be added.

Labels can be used to distinguish nodes. With workload affinity settings, container pods can be scheduled to a specified node. For more information, see Labels and Selectors.

NOTE:
Modified Kubernetes labels automatically take effect on new nodes.

Taint

This field is left blank by default. You can add taints to configure node anti-affinity. A maximum of 20 taints are allowed for each node. Each taint contains the following parameters:
• Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
• Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
• Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.

For details, see Managing Node Taints.

NOTE:
Modified taints automatically take effect on new nodes. Example kubectl commands for node labels and taints are provided below.
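For reference, the labels and taints configured for a node pool correspond to standard Kubernetes node labels and taints. The following kubectl commands are illustrative only (node names, keys, and values are placeholders); node pool labels and taints themselves are managed on the CCE console.

# Add a label to a node and list nodes that carry it
kubectl label nodes <node-name> disktype=ssd
kubectl get nodes -l disktype=ssd

# Add a taint to a node and later remove it (the trailing "-" removes the taint)
kubectl taint nodes <node-name> dedicated=gpu:NoSchedule
kubectl taint nodes <node-name> dedicated=gpu:NoSchedule-

# Inspect the labels and taints currently applied to a node
kubectl get node <node-name> --show-labels
kubectl describe node <node-name> | grep -i -A 3 taints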

Edit Key Pair

Only node pools that use key pairs for login support key pair editing. You can select another key pair.

NOTE:
The edited key pair automatically takes effect on newly added nodes. For existing nodes, manually reset the nodes for the modification to take effect.

Pre-installation Command

Pre-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded. A maximum of 1000 characters are allowed.

The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed.

NOTE:
The modified pre-installation command automatically takes effect on newly added nodes. For existing nodes, manually reset the nodes for the modification to take effect.

Post-installation Command

Post-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded. A maximum of 1000 characters are allowed.

The script will be executed after Kubernetes software is installed, which does not affect the installation.

NOTE:
The modified post-installation command automatically takes effect on newly added nodes. For existing nodes, manually reset the nodes for the modification to take effect. An illustration of the Base64 transcoding is shown below.
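As stated above, the script command will be Base64-transcoded. As an illustration of what that transcoding looks like (the script content is a placeholder, and the console performs the encoding automatically), a command can be encoded and decoded with the standard base64 tool:

# Encode a sample post-installation command (placeholder content)
echo 'systemctl status kubelet' | base64

# Round-trip: encode and then decode to verify the content is unchanged
echo 'systemctl status kubelet' | base64 | base64 -d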

4. After the configuration, click OK.

After the node pool parameters are updated, go to the Nodes page to check whether the node to which the node pool belongs is updated. You can reset the node to synchronize the configuration updates for the node pool.

Synchronizing Node Pools

After the configuration of a node pool is updated, some configurations cannot be automatically synchronized for existing nodes. You can manually synchronize configurations for these nodes.

• Do not delete or reset nodes during batch synchronization. Otherwise, the synchronization of node pool configuration may fail.
• This operation involves resetting nodes. Workloads running on a node may be interrupted due to standalone deployment or insufficient schedulable resources. Evaluate the upgrade risks and perform the upgrade during off-peak hours. Alternatively, specify a disruption budget for your key applications to ensure the availability of these applications during the upgrade.
• During configuration synchronization for existing nodes, the nodes will be reset, and the system disks and data disks will be cleared. Back up important data before the synchronization.
• Only some node pool parameters can be synchronized by resetting nodes. The constraints are as follows:
  • The modification of resource tags of a node pool takes effect only on new nodes. To synchronize the modification onto existing nodes, manually reset the existing nodes.
  • Changes to Kubernetes labels/taints in a node pool will be automatically synchronized to existing nodes. You do not need to reset these nodes. You can verify the synchronization with the kubectl commands shown below.
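To confirm that label and taint changes have reached the existing nodes of a node pool, the nodes can be queried with kubectl. The commands below are an optional, illustrative check; <label-key> and <node-name> are placeholders.

# Show a label column for all nodes to confirm the updated label value
kubectl get nodes -L <label-key>

# Check the taints currently applied to a specific node
kubectl describe node <node-name> | grep -i -A 3 taints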

                                                                                    Synchronizing a Single Node

                                                                                    1. Log in to the CCE console.
                                                                                    2. Click the cluster name to access the cluster console. Choose Nodes in the navigation pane and click the Nodes tab on the right.
3. Find Update in the Node Pool column of the existing nodes in the node pool.
4. Click Update. In the dialog box displayed, confirm whether to reset the node immediately.

                                                                                    Copying a Node Pool

                                                                                    You can copy the configuration of an existing node pool on the CCE console to create new node pools.

1. Log in to the CCE console.
2. Click the cluster name to access the cluster console. Choose Nodes in the navigation pane and click the Node Pools tab on the right.
3. Locate the target node pool and choose More > Copy in the Operation column.
4. In the Copy Resource Pool window, the configurations of the node pool to be copied are displayed. Modify the configurations as needed. For details, see Creating a Node Pool. After confirming the configuration, click Next: Confirm.
5. On the Confirm page, confirm the node pool configurations and click Submit. Then, a new node pool is created based on the modified configurations.

Warning event

Listening object: /dev/kmsg

Matching rule: "task \\S+:\\w+ blocked for more than \\w+ seconds\\."

Warning event

Listening object: /dev/kmsg

Matching rule: Remounting filesystem read-only
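NPD matches these rules against kernel messages. To manually check a node for the same symptoms, the kernel ring buffer can be inspected on the node, for example with the following commands (illustrative only; run as root, and the -T option for human-readable timestamps is available on most Linux distributions):

# Hung-task messages matched by the first rule
dmesg -T | grep 'blocked for more than'

# Read-only remount events matched by the second rule
dmesg -T | grep 'Remounting filesystem read-only'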

                                                                                  Table 3 Checking system components

                                                                                  Check Item

                                                                                  +
                                                                                  - - - - - - - - - - - @@ -136,20 +136,20 @@ - - - - - - @@ -211,18 +211,18 @@
                                                                                  Table 3 Checking system components

Check Item | Function | Description
Container network component error (CNIProblem) | Check the status of the CNI components (container network components). | None
Container runtime component error (CRIProblem) | Check the status of Docker and containerd of the CRI components (container runtime components). | Check object: Docker or containerd
Frequent restarts of Kubelet (FrequentKubeletRestart) | Periodically backtrack system logs to check whether the key component Kubelet restarts frequently. | • Default threshold: 10 restarts within 10 minutes. If Kubelet restarts 10 times within 10 minutes, the system restarts frequently and a fault alarm is generated. • Listening object: logs in the /run/log/journal directory
Frequent restarts of containerd | Periodically backtrack system logs to check whether the container runtime containerd restarts frequently. |
kubelet error (KubeletProblem) | Check the status of the key component Kubelet. | None
kube-proxy error (KubeProxyProblem) | Check the status of the key component kube-proxy. | None

Table 5 Checking the storage

Check Item | Function | Description
Disk read-only (DiskReadonly) | Periodically perform write tests on the system disk and CCE data disks (including the CRI logical disk and Kubelet logical disk) of the node to check the availability of key disks. | Detection paths: /mnt/paas/kubernetes/kubelet/, /var/lib/docker/, /var/lib/containerd/, /var/paas/sys/log/cceaddon-npd/. Currently, additional data disks are not supported.
emptyDir storage pool error (EmptyDirVolumeGroupStatusError) | Check whether the ephemeral volume group on the node is normal. | Impact: Pods that depend on the storage pool cannot write data to the temporary volume. The temporary volume is remounted as a read-only file system by the kernel due to an I/O error. Typical scenario: When creating a node, a user configures two data disks as a temporary volume storage pool. Some data disks are deleted by mistake. As a result, the storage pool becomes abnormal.
PV storage pool error | | Typical scenario: When creating a node, a user configures two data disks as a persistent volume storage pool. Some data disks are deleted by mistake.
Mount point error (MountPointProblem) | Check the mount point on the node. | Exceptional definition: You cannot access the mount point by running the cd command. Check command: for dir in `df -h | grep -v "Mounted on" | awk "{print \$NF}"`; do cd $dir; done && echo "ok". Typical scenario: Network File System (NFS), for example, obsfs or s3fs, is mounted to a node. When the connection is abnormal due to network or peer NFS server exceptions, all processes that access the mount point are suspended. For example, during a cluster upgrade, kubelet is restarted and all mount points are scanned. If an abnormal mount point is detected, the upgrade fails.
Suspended disk I/O (DiskHung) | Check whether I/O suspension occurs on all disks on the node, that is, whether I/O read and write operations get no response. | Definition of I/O suspension: The system does not respond to disk I/O requests, and some processes are in the D state. Typical scenario: Disks cannot respond due to abnormal OS hard disk drivers or severe faults on the underlying network.
Slow disk I/O (DiskSlow) | Check whether all disks on the node have slow I/Os, that is, whether I/Os respond slowly. | Typical scenario: EVS disks have slow I/Os due to network fluctuation. Check object: all data disks. Source: /proc/diskstat

                                                                                    diff --git a/docs/cce/umn/cce_10_0660.html b/docs/cce/umn/cce_10_0660.html index 3ee809b3..20c570e0 100644 --- a/docs/cce/umn/cce_10_0660.html +++ b/docs/cce/umn/cce_10_0660.html @@ -9,8 +9,10 @@

                                                                                    Procedure for Default Node Pools

                                                                                    1. Log in to the CCE console.
                                                                                    2. Click the cluster name to access the cluster console. Choose Nodes in the navigation pane and click the Node Pools tab on the right.
                                                                                    3. Click Upgrade next to the default node pool.
                                                                                    4. In the displayed Operating System Upgrade window, configure parameters.

                                                                                      • Target Operating System: shows the image of the target version. You do not need to configure this parameter.
                                                                                      • Upgrade Policy: Node Reset is supported.
• Max. Nodes for Batch Upgrade: maximum number of nodes that can be unavailable at a time during the upgrade. Nodes are unavailable while they are being reset for synchronization. Properly configure this parameter to prevent pod scheduling failures caused by too many unavailable nodes in the cluster.
                                                                                      • View Node: Select the nodes to be upgraded.
                                                                                      • Login Mode:
                                                                                        • Key Pair

                                                                                          Select the key pair used to log in to the node. You can select a shared key.

                                                                                          A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create Key Pair.

  • Pre-installation script: Enter the pre-installation script command. Chinese characters are not allowed, and the script command will be Base64-transcoded.

    The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed.

  • Post-installation script: Enter the post-installation script command. Chinese characters are not allowed, and the script command will be Base64-transcoded.

    The script will be executed after Kubernetes software is installed and will not affect the installation.

                                                                                    5. Click OK.
                                                                                    diff --git a/docs/cce/umn/cce_10_0672.html b/docs/cce/umn/cce_10_0672.html index 8d2d4d78..ead170e4 100644 --- a/docs/cce/umn/cce_10_0672.html +++ b/docs/cce/umn/cce_10_0672.html @@ -13,7 +13,7 @@
diff --git a/docs/cce/umn/cce_10_0678.html b/docs/cce/umn/cce_10_0678.html index d1d4ea23..825e48aa 100644 --- a/docs/cce/umn/cce_10_0678.html +++ b/docs/cce/umn/cce_10_0678.html @@ -5,9 +5,13 @@
                                                                                    diff --git a/docs/cce/umn/cce_10_0680.html b/docs/cce/umn/cce_10_0680.html index b5894211..ca39233e 100644 --- a/docs/cce/umn/cce_10_0680.html +++ b/docs/cce/umn/cce_10_0680.html @@ -5,7 +5,7 @@

                                                                                    Constraints

                                                                                    • This function applies to CCE standard clusters and CCE Turbo clusters of v1.19 or later, but not to clusters using container tunnel networking.
                                                                                    • The container CIDR block or container subnet cannot be deleted after being added. Exercise caution when performing this operation.
Adding a Container CIDR Block for a CCE Standard Cluster

1. Log in to the CCE console and click the cluster name to access the cluster console.
2. On the Overview page, locate the Networking Configuration area and click Add Container CIDR Block.
3. Configure the container CIDR block to be added. You can click the add icon to add multiple container CIDR blocks at a time.

   New container CIDR blocks cannot conflict with service CIDR blocks, VPC CIDR blocks, or existing container CIDR blocks.

                                                                                      4. Click OK.
                                                                                      diff --git a/docs/cce/umn/cce_10_0681.html b/docs/cce/umn/cce_10_0681.html index bc536632..fed3cb1b 100644 --- a/docs/cce/umn/cce_10_0681.html +++ b/docs/cce/umn/cce_10_0681.html @@ -1,68 +1,88 @@

                                                                                      Creating a LoadBalancer Service

Application Scenarios

LoadBalancer Services can access workloads from the public network through ELB, which is more reliable than EIP-based access. The LoadBalancer access address is in the format of IP address of public network load balancer:Access port, for example, 10.117.117.117:80.

In this access mode, requests are transmitted through an ELB load balancer to a node and then forwarded to the destination pod through the Service.

Figure 1 LoadBalancer

                                                                                      When CCE Turbo clusters and dedicated load balancers are used, passthrough networking is supported to reduce service latency and ensure zero performance loss.

                                                                                      External access requests are directly forwarded from a load balancer to pods. Internal access requests can be forwarded to a pod through a Service.

Figure 2 Passthrough networking

                                                                                      Constraints

• LoadBalancer Services allow workloads to be accessed from public networks through ELB. This access mode has the following restrictions:
  • Automatically created load balancers should not be used by other resources. Otherwise, these load balancers cannot be completely deleted.
  • Do not change the listener name for the load balancer in clusters of v1.15 and earlier. Otherwise, the load balancer cannot be accessed.
• After a Service is created, if the affinity setting is switched from the cluster level to the node level, the connection tracing table will not be cleared. You are advised not to modify the Service affinity setting after the Service is created. To modify it, create a Service again.
• If the service affinity is set to the node level (that is, externalTrafficPolicy is set to Local), the cluster may fail to access the Service by using the ELB address. For details, see Why a Service Fail to Be Accessed from Within the Cluster. (A manifest sketch showing both affinity settings follows this list.)
• In a CCE Turbo cluster that uses the Cloud Native 2.0 network model, node-level affinity is supported only when the Service backend is connected to a HostNetwork pod.
• Dedicated ELB load balancers can be used only in clusters of v1.17 and later.
• Dedicated load balancers must be of the network type (TCP/UDP) supporting private networks (with a private IP). If the Service needs to support HTTP, the dedicated load balancers must be of the network (TCP/UDP) or application load balancing (HTTP/HTTPS) type.
• In a CCE cluster, if cluster-level affinity is configured for a LoadBalancer Service, requests are distributed to the node ports of each node using SNAT when entering the cluster. The number of node ports cannot exceed the number of available node ports on the node. If the service affinity is at the node level (Local), there is no such constraint. In a CCE Turbo cluster, this constraint applies to shared load balancers, but not dedicated ones. Use dedicated load balancers in CCE Turbo clusters.
• When the cluster service forwarding (proxy) mode is IPVS, the node IP cannot be configured as the external IP of the Service. Otherwise, the node is unavailable.
• In a cluster using the IPVS proxy mode, if the ingress and Service use the same ELB load balancer, the ingress cannot be accessed from the nodes and containers in the cluster because kube-proxy mounts the LoadBalancer Service address to the ipvs-0 bridge. This bridge intercepts the traffic of the load balancer connected to the ingress. Use different load balancers for the ingress and Service.
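To make the two affinity settings concrete, the following is a minimal sketch of a LoadBalancer Service manifest, assuming an Nginx workload and an existing load balancer whose ID is a placeholder. Only externalTrafficPolicy differs between the node-level and cluster-level settings.

  apiVersion: v1
  kind: Service
  metadata:
    name: nginx
    annotations:
      kubernetes.io/elb.id: <your-elb-id>      # Placeholder: ID of an existing load balancer in the same VPC as the cluster.
  spec:
    selector:
      app: nginx
    externalTrafficPolicy: Local               # Node-level affinity: no extra hop, client source IP is preserved.
    # externalTrafficPolicy: Cluster           # Cluster-level affinity (default): any node can forward, source IP is not preserved.
    ports:
    - name: service0
      port: 80
      protocol: TCP
      targetPort: 80
    type: LoadBalancer

As noted in the constraints above, avoid switching an existing Service between the two settings; create a new Service instead.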

                                                                                  Creating a LoadBalancer Service

                                                                                  1. Log in to the CCE console and click the cluster name to access the cluster console.
                                                                                  2. In the navigation pane, choose Services & Ingresses. In the upper right corner, click Create Service.
                                                                                  3. Configure parameters.

                                                                                    • Service Name: Specify a Service name, which can be the same as the workload name.
                                                                                    • Service Type: Select LoadBalancer.
                                                                                    • Namespace: Namespace to which the workload belongs.
                                                                                    • Service Affinity: For details, see externalTrafficPolicy (Service Affinity).
                                                                                      • Cluster level: The IP addresses and access ports of all nodes in a cluster can access the workload associated with the Service. Service access will cause performance loss due to route redirection, and the source IP address of the client cannot be obtained.
                                                                                      • Node level: Only the IP address and access port of the node where the workload is located can access the workload associated with the Service. Service access will not cause performance loss due to route redirection, and the source IP address of the client can be obtained.
  • Selector: Add a label and click Confirm. A Service selects a pod based on the added label. You can also click Reference Workload Label to use the label of an existing workload. In the dialog box that is displayed, select a workload and click OK.
  • IPv6: This function is disabled by default. After this function is enabled, the cluster IP address of the Service changes to an IPv6 address. This parameter is available only in clusters of v1.15 or later with IPv6 enabled (set during cluster creation).
  • Load Balancer: Select a load balancer type and creation mode.

    A load balancer can be dedicated or shared. You can select Use existing or Auto create to obtain a load balancer. For details about the configuration of different creation modes, see Table 1.

    Table 1 Load balancer configurations

    How to Create | Configuration
    Use existing | Only the load balancers in the same VPC as the cluster can be selected. If no load balancer is available, click Create Load Balancer to create one on the ELB console.
    Auto create | • Instance Name: Enter a load balancer name. • Public Access: If enabled, an EIP with 5 Mbit/s bandwidth will be created. • AZ (available only to dedicated load balancers): Deploy the load balancer in multiple AZs to improve service availability. • Specifications (available only to dedicated load balancers): Fixed, which applies to stable traffic and is billed based on specifications. (A YAML sketch of the equivalent auto-creation annotations follows this procedure.)

    You can click the edit icon in the Set ELB area and configure load balancer parameters in the Set ELB dialog box.
    • Algorithm: Three algorithms are available: weighted round robin, weighted least connections, and source IP hash.
      • Weighted round robin: Requests are forwarded to different servers based on their weights, which indicate server processing performance. Backend servers with higher weights receive proportionately more requests, whereas equal-weighted servers receive the same number of requests. This algorithm is often used for short connections, such as HTTP services.
      • Weighted least connections: In addition to the weight assigned to each server, the number of connections processed by each backend server is considered. Requests are forwarded to the server with the lowest connections-to-weight ratio. Building on least connections, the weighted least connections algorithm assigns a weight to each server based on their processing capability. This algorithm is often used for persistent connections, such as database connections.
      • Source IP hash: The source IP address of each request is calculated using the hash algorithm to obtain a unique hash key, and all backend servers are numbered. The generated key allocates the client to a particular server. This enables requests from different clients to be distributed in load balancing mode and ensures that requests from the same client are forwarded to the same server. This algorithm applies to TCP connections without cookies.
    • Type: This function is disabled by default. You can select Source IP address. Source IP address-based sticky session means that access requests from the same IP address are forwarded to the same backend server.

      When the distribution policy uses the source IP hash, sticky session cannot be set.

  • Health Check: Configure health check for the load balancer.
    • Global health check: applies only to ports using the same protocol. You are advised to select Custom health check.
    • Custom health check: applies to ports using different protocols. For details about the YAML definition for custom health check, see Configuring Health Check on Multiple Service Ports.

    Table 2 Health check parameters

    Parameter | Description
    Protocol | When the protocol of Port is set to TCP, TCP and HTTP are supported. When the protocol of Port is set to UDP, UDP is supported. • Check Path (supported only by HTTP for health check): specifies the health check URL. The check path must start with a slash (/) and contain 1 to 80 characters.
    Port | By default, the service port (NodePort or container port of the Service) is used for health check. You can also specify another port for health check. After the port is specified, a service port named cce-healthz will be added for the Service. • Node Port: If a shared load balancer is used or no ENI instance is associated, the node port is used as the health check port. If this parameter is not specified, a random port is used. The value ranges from 30000 to 32767. • Container Port: When a dedicated load balancer is associated with an ENI instance, the container port is used for health check. The value ranges from 1 to 65535.
    Check Period (s) | Specifies the maximum interval between health checks. The value ranges from 1 to 50.
    Timeout (s) | Specifies the maximum timeout duration for each health check. The value ranges from 1 to 50.
    Max. Retries | Specifies the maximum number of health check retries. The value ranges from 1 to 10.

  • Ports
    • Protocol: protocol used by the Service.
    • Service Port: port used by the Service. The port number ranges from 1 to 65535.
    • Container Port: port on which the workload listens. For example, Nginx uses port 80 by default.
    • Health Check: If Health Check is set to Custom health check, you can configure health check for ports using different protocols. For details, see Table 2.

    When a LoadBalancer Service is created, a random node port number (NodePort) is automatically generated.

  • Annotation: The LoadBalancer Service has some advanced CCE functions, which are implemented by annotations. For details, see Using Annotations to Balance Load.

4. Click OK.
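For reference, the Auto create option has a YAML counterpart based on annotations. The following is a rough sketch, assuming a shared load balancer with public access; the kubernetes.io/elb.class and kubernetes.io/elb.autocreate keys and their JSON fields follow the CCE load balancer annotation reference (see Using Annotations to Balance Load), and the name and bandwidth values are placeholders to adapt.

  apiVersion: v1
  kind: Service
  metadata:
    name: nginx
    annotations:
      kubernetes.io/elb.class: union           # Shared load balancer; "performance" would request a dedicated one (assumption: values per the annotation reference).
      kubernetes.io/elb.autocreate: '{"type":"public","bandwidth_name":"cce-bandwidth-nginx","bandwidth_chargemode":"bandwidth","bandwidth_size":5,"bandwidth_sharetype":"PER","eip_type":"5_bgp"}'   # Auto-create a load balancer with a 5 Mbit/s public EIP, matching the console behavior described above.
  spec:
    selector:
      app: nginx
    ports:
    - name: service0
      port: 80
      protocol: TCP
      targetPort: 80
    type: LoadBalancer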
                                                                                    • Using kubectl to Create a Service (Using an Existing Load Balancer)

                                                                                      You can set the Service when creating a workload using kubectl. This section uses an Nginx workload as an example to describe how to add a LoadBalancer Service using kubectl.

  spec:
    ...
      nodePort: 31128     # Port number of the node. If this parameter is not specified, a random port number ranging from 30000 to 32767 is generated.
    type: LoadBalancer
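Expanding this snippet into a complete manifest, a Service that reuses an existing load balancer and enables sticky session and health check through annotations might look like the sketch below. The load balancer ID is a placeholder, and the annotation keys and their JSON fields should be verified against Table 3 to Table 5 referenced after the example.

  apiVersion: v1
  kind: Service
  metadata:
    name: nginx
    annotations:
      kubernetes.io/elb.id: <your-elb-id>                    # Placeholder: ID of the existing load balancer.
      kubernetes.io/elb.class: union                         # Shared load balancer ("performance" for a dedicated one).
      kubernetes.io/elb.session-affinity-mode: SOURCE_IP     # Source IP based sticky session (assumption: key per the annotation reference).
      kubernetes.io/elb.session-affinity-option: '{"persistence_timeout": "30"}'   # Sticky session timeout (see Table 4).
      kubernetes.io/elb.health-check-flag: 'on'              # Enable ELB health check (assumption: key per the annotation reference).
      kubernetes.io/elb.health-check-option: '{"protocol": "TCP", "delay": "5", "timeout": "10", "max_retries": "3"}'   # Health check settings (see Table 5).
  spec:
    selector:
      app: nginx
    ports:
    - name: service0
      port: 80              # Port for accessing the Service.
      protocol: TCP
      targetPort: 80        # Port used by the containers in the pod.
      nodePort: 31128       # Node port. If not specified, a random port ranging from 30000 to 32767 is generated.
    type: LoadBalancer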

                                                                                      -

                                                                                      The preceding example uses annotations to implement some advanced functions of load balancing, such as sticky session and health check. For details, see Table 2.

                                                                                      -

                                                                                      For more annotations and examples related to advanced functions, see Using Annotations to Configure Load Balancing.

                                                                                      +

                                                                                      The preceding example uses annotations to implement some advanced functions of load balancing, such as sticky session and health check. For details, see Table 3.

                                                                                      +

                                                                                      For more annotations and examples related to advanced functions, see Using Annotations to Balance Load.
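As an illustration only, the annotation block that enables these advanced functions might look like the following snippet (to be placed under metadata.annotations of the Service); the annotation names are assumptions based on CCE's LoadBalancer Service annotations, and the field values follow the elb.session-affinity-option and elb.health-check-option structures described in the tables below:

    kubernetes.io/elb.lb-algorithm: ROUND_ROBIN          # Load balancing algorithm of the backend server group
    kubernetes.io/elb.session-affinity-mode: SOURCE_IP   # Enable source IP address-based sticky session
    kubernetes.io/elb.session-affinity-option: '{"persistence_timeout": "30"}'   # Sticky session timeout
    kubernetes.io/elb.health-check-flag: 'on'            # Enable the ELB health check
    kubernetes.io/elb.health-check-option: '{"delay": "5", "timeout": "10", "max_retries": "3", "protocol": "TCP"}'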

                                                                                      -
                                                                                      Table 2 annotations parameters

                                                                                      Parameter

                                                                                      +
@@ -190,7 +210,7 @@ spec:
@@ -210,7 +230,7 @@ spec:
@@ -219,7 +239,7 @@ spec:
                                                                                      Table 3 annotations parameters

                                                                                      Parameter

                                                                                      Mandatory

                                                                                      No

                                                                                      Table 3 object

                                                                                      +

                                                                                      Table 4 object

                                                                                      Sticky session timeout.

                                                                                      No

                                                                                      Table 4 object

                                                                                      +

                                                                                      Table 5 object

                                                                                      ELB health check configuration items.

                                                                                      -
                                                                                      Table 3 elb.session-affinity-option data structure

                                                                                      Parameter

                                                                                      +
                                                                                      @@ -243,7 +263,7 @@ spec:
                                                                                      Table 4 elb.session-affinity-option data structure

                                                                                      Parameter

                                                                                      Mandatory

                                                                                      -
                                                                                      Table 4 elb.health-check-option data structure

                                                                                      Parameter

                                                                                      +
@@ -323,7 +343,7 @@ spec:
 kubernetes   ClusterIP      10.247.0.1       <none>         443/TCP        3d
 nginx        LoadBalancer   10.247.130.196   10.78.42.242   80:31540/TCP   51s

                                                                                    • Enter the URL in the address box of the browser, for example, 10.78.42.242:80. 10.78.42.242 indicates the IP address of the load balancer, and 80 indicates the access port displayed on the CCE console.

Nginx is accessible. You can also verify this from the command line, as shown after the figure.

                                                                                      -
                                                                                      Figure 3 Accessing Nginx through the LoadBalancer Service
                                                                                      +
                                                                                      Figure 3 Accessing Nginx through the LoadBalancer Service
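You can also run a quick command-line check with the load balancer address from the example output above:

curl http://10.78.42.242:80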

                                                                                    • Using kubectl to Create a Service (Automatically Creating a Load Balancer)

                                                                                      You can set the Service when creating a workload using kubectl. This section uses an Nginx workload as an example to describe how to add a LoadBalancer Service using kubectl.
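For orientation, a minimal sketch of a Service that automatically creates a shared public network load balancer is shown below; the bandwidth name and other values are placeholders, and the kubernetes.io/elb.autocreate fields mirror those that appear in the diff hunk that follows:

apiVersion: v1
kind: Service
metadata:
  name: nginx
  annotations:
    kubernetes.io/elb.class: union              # Assumption: a shared load balancer is created
    kubernetes.io/elb.autocreate: '{
        "type": "public",
        "bandwidth_name": "cce-bandwidth-placeholder",
        "bandwidth_chargemode": "traffic",
        "bandwidth_size": 5,
        "bandwidth_sharetype": "PER",
        "eip_type": "5_bgp"
    }'
spec:
  selector:
    app: nginx
  ports:
  - name: service0
    port: 80
    protocol: TCP
    targetPort: 80
  type: LoadBalancer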

@@ -361,9 +381,10 @@ metadata:
     kubernetes.io/elb.autocreate: '{
         "type": "public",
         "bandwidth_name": "cce-bandwidth-1551163379627",
-        "bandwidth_chargemode": "traffic",
+        "bandwidth_chargemode": "traffic",
         "bandwidth_size": 5,
         "bandwidth_sharetype": "PER",
+        "vip_subnet_cidr_id": "*****",
         "vip_address": "**.**.**.**",
         "eip_type": "5_bgp"
     }'
@@ -403,12 +424,14 @@ metadata:
     kubernetes.io/elb.autocreate: '{
         "type": "public",
         "bandwidth_name": "cce-bandwidth-1626694478577",
-        "bandwidth_chargemode": "traffic",
+        "bandwidth_chargemode": "traffic",
         "bandwidth_size": 5,
         "bandwidth_sharetype": "PER",
         "eip_type": "5_bgp",
+        "vip_subnet_cidr_id": "*****",
         "vip_address": "**.**.**.**",
         "elb_virsubnet_ids": ["*****"],
+        "ipv6_vip_virsubnet_id": "*****",
         "available_zone": [
             ""
         ],
@@ -437,10 +460,10 @@ spec:
     type: LoadBalancer

                                                                                      -

                                                                                      The preceding example uses annotations to implement some advanced functions of load balancing, such as sticky session and health check. For details, see Table 5.

                                                                                      -

                                                                                      For more annotations and examples related to advanced functions, see Using Annotations to Configure Load Balancing.

                                                                                      +

                                                                                      The preceding example uses annotations to implement some advanced functions of load balancing, such as sticky session and health check. For details, see Table 6.

                                                                                      +

                                                                                      For more annotations and examples related to advanced functions, see Using Annotations to Balance Load.

                                                                                      -
                                                                                      Table 5 elb.health-check-option data structure

                                                                                      Parameter

                                                                                      Mandatory

                                                                                      Table 5 annotations parameters

                                                                                      Parameter

                                                                                      +
@@ -469,7 +492,7 @@ spec:
@@ -492,7 +515,7 @@ spec:
@@ -513,7 +536,7 @@ spec:
@@ -533,7 +556,7 @@ spec:
@@ -542,7 +565,7 @@ spec:
                                                                                      Table 6 annotations parameters

                                                                                      Parameter

                                                                                      Mandatory

                                                                                      Whether to automatically create a load balancer associated with the Service.

                                                                                      Example

                                                                                      -
                                                                                      • If a public network load balancer will be automatically created, set this parameter to the following value:

                                                                                        '{"type":"public","bandwidth_name":"cce-bandwidth-1551163379627","bandwidth_chargemode": "traffic" ,"bandwidth_size":5,"bandwidth_sharetype":"PER","eip_type":"5_bgp","name":"james"}'

                                                                                        +
                                                                                        • If a public network load balancer will be automatically created, set this parameter to the following value:

                                                                                          '{"type":"public","bandwidth_name":"cce-bandwidth-1551163379627","bandwidth_chargemode":"traffic,"bandwidth_size":5,"bandwidth_sharetype":"PER","eip_type":"5_bgp","name":"james"}'

                                                                                        • If a private network load balancer will be automatically created, set this parameter to the following value:

                                                                                          {"type":"inner","name":"A-location-d-test"}

                                                                                      Specifies the load balancing algorithm of the backend server group. The default value is ROUND_ROBIN.

                                                                                      Options:

                                                                                      -
                                                                                      • ROUND_ROBIN: weighted round robin algorithm
                                                                                      • LEAST_CONNECTIONS: weighted least connections algorithm
                                                                                      • SOURCE_IP: source IP hash algorithm
                                                                                      +
                                                                                      • ROUND_ROBIN: weighted round robin algorithm
                                                                                      • LEAST_CONNECTIONS: weighted least connections algorithm
                                                                                      • SOURCE_IP: source IP hash algorithm
                                                                                      NOTE:

                                                                                      If this parameter is set to SOURCE_IP, the weight setting (weight field) of backend servers bound to the backend server group is invalid, and sticky session cannot be enabled.

                                                                                      No

                                                                                      Table 3 object

                                                                                      +

                                                                                      Table 4 object

                                                                                      Sticky session timeout.

                                                                                      No

                                                                                      Table 4 object

                                                                                      +

                                                                                      Table 5 object

                                                                                      ELB health check configuration items.

                                                                                      -
                                                                                      Table 6 elb.autocreate data structure

                                                                                      Parameter

                                                                                      +
@@ -592,7 +615,7 @@ spec:
@@ -655,6 +690,7 @@ spec:
@@ -665,6 +701,7 @@ spec:
@@ -682,6 +719,16 @@ spec:
                                                                                      Table 7 elb.autocreate data structure

                                                                                      Parameter

                                                                                      Mandatory

                                                                                      Bandwidth mode.

                                                                                      • traffic: billed by traffic
                                                                                      -

                                                                                      Default: traffic

                                                                                      +

                                                                                      Default: traffic

                                                                                      bandwidth_size

                                                                                      @@ -627,15 +650,26 @@ spec:

                                                                                      The specific type varies with regions. For details, see the EIP console.

                                                                                      vip_subnet_cidr_id

                                                                                      +

                                                                                      No

                                                                                      +

                                                                                      String

                                                                                      +

                                                                                      Subnet where a load balancer is located. The subnet must belong to the VPC where the cluster resides.

                                                                                      +

                                                                                      If this parameter is not specified, the ELB load balancer and the cluster are in the same subnet.

                                                                                      +

                                                                                      This field can be specified only for clusters of v1.21 or later.

                                                                                      +

                                                                                      vip_address

                                                                                      No

                                                                                      String

                                                                                      Specifies the private IP address of the load balancer. Only IPv4 addresses are supported.

                                                                                      +

                                                                                      Private IP address of the load balancer. Only IPv4 addresses are supported.

                                                                                      The IP address must be in the ELB CIDR block. If this parameter is not specified, an IP address will be automatically assigned from the ELB CIDR block.

                                                                                      -

                                                                                      This parameter is available only for clusters of v1.23.11-r0, v1.25.6-r0, v1.27.3-r0, or later versions.

                                                                                      +

                                                                                      This parameter is available only in clusters of v1.23.11-r0, v1.25.6-r0, v1.27.3-r0, or later versions.

                                                                                      available_zone

                                                                                      @@ -645,6 +679,7 @@ spec:

                                                                                      Array of strings

                                                                                      AZ where the load balancer is located.

                                                                                      +

                                                                                      You can obtain all supported AZs by getting the AZ list.

                                                                                      This parameter is available only for dedicated load balancers.

                                                                                      String

                                                                                      Flavor name of the layer-4 load balancer.

                                                                                      +

                                                                                      You can obtain all supported types by getting the flavor list.

                                                                                      This parameter is available only for dedicated load balancers.

                                                                                      String

                                                                                      Flavor name of the layer-7 load balancer.

                                                                                      +

                                                                                      You can obtain all supported types by getting the flavor list.

                                                                                      This parameter is available only for dedicated load balancers. The value of this parameter must be the same as that of l4_flavor_name, that is, both are elastic specifications or fixed specifications.

                                                                                      ipv6_vip_virsubnet_id

                                                                                      +

                                                                                      No

                                                                                      +

                                                                                      String

                                                                                      +

Specifies the ID of the IPv6 subnet where the load balancer resides. IPv6 must be enabled for the corresponding subnet. This parameter is mandatory only when dual-stack clusters are used.

                                                                                      +

                                                                                      This parameter is available only for dedicated load balancers.

                                                                                      +
@@ -701,7 +748,7 @@ spec:
 kubernetes   ClusterIP      10.247.0.1       <none>         443/TCP        3d
 nginx        LoadBalancer   10.247.130.196   10.78.42.242   80:31540/TCP   51s

                                                                                    • Enter the URL in the address box of the browser, for example, 10.78.42.242:80. 10.78.42.242 indicates the IP address of the load balancer, and 80 indicates the access port displayed on the CCE console.

Nginx is accessible.

                                                                                      -
                                                                                      Figure 4 Accessing Nginx through the LoadBalancer Service
                                                                                      +
                                                                                      Figure 4 Accessing Nginx through the LoadBalancer Service

diff --git a/docs/cce/umn/cce_10_0683.html b/docs/cce/umn/cce_10_0683.html
index dc26f38d..224cb7df 100644
--- a/docs/cce/umn/cce_10_0683.html
+++ b/docs/cce/umn/cce_10_0683.html
@@ -1,17 +1,65 @@
-

                                                                                      Service Using HTTP or HTTPS

                                                                                      -

                                                                                      Constraints

                                                                                      • Only clusters of v1.19.16 or later support HTTP or HTTPS.
                                                                                      • Do not connect an Ingress and a Service that uses HTTP or HTTPS to the same listener of the same load balancer. Otherwise, a port conflict occurs.
                                                                                      +

                                                                                      Configuring an HTTP or HTTPS Service

                                                                                      +

                                                                                      Constraints

• Only clusters of v1.19.16 or later support HTTP or HTTPS. The scenarios in which a load balancer supports HTTP or HTTPS are listed in the following table.

  Table 1 Scenarios where a load balancer supports HTTP or HTTPS
  • Shared load balancer, interconnecting with an existing load balancer: supported, with no additional requirements.
  • Shared load balancer, automatically creating a load balancer: supported, with no additional requirements.
  • Dedicated load balancer, interconnecting with an existing load balancer: supported (a YAML file is required).
    • For versions earlier than v1.19.16-r50, v1.21.11-r10, v1.23.9-r10, v1.25.4-r10, and v1.27.1-r10, the load balancer flavor must support both layer-4 and layer-7 routing.
    • For v1.19.16-r50, v1.21.11-r10, v1.23.9-r10, v1.25.4-r10, v1.27.1-r10, and later versions, the load balancer flavor must support layer-7 routing.
  • Dedicated load balancer, automatically creating a load balancer: supported (a YAML file is required). The flavor requirements are the same as those for interconnecting with an existing load balancer.
                                                                                        -

                                                                                        Service Using HTTP

                                                                                        The following annotations need to be added:

                                                                                        +
                                                                                      • Do not connect an ingress and a Service that uses HTTP or HTTPS to the same listener of the same load balancer. Otherwise, a port conflict occurs.
                                                                                      +
                                                                                      +

                                                                                      Using kubectl

If a Service uses HTTP or HTTPS, add the following annotations:

                                                                                      • kubernetes.io/elb.protocol-port: "https:443,http:80"

The value of protocol-port must match a port defined in the spec.ports field of the Service. The format is Protocol:Port, and the matched Service port is released over the specified protocol.

                                                                                        -
                                                                                      • kubernetes.io/elb.cert-id: "17e3b4f4bc40471c86741dc3aa211379"

                                                                                        cert-id indicates the certificate ID in ELB certificate management. When https is configured for protocol-port, the certificate of the ELB listener will be set to the cert-id certificate. When multiple HTTPS services are released, the same certificate is used.

                                                                                        +
                                                                                      • kubernetes.io/elb.cert-id: "17e3b4f4bc40471c86741dc3aa211379"

                                                                                        cert-id indicates the certificate ID in ELB certificate management. When https is configured for protocol-port, the certificate of the ELB listener will be set to the server certificate. When multiple HTTPS Services are released, they will use the same certificate.

                                                                                      -

                                                                                      The following is a configuration example. The two ports in spec.ports correspond to those in kubernetes.io/elb.protocol-port. Ports 443 and 80 are enabled for HTTPS and HTTP requests, respectively.

                                                                                      +

The following is a configuration example for automatically creating a dedicated load balancer, in which the key configurations are marked with comments:

                                                                                      +
                                                                                      • Different ELB types and cluster versions have different requirements on flavors. For details, see Table 1.
                                                                                      • The two ports in spec.ports must correspond to those in kubernetes.io/elb.protocol-port. In this example, ports 443 and 80 are enabled with HTTPS and HTTP, respectively.
                                                                                      apiVersion: v1
                                                                                       kind: Service
                                                                                       metadata:
                                                                                         annotations:
                                                                                      +# Specify the Layer 4 and Layer 7 flavors in the parameters for automatically creating a load balancer.
                                                                                           kubernetes.io/elb.autocreate: '
                                                                                             {
                                                                                                 "type": "public",
                                                                                      @@ -26,9 +74,9 @@ metadata:
                                                                                                 "l7_flavor_name": "L7_flavor.elb.s2.small",
                                                                                                 "l4_flavor_name": "L4_flavor.elb.s1.medium"
                                                                                             }'
                                                                                      -    kubernetes.io/elb.class: performance
                                                                                      -    kubernetes.io/elb.protocol-port: "https:443,http:80"
                                                                                      -    kubernetes.io/elb.cert-id: "17e3b4f4bc40471c86741dc3aa211379"
                                                                                      +    kubernetes.io/elb.class: performance  # Dedicated load balancer
                                                                                      +    kubernetes.io/elb.protocol-port: "https:443,http:80"  # HTTP/HTTPS and port number, which must be the same as the port numbers in spec.ports
                                                                                      +    kubernetes.io/elb.cert-id: "17e3b4f4bc40471c86741dc3aa211379"  # Certificate ID of the LoadBalancer Service
                                                                                         labels:
                                                                                           app: nginx
                                                                                           name: test
                                                                                      diff --git a/docs/cce/umn/cce_10_0684.html b/docs/cce/umn/cce_10_0684.html
                                                                                      index 6bfa6276..b2912b01 100644
                                                                                      --- a/docs/cce/umn/cce_10_0684.html
                                                                                      +++ b/docs/cce/umn/cce_10_0684.html
                                                                                      @@ -1,8 +1,8 @@
                                                                                       
                                                                                       
                                                                                      -

                                                                                      Configuring Health Check for Multiple Ports

                                                                                      +

                                                                                      Configuring Health Check on Multiple Service Ports

The annotation for configuring health checks on a LoadBalancer Service has been upgraded from kubernetes.io/elb.health-check-option to kubernetes.io/elb.health-check-options. With the new annotation, each Service port can be configured separately, and you can configure only some of the ports. If per-port configuration is not needed, the original annotation is still available and does not need to be modified.

                                                                                      -

                                                                                      Constraints

                                                                                      • This feature takes effect only in the following versions:
                                                                                        • v1.19: v1.19.16-r5 or later
                                                                                        • v1.21: v1.21.8-r0 or later
                                                                                        • v1.23: v1.23.6-r0 or later
                                                                                        • v1.25: v1.25.2-r0 or later
                                                                                        +

                                                                                        Constraints

                                                                                        • This feature is available in the following versions:
                                                                                          • v1.19: v1.19.16-r5 or later
                                                                                          • v1.21: v1.21.8-r0 or later
                                                                                          • v1.23: v1.23.6-r0 or later
                                                                                          • v1.25: v1.25.2-r0 or later
                                                                                          • Versions later than v1.25
                                                                                        • kubernetes.io/elb.health-check-option and kubernetes.io/elb.health-check-options cannot be configured at the same time.
                                                                                        • The target_service_port field is mandatory and must be unique.
                                                                                        • For a TCP port, the health check protocol can only be TCP or HTTP. For a UDP port, the health check protocol must be UDP.

                                                                                        Procedure

                                                                                        The following is an example of using the kubernetes.io/elb.health-check-options annotation:
                                                                                        apiVersion: v1
                                                                                        @@ -34,7 +34,8 @@ metadata:
                                                                                         		"max_retries": "3",
                                                                                         		"path": "/",
                                                                                         		"target_service_port": "TCP:2",
                                                                                        -		"monitor_port": "22"
                                                                                        +		"monitor_port": "22",
                                                                                        +                "expected_codes": "200-399,401,404"
                                                                                         	}
                                                                                             ]'
                                                                                         spec:
                                                                                        @@ -56,84 +57,84 @@ spec:
                                                                                           type: LoadBalancer
                                                                                           loadBalancerIP: **.**.**.**
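For orientation, a minimal sketch of a Service that carries the kubernetes.io/elb.health-check-options annotation in full is shown below; the ports and check values are illustrative, and the kubernetes.io/elb.health-check-flag annotation is an assumption about how the check is switched on:

apiVersion: v1
kind: Service
metadata:
  name: nginx
  annotations:
    kubernetes.io/elb.health-check-flag: 'on'        # Assumption: enables the ELB health check
    kubernetes.io/elb.health-check-options: '[
      {
        "protocol": "TCP",
        "delay": "5",
        "timeout": "10",
        "max_retries": "3",
        "target_service_port": "TCP:80",
        "monitor_port": "22"
      }
    ]'
spec:
  selector:
    app: nginx
  ports:
  - name: cce-service-0
    port: 80
    protocol: TCP
    targetPort: 80
  type: LoadBalancer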
                                                                                        -
                                                                                        Table 1 Data structure description of the elb.health-check-options field

                                                                                        Parameter

                                                                                        +
diff --git a/docs/cce/umn/cce_10_0685.html b/docs/cce/umn/cce_10_0685.html
index 03910808..d68c3f8a 100644
--- a/docs/cce/umn/cce_10_0685.html
+++ b/docs/cce/umn/cce_10_0685.html
@@ -2,7 +2,7 @@

                                                                                        Setting the Pod Ready Status Through the ELB Health Check

The ready status of a pod can be associated with the ELB health check, so that the pod becomes ready only after the health check is successful. This association works with the strategy.rollingUpdate.maxSurge and strategy.rollingUpdate.maxUnavailable parameters of the workload to implement graceful rolling upgrades.

                                                                                        -

                                                                                        Constraints

                                                                                        • This feature takes effect only in the following versions:
                                                                                          • v1.19: v1.19.16-r5 or later
                                                                                          • v1.21: v1.21.8-r0 or later
                                                                                          • v1.23: v1.23.6-r0 or later
                                                                                          • v1.25: v1.25.2-r0 or later
                                                                                          +

                                                                                          Constraints

                                                                                          • This feature is available in the following versions:
                                                                                            • v1.19: v1.19.16-r5 or later
                                                                                            • v1.21: v1.21.8-r0 or later
                                                                                            • v1.23: v1.23.6-r0 or later
                                                                                            • v1.25: v1.25.2-r0 or later
                                                                                            • Versions later than v1.25
                                                                                          • This function applies only to passthrough scenarios, that is, scenarios where dedicated load balancers are used in CCE Turbo clusters.
• To use this function, configure the readinessGates field in the pod and specify the label target-health.elb.k8s.cce/{serviceName}, where {serviceName} indicates the Service name (see the sketch after this list).
                                                                                          • The pod ready status takes effect only when the ELB backend is initially connected. The subsequent health check status does not affect the pod ready status.
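As a rough sketch of the readinessGates configuration described above (a Deployment and a Service both named nginx are assumed; only the relevant fields are shown):

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      readinessGates:
      - conditionType: target-health.elb.k8s.cce/nginx   # {serviceName} replaced with the name of the Service (nginx assumed)
      containers:
      - name: container-1
        image: nginx:latest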

                                                                                          Setting the Pod Ready Status Through the ELB Health Check

                                                                                          To use Pod readiness Gates, perform the following steps:

diff --git a/docs/cce/umn/cce_10_0686.html b/docs/cce/umn/cce_10_0686.html
index 39f23e77..7728fcf0 100644
--- a/docs/cce/umn/cce_10_0686.html
+++ b/docs/cce/umn/cce_10_0686.html
@@ -1,30 +1,30 @@
-

                                                                                          ELB Ingresses

                                                                                          +

                                                                                          LoadBalancer Ingresses

diff --git a/docs/cce/umn/cce_10_0687.html b/docs/cce/umn/cce_10_0687.html
index f1ba9873..6f511e99 100644
--- a/docs/cce/umn/cce_10_0687.html
+++ b/docs/cce/umn/cce_10_0687.html
@@ -1,11 +1,11 @@
-

                                                                                          Configuring HTTPS Certificates for ELB Ingresses

                                                                                          -

                                                                                          Ingress supports TLS certificate configuration and secures your Services with HTTPS.

                                                                                          -

                                                                                          Currently, you can use the TLS secret certificate configured in the cluster and the ELB certificate.

                                                                                          +

                                                                                          Configuring an HTTPS Certificate for a LoadBalancer Ingress

                                                                                          +

                                                                                          Ingresses support TLS certificates and secure your Services with HTTPS.

                                                                                          +

                                                                                          You can use a TLS secret certificate configured in the cluster and the ELB certificate.

                                                                                          If HTTPS is enabled for the same port of the same load balancer of multiple ingresses, you must select the same certificate.

                                                                                          -

                                                                                          Using a TLS Secret Certificate

                                                                                          1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
                                                                                          2. Ingress supports two TLS secret types: kubernetes.io/tls and IngressTLS. IngressTLS is used as an example. For details, see Creating a Secret. For details about examples of the kubernetes.io/tls secret and its description, see TLS Secret.

                                                                                            Run the following command to create a YAML file named ingress-test-secret.yaml (the file name can be customized):

                                                                                            +

                                                                                            Using a TLS Secret Certificate

                                                                                            1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
                                                                                            2. Ingress supports two TLS secret types: kubernetes.io/tls and IngressTLS. IngressTLS is used as an example. For details, see Creating a Secret. For details about examples of the kubernetes.io/tls secret and its description, see TLS secrets.

                                                                                              Create a YAML file named ingress-test-secret.yaml. The file name can be customized.

                                                                                              vi ingress-test-secret.yaml

                                                                                              The YAML file is configured as follows:
                                                                                              apiVersion: v1
                                                                                               data:
                                                                                              @@ -22,7 +22,7 @@ metadata:
                                                                                               

                                                                                              In the preceding information, tls.crt and tls.key are only examples. Replace them with the actual files. The values of tls.crt and tls.key are Base64-encoded.
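As a rough sketch of what such a file typically contains (the secret name follows the file name above, and the Base64 strings are placeholders):

apiVersion: v1
kind: Secret
metadata:
  name: ingress-test-secret
  namespace: default
data:
  tls.crt: LS0tLS1CRUdJTi...        # Base64-encoded certificate (placeholder)
  tls.key: LS0tLS1CRUdJTi...        # Base64-encoded private key (placeholder)
type: IngressTLS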

                                                                                            3. Create a secret.

                                                                                              kubectl create -f ingress-test-secret.yaml

                                                                                              -

                                                                                              If information similar to the following is displayed, the secret is being created:

                                                                                              +

                                                                                              If information similar to the following is displayed, the secret has been created:

                                                                                              secret/ingress-test-secret created

                                                                                              View the created secret.

                                                                                              kubectl get secrets

@@ -81,7 +81,7 @@ metadata:
     '{
         "type": "public",
         "bandwidth_name": "cce-bandwidth-******",
-        "bandwidth_chargemode": "traffic",
+        "bandwidth_chargemode": "traffic",
         "bandwidth_size": 5,
         "bandwidth_sharetype": "PER",
         "eip_type": "5_bgp",
@@ -104,7 +104,7 @@ spec:
           service:
             name: <your_service_name>  # Replace it with the name of your target Service.
             port:
-              number: 8080             # Replace 8080 with the port number of your target Service.
+              number: 8080             # Replace 8080 with the port number of your target Service.
       property:
         ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
       pathType: ImplementationSpecific
@@ -140,7 +140,7 @@ spec:
Table 1 elb.health-check-options

• target_service_port (Mandatory: Yes; Type: String): Port for health check specified by spec.ports. The value consists of the protocol and port number, for example, TCP:80.
• monitor_port (Mandatory: No; Type: String): Re-specified port for health check. If this parameter is not specified, the service port is used by default.
  NOTE: Ensure that the port is in the listening state on the node where the pod is located. Otherwise, the health check result will be affected.
• delay (Mandatory: No; Type: String): Health check interval (s). Value range: 1 to 50. Default value: 5
• timeout (Mandatory: No; Type: String): Health check timeout, in seconds. Value range: 1 to 50. Default value: 10
• max_retries (Mandatory: No; Type: String): Maximum number of health check retries. Value range: 1 to 10. Default value: 3
• protocol (Mandatory: No)

                                                                                        No

                                                                                        String

                                                                                        +

                                                                                        String

                                                                                        Health check protocol.

                                                                                        +

                                                                                        Health check protocol.

                                                                                        Default value: protocol of the associated Service

                                                                                        Value options: TCP, UDP, or HTTP

                                                                                        path

                                                                                        +

                                                                                        path

                                                                                        No

                                                                                        +

                                                                                        No

                                                                                        String

                                                                                        +

                                                                                        String

                                                                                        Health check URL. This parameter needs to be configured when the protocol is HTTP.

                                                                                        +

                                                                                        Health check URL. This parameter needs to be configured when the protocol is HTTP.

                                                                                        Default value: /

                                                                                        Value range: 1-80 characters
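To see these fields in context, the following is a minimal sketch of an ingress that configures them through an annotation. The annotation keys kubernetes.io/elb.health-check-flag and kubernetes.io/elb.health-check-options, as well as all names and values, are illustrative assumptions and should be verified against the health check description in this document.

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-test
  annotations:
    kubernetes.io/elb.id: <your_elb_id>    # Replace it with the ID of your existing load balancer.
    kubernetes.io/elb.port: '80'
    # Assumed CCE health check annotations; the JSON fields mirror the table above.
    kubernetes.io/elb.health-check-flag: 'on'
    kubernetes.io/elb.health-check-options: '{
      "protocol": "HTTP",
      "delay": "5",
      "timeout": "10",
      "max_retries": "3",
      "path": "/"
    }'
spec:
  ingressClassName: cce
  rules:
  - http:
      paths:
      - path: '/'
        pathType: ImplementationSpecific
        backend:
          service:
            name: <your_service_name>   # Replace it with the name of your target Service.
            port:
              number: 8080              # Replace 8080 with the port number of your target Service.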

Type: Array of strings
Description: When HTTPS is used, this parameter must be added to specify the secret certificate.
-  Multiple independent domain names and certificates can be added. For details, see Configuring the Server Name Indication (SNI) for ELB Ingresses.
+  Multiple independent domain names and certificates can be added. For details, see Configuring SNI for a LoadBalancer Ingress.

Parameter: secretName
@@ -205,7 +205,7 @@ ingress-test * 121.**.**.** 80 10s

• Enter https://121.**.**.**:443 in the address box of the browser to access the workload (for example, Nginx workload).

  121.**.**.** indicates the IP address of the unified load balancer.

• Using the ELB Certificate

  To use the ELB certificate, you can specify the annotation kubernetes.io/elb.tls-certificate-ids.

  1. If you specify both the IngressTLS certificate and the ELB certificate, the latter is used.
  2. CCE does not check whether the ELB certificate is valid. It only checks whether the certificate exists.
  3. Only clusters of v1.19.16-r2, v1.21.5-r0, v1.23.3-r0, or later support the ELB certificate.

  For clusters of v1.21 or earlier:

@@ -250,17 +250,38 @@ spec:
          service:
            name: <your_service_name>  # Replace it with the name of your target Service.
            port: 
-              number: 8080             # Replace 8080 with the port number of your target Service.
+              number: 8080             # Replace 8080 with the port number of your target Service.
         property:
           ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
         pathType: ImplementationSpecific
   ingressClassName: cce

Table 3 Key parameters

Parameter: kubernetes.io/elb.tls-certificate-ids
Type: String
Description: ELB certificate IDs, which are separated by commas (,). The list length is greater than or equal to 1. The first ID in the list is the server certificate, and the other IDs are SNI certificates, each of which must contain a domain name.
  If an SNI certificate cannot be found based on the domain name requested by the client, the server certificate will be returned by default.
  To obtain the certificate, log in to the CCE console, choose Service List > Networking > Elastic Load Balance, and click Certificates in the navigation pane. In the load balancer list, copy the ID under the target certificate name.
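As a concrete illustration of the parameter above, a minimal metadata sketch follows. The certificate IDs are placeholders, and the rest of the ingress definition is assumed to match the earlier examples in this section.

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-test
  annotations:
    kubernetes.io/elb.id: <your_elb_id>   # Replace it with the ID of your existing load balancer.
    kubernetes.io/elb.port: '443'
    # The first ID is the server certificate; the following IDs are SNI certificates.
    kubernetes.io/elb.tls-certificate-ids: '<server_certificate_id>,<sni_certificate_id>'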
diff --git a/docs/cce/umn/cce_10_0688.html b/docs/cce/umn/cce_10_0688.html
index 46ed62db..65aaeb82 100644
--- a/docs/cce/umn/cce_10_0688.html
+++ b/docs/cce/umn/cce_10_0688.html
@@ -1,7 +1,8 @@
-Configuring the Server Name Indication (SNI) for ELB Ingresses
-
-SNI allows multiple TLS-based access domain names to be provided for external systems using the same IP address and port number. Different domain names can use different security certificates.
-• This function is supported only in clusters of v1.15.11 and later.
-• The SNI option is available only when HTTPS is used.
+Configuring SNI for a LoadBalancer Ingress
+
+An SNI certificate is an extended server certificate that allows the same IP address and port number to provide multiple access domain names for external systems. Different security certificates can be used based on the domain names requested by clients to ensure HTTPS communication security.
+When configuring SNI, you need to add a certificate associated with a domain name. The client submits the requested domain name information when initiating an SSL handshake request. After receiving the SSL request, the load balancer searches for the certificate based on the domain name. If the certificate is found, the load balancer will return it to the client. If the certificate is not found, the load balancer will return the default server certificate.
+• This function is supported only in clusters of v1.15.11 and later.
+• The SNI option is available only when HTTPS is used.
+• Only one domain name can be specified for each SNI certificate. Wildcard-domain certificates are supported.
+• Security policy (kubernetes.io/elb.tls-ciphers-policy) is supported only in clusters of v1.17.11 or later.
@@ -60,7 +61,7 @@ metadata:
     '{
         "type": "public",
         "bandwidth_name": "cce-bandwidth-******",
-        "bandwidth_chargemode": "traffic",
+        "bandwidth_chargemode": "traffic",
         "bandwidth_size": 5,
         "bandwidth_sharetype": "PER",
         "eip_type": "5_bgp",
@@ -89,7 +90,7 @@ spec:
          service:
            name: <your_service_name>  # Replace it with the name of your target Service.
            port: 
-              number: 8080             # Replace 8080 with the port number of your target Service.
+              number: 8080             # Replace 8080 with the port number of your target Service.
         property:
           ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
         pathType: ImplementationSpecific
@@ -98,7 +99,7 @@ spec:
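To make the SNI behavior concrete, the following is a minimal sketch of the tls section of such an ingress. The secret names and the domain name are hypothetical, and the exact layout should be checked against the full example in this document: the first entry provides the default server certificate, and the entry with hosts is served only to clients that request the listed domain name.

spec:
  tls:
  - secretName: ingress-test-secret        # Default server certificate.
  - hosts:
      - example.top                        # Hypothetical domain name bound to the SNI certificate.
    secretName: ingress-test-secret-sni    # Certificate returned to clients that request example.top.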
diff --git a/docs/cce/umn/cce_10_0689.html b/docs/cce/umn/cce_10_0689.html
index c793ea7d..72dd75eb 100644
--- a/docs/cce/umn/cce_10_0689.html
+++ b/docs/cce/umn/cce_10_0689.html
@@ -1,8 +1,9 @@
-ELB Ingresses Routing to Multiple Services
+LoadBalancer Ingresses to Multiple Services

Ingresses can route to multiple backend Services based on different matching policies. The spec field in the YAML file is set as below. You can access www.example.com/foo, www.example.com/bar, and foo.example.com/ to route to three different backend Services.

The URL registered in an ingress forwarding policy must be the same as the URL used to access the backend Service. Otherwise, a 404 error will be returned.
+For example, the default access URL of the Nginx application is /usr/share/nginx/html. When adding /test to the ingress forwarding policy, ensure the access URL of your Nginx application contains /usr/share/nginx/html/test. Otherwise, error 404 will be returned.

                                                                                        ...
                                                                                         spec:
                                                                                        @@ -18,7 +19,7 @@ spec:
                                                                                                   ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
                                                                                               - path: '/bar'
                                                                                                 backend:
                                                                                        -          serviceName: <your_service_name>  # Replace it with the name of your target Service.
                                                                                        +          serviceName: <your_service_name>  # Replace it with the name of your target Service.
                                                                                                   servicePort: 80
                                                                                                 property:
                                                                                                   ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
                                                                                        @@ -27,14 +28,14 @@ spec:
                                                                                               paths:
                                                                                               - path: '/'
                                                                                                 backend:
                                                                                        -          serviceName: <your_service_name>  # Replace it with the name of your target Service.
                                                                                        +          serviceName: <your_service_name>  # Replace it with the name of your target Service.
                                                                                                   servicePort: 80
                                                                                                 property:
                                                                                                   ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
diff --git a/docs/cce/umn/cce_10_0691.html b/docs/cce/umn/cce_10_0691.html
index 4e8b9668..924967b1 100644
--- a/docs/cce/umn/cce_10_0691.html
+++ b/docs/cce/umn/cce_10_0691.html
@@ -1,11 +1,11 @@
-Interconnecting ELB Ingresses with HTTPS Backend Services
-
-Ingress can interconnect with backend services of different protocols. By default, the backend proxy channel of an ingress is an HTTP channel. To create an HTTPS channel, add the following configuration to the annotations field:
+Interconnecting LoadBalancer Ingresses with HTTPS Backend Services
+
+Ingresses can interconnect with backend services of different protocols. By default, the backend proxy channel of an ingress is HTTP-compliant. To create an HTTPS channel, add the following configuration to the annotations field:
 kubernetes.io/elb.pool-protocol: https
-Constraints
-• This feature only applies to clusters of v1.23.8, v1.25.3, and later.
-• Ingress can interconnect with HTTPS backend services only when dedicated load balancers are used.
-• When interconnecting with HTTPS backend services, set Client Protocol of ingress to HTTPS.
+Constraints
+• This function is available only in clusters of v1.23.8, v1.25.3, or later.
+• Ingresses can interconnect with HTTPS backend services only when dedicated load balancers are used.
+• When an ingress interconnects with an HTTPS backend service, the ingress protocol must be HTTPS.
-Interconnecting with HTTPS Backend Services
-An ingress configuration example:
+Configuration Example
+An ingress configuration example is as follows:

                                                                                        apiVersion: networking.k8s.io/v1
                                                                                         kind: Ingress
                                                                                         metadata:
                                                                                        @@ -38,7 +38,7 @@ spec:
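A minimal sketch of the annotations involved is shown below. The load balancer ID is a placeholder, and a dedicated load balancer is required as noted in the constraints above.

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-test
  annotations:
    kubernetes.io/elb.id: <your_elb_id>     # Replace it with the ID of your existing dedicated load balancer.
    kubernetes.io/elb.port: '443'
    kubernetes.io/elb.pool-protocol: https  # Use an HTTPS channel to the backend service.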
                                                                                         
diff --git a/docs/cce/umn/cce_10_0694.html b/docs/cce/umn/cce_10_0694.html
index 1d20555a..f85b6c53 100644
--- a/docs/cce/umn/cce_10_0694.html
+++ b/docs/cce/umn/cce_10_0694.html
@@ -1,34 +1,11 @@
-ELB Ingresses Using HTTP/2
+Configuring HTTP/2 for a LoadBalancer Ingress
 Ingresses can use HTTP/2 to expose Services. Connections from the load balancer to your application use HTTP/1.X by default. If your application is capable of receiving HTTP/2 requests, you can add the following field to the ingress annotation to enable the use of HTTP/2:
 kubernetes.io/elb.http2-enable: 'true'
-The following shows the YAML file for associating with an existing load balancer:
-
-For clusters of v1.21 or earlier:
-apiVersion: networking.k8s.io/v1beta1
                                                                                        -kind: Ingress 
                                                                                        -metadata: 
                                                                                        -  name: ingress-test
                                                                                        -  annotations: 
                                                                                        -    kubernetes.io/elb.id: <your_elb_id>  # Replace it with the ID of your existing load balancer.
                                                                                        -    kubernetes.io/elb.ip: <your_elb_ip>  # Replace it with the IP of your existing load balancer.
                                                                                        -    kubernetes.io/elb.port: '443'
                                                                                        -    kubernetes.io/ingress.class: cce
                                                                                        -    kubernetes.io/elb.http2-enable: 'true' # Enable HTTP/2.
                                                                                        -spec:
                                                                                        -  tls:
                                                                                        -  - secretName: ingress-test-secret
                                                                                        -  rules: 
                                                                                        -  - host: ''
                                                                                        -    http: 
                                                                                        -      paths: 
                                                                                        -      - path: '/'
                                                                                        -        backend: 
                                                                                        -          serviceName: <your_service_name>  # Replace it with the name of your target Service.
                                                                                        -          servicePort: 80                   # Replace it with the port number of your target Service.
                                                                                        -        property:
                                                                                        -          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
-For clusters of v1.23 or later:
-apiVersion: networking.k8s.io/v1
+• An HTTPS-compliant load balancer supports HTTP/2.
+• This function is available in clusters of version v1.23.13-r0, v1.25.8-r0, v1.27.5-r0, or v1.28.3-r0.
+• If the advanced configuration for enabling HTTP/2 or the target annotation is deleted, the ELB configuration will not be modified.
+
+The following shows an example YAML file where an existing load balancer is associated:
+apiVersion: networking.k8s.io/v1
                                                                                         kind: Ingress 
                                                                                         metadata: 
                                                                                           name: ingress-test
                                                                                        @@ -49,31 +26,30 @@ spec:
                                                                                                   service:
                                                                                                     name: <your_service_name>  # Replace it with the name of your target Service.
                                                                                                     port: 
                                                                                        -              number: 8080             # Replace 8080 with the port number of your target Service.
                                                                                        +              number: 8080             # Replace 8080 with the port number of your target Service.
                                                                                                 property:
                                                                                                   ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
                                                                                                 pathType: ImplementationSpecific
                                                                                           ingressClassName: cce 
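For reference, a minimal sketch of the metadata that pairs with the spec above might look as follows. The load balancer ID is a placeholder, and the HTTPS listener port mirrors the earlier example; nothing here goes beyond the annotations already described.

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-test
  annotations:
    kubernetes.io/elb.id: <your_elb_id>     # Replace it with the ID of your existing load balancer.
    kubernetes.io/elb.port: '443'           # HTTP/2 can be enabled only when the listener uses HTTPS.
    kubernetes.io/elb.http2-enable: 'true'  # Enable HTTP/2.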
-Table 6 HTTP/2 parameters
+Table 1 HTTP/2 parameters

Parameter: kubernetes.io/elb.http2-enable
Mandatory: No
-Type: Bool
+Type: String
-Description: Whether HTTP/2 is enabled. Request forwarding using HTTP/2 improves the access performance between your application and the load balancer. However, the load balancer still uses HTTP 1.X to forward requests to the backend server. This parameter is supported in clusters of v1.19.16-r0, v1.21.3-r0, and later versions.
+Description: Whether HTTP/2 is enabled. Request forwarding using HTTP/2 improves the access performance between your application and the load balancer. However, the load balancer still uses HTTP/1.x to forward requests to the backend server.
  Options:
  • true: enabled
  • false: disabled (default value)
  Note: HTTP/2 can be enabled or disabled only when the listener uses HTTPS. This parameter is invalid when the listener protocol is HTTP, and defaults to false.

@@ -85,7 +61,7 @@ spec:
diff --git a/docs/cce/umn/cce_10_0695.html b/docs/cce/umn/cce_10_0695.html
index bde60db4..c0569c6b 100644
--- a/docs/cce/umn/cce_10_0695.html
+++ b/docs/cce/umn/cce_10_0695.html
@@ -1,9 +1,9 @@
-Configuring ELB Ingresses Using Annotations
-
-By adding annotations to a YAML file, you can implement more advanced ingress functions. This section describes the annotations that can be used when you create an ingress of the ELB type.
-
-Interconnecting with ELB
+Configuring a LoadBalancer Ingress Using Annotations
+
+By adding annotations to a YAML file, you can implement more advanced ingress functions. This section describes the annotations that can be used when you create a LoadBalancer ingress.
+
+Interconnection with ELB

@@ -71,7 +71,7 @@
Table 1 Annotations for interconnecting with ELB

@@ -28,19 +28,19 @@
Type: String
Description:
-  • cce: The self-developed ELB ingress is used.
+  • cce: A proprietary LoadBalancer ingress is used.
  This parameter is mandatory when an ingress is created by calling the API.
-  For clusters of v1.23 or later, use the parameter ingressClassName. For details, see Using kubectl to Create an ELB Ingress.
+  For clusters of v1.23 or later, use the parameter ingressClassName. For details, see Using kubectl to Create a LoadBalancer Ingress.
Supported Cluster Version: Only clusters of v1.21 or earlier

Parameter: kubernetes.io/elb.port
-Type: Integer
+Type: String
Description: This parameter indicates the external port registered with the address of the LoadBalancer Service.
-  Supported range: 1 to 65535
+  The value ranges from 1 to 65535.
  NOTE:
  Some ports are high-risk ports and are blocked by default, for example, port 21.

Parameter: kubernetes.io/elb.autocreate
-Type: Table 5 Object
+Type: Table 7 Object
Description: Mandatory when load balancers are automatically created.
  Example

@@ -97,7 +97,7 @@
-To use the preceding annotations, perform the following steps:
+The following shows how to use the preceding annotations:

Using HTTP/2

@@ -115,235 +115,332 @@

Type: String
-Description: Whether HTTP/2 is enabled. Request forwarding using HTTP/2 improves the access performance between your application and the load balancer. However, the load balancer still uses HTTP 1.X to forward requests to the backend server. This parameter is supported in clusters of v1.19.16-r0, v1.21.3-r0, and later.
+Description: Whether HTTP/2 is enabled. Request forwarding using HTTP/2 improves the access performance between your application and the load balancer. However, the load balancer still uses HTTP/1.x to forward requests to the backend server.
  Options:
  • true: enabled
  • false: disabled (default value)
  Note: HTTP/2 can be enabled or disabled only when the listener uses HTTPS. This parameter is invalid and defaults to false when the listener protocol is HTTP.
-Supported Cluster Version: v1.19.16-r0, v1.21.3-r0, or later
+Supported Cluster Version: v1.23.13-r0, v1.25.8-r0, v1.27.5-r0, v1.28.3-r0, or later

-For details about the application scenarios, see ELB Ingresses Using HTTP/2.
+For details, see Configuring HTTP/2 for a LoadBalancer Ingress.

Configuring ELB Certificates

Table 3 ELB certificate annotations

Parameter: kubernetes.io/elb.tls-certificate-ids
Type: String
Description: ELB certificate IDs, which are separated by commas (,). The list length is greater than or equal to 1. The first ID in the list is the server certificate, and the other IDs are SNI certificates, each of which must contain a domain name.
  To obtain the certificate, log in to the CCE console, choose Service List > Networking > Elastic Load Balance, and click Certificates in the navigation pane. In the load balancer list, copy the ID under the target certificate name.
Supported Cluster Version: v1.19.16-r2, v1.21.5-r0, v1.23.3-r0, or later

For details, see Using the ELB Certificate.

Interconnecting with HTTPS Backend Services

-Table 3 Annotations for interconnecting with HTTPS backend services
+Table 4 Annotations for interconnecting with HTTPS backend services

Parameter: kubernetes.io/elb.pool-protocol
Type: String
Description: To interconnect with HTTPS backend services, set this parameter to https.
Supported Cluster Version: v1.23.8, v1.25.3, or later

-For details about the application scenarios, see Interconnecting ELB Ingresses with HTTPS Backend Services.
+For details, see Interconnecting LoadBalancer Ingresses with HTTPS Backend Services.

Configuring Timeout for an Ingress

-Table 4 Annotations of configuring Ingress redirection rules
+Table 5 Annotations of configuring ingress redirection rules

Parameter: kubernetes.io/elb.keepalive_timeout
-Type: Integer
+Type: String
-Description: Timeout for client connections. If there are no requests reaching the load balancer after the timeout duration elapses, the load balancer will disconnect the connection with the client and establish a new connection when there is a new request.
+Description: Timeout for client connections. If there are no requests reaching the load balancer during the timeout duration, the load balancer will disconnect the connection from the client and establish a new connection when there is a new request.
  Value:
  • For TCP listeners, the value ranges from 10 to 4000 (in seconds). The default value is 300.
  • For HTTP or HTTPS listeners, the value ranges from 0 to 4000 (in seconds). The default value is 60.
  For UDP listeners, this parameter does not take effect.
-Supported Cluster Version: v1.19.16-r30, v1.21.10-r10, v1.23.8-r10, v1.25.3-r10, and later
+Supported Cluster Version: Dedicated load balancers: v1.19.16-r30, v1.21.10-r10, v1.23.8-r10, v1.25.3-r10, or later
+  Shared load balancers: v1.23.13-r0, v1.25.8-r0, v1.27.5-r0, v1.28.3-r0, or later

Parameter: kubernetes.io/elb.client_timeout
-Type: Integer
+Type: String
-Description: Timeout duration for waiting for a response from a client. There are two situations:
+Description: Timeout for waiting for a request from a client. There are two cases:
  • If the client fails to send a request header to the load balancer during the timeout duration, the request will be interrupted.
  • If the interval between two consecutive request bodies reaching the load balancer is greater than the timeout duration, the connection will be disconnected.
  The value ranges from 1 to 300 (in seconds). The default value is 60.
  This parameter is available only for HTTP and HTTPS listeners.
  Minimum value: 1
  Maximum value: 300
  Default value: 60
-Supported Cluster Version: v1.19.16-r30, v1.21.10-r10, v1.23.8-r10, v1.25.3-r10, and later
+Supported Cluster Version: Dedicated load balancers: v1.19.16-r30, v1.21.10-r10, v1.23.8-r10, v1.25.3-r10, or later
+  Shared load balancers: v1.23.13-r0, v1.25.8-r0, v1.27.5-r0, v1.28.3-r0, or later

Parameter: kubernetes.io/elb.member_timeout
-Type: Integer
+Type: String
Description: Timeout duration for waiting for a response from a backend server. After a request is forwarded to the backend server, if the backend server does not respond within the duration specified by member_timeout, the load balancer will stop waiting and return HTTP 504 Gateway Timeout.

                                                                                        +

                                                                                        Timeout for waiting for a response from a backend server. After a request is forwarded to the backend server, if the backend server does not respond within the duration specified by member_timeout, the load balancer will stop waiting and return HTTP 504 Gateway Timeout.

                                                                                        The value ranges from 1 to 300 (in seconds). The default value is 60.

                                                                                        This parameter is available only for HTTP and HTTPS listeners.

                                                                                        Minimum value: 1

                                                                                        Maximum value: 300

                                                                                        Default value: 60

                                                                                        v1.19.16-r30, v1.21.10-r10, v1.23.8-r10, v1.25.3-r10, and later

                                                                                        +

                                                                                        Dedicated load balancers: v1.19.16-r30, v1.21.10-r10, v1.23.8-r10, v1.25.3-r10, or later

                                                                                        +

                                                                                        Shared load balancers: v1.23.13-r0, v1.25.8-r0, v1.27.5-r0, v1.28.3-r0, or later

                                                                                        -

                                                                                        For details about the application scenarios, see Configuring Timeout for an ELB Ingress.

                                                                                        +

                                                                                        For details, see Configuring Timeout for a LoadBalancer Ingress.
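To show how these annotations fit together, the following is a minimal sketch of an ingress that sets all three timeouts on an existing dedicated load balancer. The host, Service name, and port are illustrative assumptions; the annotation keys and value ranges follow the table above, and the load balancer ID is reused from the example later in this section.

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: timeout-example
  namespace: default
  annotations:
    kubernetes.io/elb.id: 2c623150-17bf-45f1-ae6f-384b036f547e   # ID of an existing dedicated load balancer
    kubernetes.io/elb.keepalive_timeout: '300'   # Idle timeout for client connections, in seconds
    kubernetes.io/elb.client_timeout: '60'       # Timeout for waiting for a client request (HTTP/HTTPS listeners only)
    kubernetes.io/elb.member_timeout: '60'       # Timeout for waiting for a backend response (HTTP/HTTPS listeners only)
spec:
  ingressClassName: cce
  rules:
  - host: example.com
    http:
      paths:
      - path: /
        pathType: ImplementationSpecific
        backend:
          service:
            name: nginx        # Illustrative backend Service
            port:
              number: 80

Because the annotation values are of the String type, the numbers are quoted.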

Configuring a Custom Listening Port

A custom listening port can be configured for an ingress. In this way, both ports 80 and 443 can be exposed.

Table 6 Annotations for a custom listening port

kubernetes.io/elb.listen-ports

Mandatory: No

Type: String

Description: Create multiple listening ports for an ingress. The port number ranges from 1 to 65535.

The following is an example of the JSON value:
kubernetes.io/elb.listen-ports: '[{"HTTP":80},{"HTTPS":443}]'
• Only HTTP and HTTPS listening ports are allowed.
• Only newly created ingresses are allowed. Additionally, after multiple listening ports are configured, the annotation cannot be modified or deleted.
• If both kubernetes.io/elb.listen-ports and kubernetes.io/elb.port are configured, kubernetes.io/elb.listen-ports takes a higher priority.
• Ingress configuration items such as the blocklist, trustlist, and timeout take effect on all listening ports concurrently.
• Advanced forwarding policies are not supported.

Supported cluster versions: v1.23.14-r0, v1.25.9-r0, v1.27.6-r0, v1.28.4-r0, or later
For example, if an existing ELB load balancer is used, the configuration is as follows:

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    kubernetes.io/elb.id: 2c623150-17bf-45f1-ae6f-384b036f547e     # ID of an existing load balancer
    kubernetes.io/elb.class: performance    # Load balancer type
    kubernetes.io/elb.listen-ports: '[{"HTTP": 80},{"HTTPS": 443}]'    # Multi-listener configuration
    kubernetes.io/elb.tls-certificate-ids: 6cfb43c9de1a41a18478b868e34b0a82,6cfb43c9de1a41a18478b868e34b0a82   # HTTPS certificate configuration
  name: test-https
  namespace: default
spec:
  ingressClassName: cce
  rules:
  - host: xxx.com
    http:
      paths:
      - backend:
          service:
            name: test
            port:
              number: 8888
        path: /
        pathType: ImplementationSpecific
        property:
          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
Parameters for Automatically Creating a Load Balancer

Table 7 elb.autocreate data structure

name

Mandatory: No

Type: String

Description: Name of the automatically created load balancer.
The value can contain 1 to 64 characters. Only letters, digits, underscores (_), hyphens (-), and periods (.) are allowed.
Default: cce-lb+service.UID

type

Mandatory: No

Type: String

Description: Network type of the load balancer.
• public: public network load balancer
• inner: private network load balancer
Default: inner

bandwidth_name

Mandatory: Yes for public network load balancers

Type: String

Description: Bandwidth name. The default value is cce-bandwidth-******.
The value can contain 1 to 64 characters. Only letters, digits, underscores (_), hyphens (-), and periods (.) are allowed.

bandwidth_chargemode

Mandatory: No

Type: String

Description: Bandwidth mode.
• traffic: billed by traffic
Default: traffic

bandwidth_size

Mandatory: Yes for public network load balancers

Type: Integer

Description: Bandwidth size. The default value is 1 to 2000 Mbit/s. Configure this parameter based on the bandwidth range allowed in your region.
The minimum increment for bandwidth adjustment varies depending on the bandwidth range:
• The minimum increment is 1 Mbit/s if the allowed bandwidth does not exceed 300 Mbit/s.
• The minimum increment is 50 Mbit/s if the allowed bandwidth ranges from 300 Mbit/s to 1000 Mbit/s.
• The minimum increment is 500 Mbit/s if the allowed bandwidth exceeds 1000 Mbit/s.

bandwidth_sharetype

Mandatory: Yes for public network load balancers

Type: String

Description: Bandwidth sharing mode.
• PER: dedicated bandwidth

eip_type

Mandatory: Yes for public network load balancers

Type: String

Description: EIP type.
• 5_bgp: dynamic BGP
The specific type varies with regions. For details, see the EIP console.

vip_subnet_cidr_id

Mandatory: No

Type: String

Description: Subnet where the load balancer is located. The subnet must belong to the VPC where the cluster resides.
If this parameter is not specified, the load balancer and the cluster are in the same subnet.
This field can be specified only for clusters of v1.21 or later.

vip_address

Mandatory: No

Type: String

Description: Private IP address of the load balancer. Only IPv4 addresses are supported.
The IP address must be in the ELB CIDR block. If this parameter is not specified, an IP address will be automatically assigned from the ELB CIDR block.
This parameter is available only in clusters of v1.23.11-r0, v1.25.6-r0, v1.27.3-r0, or later versions.

available_zone

Mandatory: Yes

Type: Array of strings

Description: AZ where the load balancer is located.
You can obtain all supported AZs by getting the AZ list.
This parameter is available only for dedicated load balancers.

l4_flavor_name

Mandatory: Yes

Type: String

Description: Flavor name of the layer-4 load balancer.
You can obtain all supported flavors by getting the flavor list.
This parameter is available only for dedicated load balancers.

l7_flavor_name

Mandatory: No

Type: String

Description: Flavor name of the layer-7 load balancer.
You can obtain all supported flavors by getting the flavor list.
This parameter is available only for dedicated load balancers. The value of this parameter must be the same as that of l4_flavor_name, that is, both are elastic specifications or fixed specifications.

elb_virsubnet_ids

Mandatory: No

Type: Array of strings

Description: Subnet where the backend servers of the load balancer are located. If this parameter is left blank, the default cluster subnet is used. Load balancers occupy different numbers of subnet IP addresses based on their specifications. Do not use the subnet CIDR blocks of other resources (such as clusters and nodes) as the load balancer CIDR block.
This parameter is available only for dedicated load balancers.
Example:
"elb_virsubnet_ids": [
  ...
]

ipv6_vip_virsubnet_id

Mandatory: No

Type: String

Description: ID of the IPv6 subnet where the load balancer resides. IPv6 must be enabled for the corresponding subnet. This parameter is mandatory only when dual-stack clusters are used.
This parameter is available only for dedicated load balancers.
diff --git a/docs/cce/umn/cce_10_0702.html b/docs/cce/umn/cce_10_0702.html
index a8e7d4e2..98817d9d 100644
--- a/docs/cce/umn/cce_10_0702.html
+++ b/docs/cce/umn/cce_10_0702.html
@@ -68,6 +68,27 @@

Scheduling Workloads

Resource utilization-based scheduling

Scheduling policies are optimized for computing resources to effectively reduce resource fragments on each node and maximize computing resource utilization. For details, see Resource Usage-based Scheduling.

Priority-based scheduling

Scheduling policies are customized based on service importance and priorities to guarantee the resources of key services. For details, see Priority-based Scheduling.

AI performance-based scheduling

Scheduling policies are configured based on the nature and resource usage of AI tasks to increase the throughput of cluster services and improve service performance. For details, see AI Performance-based Scheduling.

NUMA affinity scheduling

Volcano aims to lift the limitation and make the scheduler NUMA topology aware so that:

diff --git a/docs/cce/umn/cce_10_0721.html b/docs/cce/umn/cce_10_0721.html
index ca77812b..a831f62f 100644
--- a/docs/cce/umn/cce_10_0721.html
+++ b/docs/cce/umn/cce_10_0721.html
@@ -1,13 +1,13 @@

Overview

Volcano is a Kubernetes-based batch processing platform that supports machine learning, deep learning, bioinformatics, genomics, and other big data applications. It provides general-purpose, high-performance computing capabilities, such as job scheduling, heterogeneous chip management, and job running management.

Volcano Scheduler

Volcano Scheduler is a pod scheduling component that consists of a series of actions and plugins. Actions are executed in every scheduling step, and plugins provide the algorithm details of each action in different scenarios. Volcano Scheduler features high scalability, and you can specify actions and plugins as needed.

Figure 1 Volcano Scheduler workflow

The working process of Volcano Scheduler is as follows:
1. Identify and cache the job submitted by the client.
2. Start a periodical session. A scheduling cycle begins.
3. Send jobs that have not been scheduled to the to-be-scheduled queue of the session.
4. Traverse all jobs to be scheduled and perform actions such as enqueue, allocate, preempt, reclaim, and backfill in the configured sequence to find the most appropriate node for each job, and bind the job to that node. The algorithm logic executed in each action depends on the implementation of each function in the registered plugins.
5. Close the session.

Custom Volcano Resources
• A pod group is a custom Volcano resource type. It is a group of pods with strong association and is mainly used in batch scheduling, for example, ps and worker tasks in TensorFlow.
• A Queue contains a group of PodGroups. It is also the basis for the PodGroups to obtain cluster resources.
• Volcano Job (vcjob for short) is a custom job resource type. Different from Kubernetes Jobs, vcjob supports a specified scheduler, the minimum number of running pods, tasks, lifecycle management, specified queues, and priority-based scheduling. Volcano Job is better suited to high-performance computing scenarios such as machine learning, big data, and scientific computing.

diff --git a/docs/cce/umn/cce_10_0722.html b/docs/cce/umn/cce_10_0722.html
index e5b319d8..d72a512e 100644
--- a/docs/cce/umn/cce_10_0722.html
+++ b/docs/cce/umn/cce_10_0722.html
@@ -3,7 +3,7 @@

                                                                                        Scheduling Workloads

                                                                                        Volcano is a Kubernetes-based batch processing platform with high-performance general computing capabilities like task scheduling engine, heterogeneous chip management, and task running management. It provides end users with computing frameworks from multiple domains such as AI, big data, gene, and rendering. It also offers job scheduling, job management, and queue management for computing applications.

                                                                                        Kubernetes typically uses its default scheduler to schedule workloads. To use Volcano, specify Volcano for your workloads. For details about the Kubernetes scheduler, see Specify schedulers for pods.

Constraints

When a large number of workloads are scheduled, Volcano prints a large number of logs. In this case, you can use Volcano with LTS. Otherwise, the disk space of the node where Volcano resides may be used up. For details, see Collecting Container Logs.

                                                                                        Using Volcano

                                                                                        When using Volcano to schedule workloads, you only need to configure schedulerName in the spec field of the pod and set the parameter to volcano. The following is an example:
                                                                                        apiVersion: apps/v1
                                                                                         kind: Deployment
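For a complete picture, a minimal Deployment that hands its pods to Volcano could look like the following sketch. The nginx image, names, and replica count are illustrative assumptions; the only Volcano-specific setting is schedulerName: volcano.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-volcano          # Illustrative name
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-volcano
  template:
    metadata:
      labels:
        app: nginx-volcano
    spec:
      schedulerName: volcano   # Ask Volcano, not the default scheduler, to place these pods
      containers:
      - name: nginx
        image: nginx:latest    # Illustrative image
        resources:
          requests:
            cpu: 250m
            memory: 256Mi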
                                                                                        diff --git a/docs/cce/umn/cce_10_0727.html b/docs/cce/umn/cce_10_0727.html
                                                                                        index 9837fa78..3e8bdbf2 100644
                                                                                        --- a/docs/cce/umn/cce_10_0727.html
                                                                                        +++ b/docs/cce/umn/cce_10_0727.html
                                                                                        @@ -5,37 +5,37 @@
                                                                                         

                                                                                        Constraints

                                                                                        To enable AS, the CCE Cluster Autoscaler add-on must be installed in the target cluster.

                                                                                        Procedure

                                                                                        1. Log in to the CCE console and click the cluster name to access the cluster console.
                                                                                        2. In the navigation pane, choose Nodes. On the Node Pools tab, locate the row containing the target node pool and click Auto Scaling.

                                                                                          • If the auto scaling add-on has not been installed, configure add-on parameters based on service requirements, click Install, and wait until the add-on is installed. For details about add-on configurations, see CCE Cluster Autoscaler.
                                                                                          • If the auto scaling add-on has been installed, directly configure auto scaling policies.
diff --git a/docs/cce/umn/cce_10_0730.html b/docs/cce/umn/cce_10_0730.html
index bc01503f..897a272b 100644
--- a/docs/cce/umn/cce_10_0730.html
+++ b/docs/cce/umn/cce_10_0730.html
@@ -1,12 +1,40 @@

                                                                                            Configuring Timeout for an ELB Ingress

                                                                                            -

                                                                                            The ELB ingresses support the following timeout settings:

                                                                                            -
                                                                                            • Idle timeout setting for client connections: Maximum duration for keeping a connection when no client request is received. If no request is received during this period, the load balancer closes the connection and establishes a new one with the client when the next request arrives.
                                                                                            • Timeout setting for waiting for a request from a client: If the client fails to send a request header to the load balancer within the timeout duration or the interval for sending body data exceeds a specified period, the load balancer will close the connection.
                                                                                            • Timeout setting for waiting for a response from a backend server: If the backend server fails to respond after the timeout duration elapses, the load balancer will stop waiting and return HTTP 504 Gateway Timeout to the client.
                                                                                            -

                                                                                            Constraints

                                                                                            • This feature takes effect only in the following versions:
                                                                                              • v1.19: v1.19.16-r30 or later
                                                                                              • v1.21: v1.21.10-r10 or later
                                                                                              • v1.23: v1.23.8-r10 or later
                                                                                              • v1.25: v1.25.3-r10 or later
                                                                                              -
                                                                                            • The timeout can be configure for Ingresses only when dedicated load balancers are used.
                                                                                            • If you delete the timeout configuration during Ingress update, the timeout configuration on the existing listeners will be retained.
                                                                                            +

                                                                                            Configuring Timeout for a LoadBalancer Ingress

                                                                                            +

                                                                                            LoadBalancer ingresses support the following timeout settings:

                                                                                            +
                                                                                            • Idle timeout setting for client connections: Maximum duration for keeping a connection when no client request is received. If no request is received during this period, the load balancer closes the connection and establishes a new one with the client when the next request arrives.
• Timeout for waiting for a request from a client: If the client fails to send a request header to the load balancer within the timeout duration, or if the interval for sending body data exceeds the specified period, the load balancer will release the connection.
• Timeout setting for waiting for a response from a backend server: If the backend server fails to respond within the timeout duration, the load balancer will stop waiting and return HTTP 504 Gateway Timeout to the client.
                                                                                            +

                                                                                            Constraints

                                                                                            • The following table lists the scenarios where timeout can be configured for a Service. +
                                                                                            Table 1 Custom rules

                                                                                            Rule Type

                                                                                            +

                                                                                          • Configure auto scaling policies.

                                                                                            AS Configuration

                                                                                            +
• Customized Rule: Click Add Rule. In the displayed dialog box, configure the parameters. You can add multiple node scaling policies, but at most one CPU usage-based rule and one memory usage-based rule. The total number of rules cannot exceed 10.
                                                                                              The following table lists custom rules. +
                                                                                              - - - - -
                                                                                              Table 1 Custom rules

                                                                                              Rule Type

                                                                                              Configuration

                                                                                              +

                                                                                              Configuration

                                                                                              Metric-based

                                                                                              +

                                                                                              Metric-based

                                                                                              • Trigger: Select CPU allocation rate or Memory allocation rate and enter a value. The value must be greater than the scale-in percentage configured in the auto scaling add-on.
                                                                                                NOTE:
                                                                                                • Resource allocation (%) = Resources requested by pods in the node pool/Resources allocatable to pods in the node pool
                                                                                                • If multiple rules meet the conditions, the rules are executed in either of the following modes:

                                                                                                  If rules based on the CPU allocation rate and memory allocation rate are configured and two or more rules meet the scale-out conditions, the rule that will add the most nodes will be executed.

                                                                                                  -

                                                                                                  If a rule based on the CPU allocation rate and a periodic rule are configured and they both meet the scale-out conditions, one of them will be executed randomly. The rule executed first (rule A) changes the node pool to the scaling state. As a result, the other rule (rule B) cannot be executed. After rule A is executed and the node pool status becomes normal, rule B will not be executed.

                                                                                                  -
                                                                                                • If rules based on the CPU allocation rate and memory allocation rate are configured, the policy detection period varies with the processing logic of each loop of the Autoscaler add-on. A scale-out is triggered once the conditions are met, but it is constrained by other factors such as the cooldown period and node pool status.
                                                                                                • When the number of nodes in the cluster reaches the upper limit, or the CPU or memory usage reaches the upper limit of the autoscaler add-on, node scale-out will not be triggered.
                                                                                                +
                                                                                              • Trigger: Select CPU allocation rate or Memory allocation rate and enter a value. The value must be greater than the scale-in percentage configured in the auto scaling add-on.
                                                                                                NOTE:
                                                                                                • Resource allocation (%) = Resources requested by pods in the node pool/Resources allocatable to pods in the node pool
                                                                                                • If multiple rules meet the conditions, the rules are executed in either of the following modes:

                                                                                                  If rules based on the CPU allocation rate and memory allocation rate are configured and two or more rules meet the scale-out conditions, the rule that will add the most nodes will be executed.

                                                                                                  +

                                                                                                  If a rule based on the CPU allocation rate and a periodic rule are configured and they both meet the scale-out conditions, one of them will be executed randomly. The rule executed first (rule A) changes the node pool to the scaling state. As a result, the other rule (rule B) cannot be executed. After rule A is executed and the node pool status becomes normal, rule B will not be executed.

                                                                                                  +
                                                                                                • If rules based on the CPU allocation rate and memory allocation rate are configured, the policy detection period varies with the processing logic of each loop of the Autoscaler add-on. A scale-out is triggered once the conditions are met, but it is constrained by other factors such as the cooldown period and node pool status.
                                                                                                • When the number of nodes in the cluster reaches the upper limit, or the CPU or memory usage reaches the upper limit of the autoscaler add-on, node scale-out will not be triggered.
                                                                                                -
                                                                                              • Action: Configure an action to be performed when the triggering condition is met.
                                                                                                • Custom: Add a specified number of nodes to a node pool.
                                                                                                • Auto calculation: When the trigger condition is met, nodes are automatically added and the allocation rate is restored to a value lower than the threshold. The formula is as follows:

                                                                                                  Number of nodes to be added = [Resource request of pods in the node pool/(Available resources of a single node x Target allocation rate)] – Number of current nodes + 1

                                                                                                  +
                                                                                                • Action: Configure an action to be performed when the triggering condition is met.
                                                                                                  • Custom: Add a specified number of nodes to a node pool.
                                                                                                  • Auto calculation: When the trigger condition is met, nodes are automatically added and the allocation rate is restored to a value lower than the threshold. The formula is as follows:

                                                                                                    Number of nodes to be added = [Resource request of pods in the node pool/(Available resources of a single node x Target allocation rate)] – Number of current nodes + 1

                                                                                              Periodic

                                                                                              +

                                                                                              Periodic

                                                                                              • Trigger Time: You can select a specific time every day, every week, every month, or every year.
                                                                                              • Action: specifies an action to be carried out when the trigger time is reached. A specified number of nodes will be added to the node pool.
                                                                                              +
                                                                                              • Trigger Time: You can select a specific time every day, every week, every month, or every year.
                                                                                              • Action: specifies an action to be carried out when the trigger time is reached. A specified number of nodes will be added to the node pool.
                                                                                              -
                                                                                            • Nodes: The number of nodes in a node pool will always be within the range during auto scaling.
                                                                                            • Cooldown Period: a period during which the nodes added in the current node pool cannot be scaled in.
                                                                                            -

                                                                                            AS Object

                                                                                            -

                                                                                            Specifications: Configure whether to enable auto scaling for node flavors in a node pool.

                                                                                            +
                                                                                          • Nodes: The number of nodes in a node pool will always be within the range during auto scaling.
                                                                                          • Cooldown Period: a period during which the nodes added in the current node pool cannot be scaled in.
                                                                                          • +

                                                                                            AS Object

                                                                                            +

                                                                                            Specification selection: Configure whether to enable auto scaling for node flavors in a node pool.

                                                                                          • Click OK.
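To make the auto calculation formula above concrete, here is a small worked example. The numbers used (pod requests, node flavor, target allocation rate, and current node count) are illustrative only:
If the pods in a node pool request a total of 56 vCPUs, each node provides 8 allocatable vCPUs, the target CPU allocation rate is 70%, and the node pool currently has 6 nodes, then:
Number of nodes to be added = [56/(8 x 0.7)] – 6 + 1 = 10 – 6 + 1 = 5
After 5 nodes are added, the node pool has 11 nodes and the CPU allocation rate drops to 56/(11 x 8) ≈ 64%, which is below the 70% target.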
                                                                                          • diff --git a/docs/cce/umn/cce_10_0729.html b/docs/cce/umn/cce_10_0729.html index 1f940c53..b7162ede 100644 --- a/docs/cce/umn/cce_10_0729.html +++ b/docs/cce/umn/cce_10_0729.html @@ -1,11 +1,33 @@ -

                                                                                            Configuring Timeout for a LoadBalancer Service

                                                                                            +

                                                                                            Configuring Timeout for a Service

                                                                                            LoadBalancer Services allow you to configure timeout, which is the maximum duration for keeping a connection if no request is received from the client. If no request is received during this period, the load balancer closes the connection and establishes a new one with the client when the next request arrives.

                                                                                            -

                                                                                            Constraints

                                                                                            • This feature takes effect only in the following versions:
                                                                                              • v1.19: v1.19.16-r30 or later
                                                                                              • v1.21: v1.21.10-r10 or later
                                                                                              • v1.23: v1.23.8-r10 or later
                                                                                              • v1.25: v1.25.3-r10 or later
                                                                                              -
                                                                                            • The timeout can be configured only for the LoadBalancer Services using dedicated load balancers.
                                                                                            • If you delete the timeout configuration during Service update, the timeout configuration on the existing listeners will be retained.
                                                                                            +

                                                                                            Constraints

                                                                                            • The following table lists the scenarios where timeout can be configured for a Service. +
• Timeout Type: Idle timeout
• Load Balancer Type: Dedicated
• Restrictions: None
• Cluster Version:
  • v1.19: v1.19.16-r30 or later
  • v1.21: v1.21.10-r10 or later
  • v1.23: v1.23.8-r10 or later
  • v1.25: v1.25.3-r10 or later
  • Other clusters of later versions
                                                                                              -

                                                                                              Procedure

                                                                                              Use annotations to configure timeout. The following shows an example:
                                                                                              apiVersion: v1 
                                                                                              +
                                                                                            • If you delete the timeout configuration during a Service update, the timeout configuration on the existing listeners will be retained.
                                                                                            +
                                                                                            +

                                                                                            Using kubectl

                                                                                            Use annotations to configure timeout. The following shows an example:
                                                                                            apiVersion: v1 
                                                                                             kind: Service 
                                                                                             metadata: 
                                                                                               annotations:
                                                                                            @@ -38,9 +60,9 @@ spec:
                                                                                             
                                                                                             

                                                                                            No

                                                                                            Integer

                                                                                            +

                                                                                            String

                                                                                            Timeout for client connections. If there are no requests reaching the load balancer after the timeout duration elapses, the load balancer will disconnect the connection with the client and establish a new connection when there is a new request.

                                                                                            +

Timeout for client connections. If no requests reach the load balancer within the timeout duration, the load balancer terminates the connection with the client and establishes a new one when the next request arrives.

                                                                                            Value:

                                                                                            • For TCP listeners, the value ranges from 10 to 4000 (in seconds). The default value is 300.
                                                                                            • For HTTP, HTTPS, and TERMINATED_HTTPS listeners, the value ranges from 10 to 4000 (in seconds). The default value is 60.
                                                                                            • For UDP listeners, the value ranges from 10 to 4000 (in seconds). The default value is 300.
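For reference, a complete LoadBalancer Service manifest with the idle timeout annotation could look like the sketch below. This is only an illustration: the annotation keys kubernetes.io/elb.id, kubernetes.io/elb.class, and kubernetes.io/elb.keepalive_timeout are assumed here from the parameter description above and from common CCE Service examples, so check them against the full example on this page before use.
apiVersion: v1
kind: Service
metadata:
  name: nginx
  annotations:
    kubernetes.io/elb.id: <elb-id>                # Assumed: ID of an existing dedicated load balancer
    kubernetes.io/elb.class: performance          # Assumed: dedicated load balancer
    kubernetes.io/elb.keepalive_timeout: '300'    # Assumed key: idle timeout in seconds (10-4000 for TCP listeners, default 300)
spec:
  type: LoadBalancer
  selector:
    app: nginx
  ports:
    - name: cce-service-0
      port: 80
      targetPort: 80
      protocol: TCP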
• Timeout Type: Idle Timeout; Load Balancer Type: Dedicated
• Timeout Type: Request Timeout; Load Balancer Type: Dedicated
• Timeout Type: Response Timeout; Load Balancer Type: Dedicated
• Supported Cluster Version (for all of the above):
  • v1.19: v1.19.16-r30 or later
  • v1.21: v1.21.10-r10 or later
  • v1.23: v1.23.8-r10 or later
  • v1.25: v1.25.3-r10 or later
  • Other clusters of later versions
                                                                                            -

                                                                                            Configuring Timeout

                                                                                            An Ingress configuration example is as follows:

                                                                                            +
                                                                                          • If you delete the timeout configuration during an ingress update, the timeout configuration on the existing listeners will be retained.
                                                                                          +
                                                                                        +

                                                                                        Using kubectl

                                                                                        An ingress configuration example is as follows:

                                                                                        apiVersion: networking.k8s.io/v1
                                                                                         kind: Ingress
                                                                                         metadata:
                                                                                        @@ -49,9 +77,9 @@ spec:
                                                                                         

                                                                                        No

                                                                                        Integer

                                                                                        +

                                                                                        String

                                                                                        Timeout for client connections. If there are no requests reaching the load balancer after the timeout duration elapses, the load balancer will disconnect the connection with the client and establish a new connection when there is a new request.

                                                                                        +

Timeout for client connections. If no requests reach the load balancer within the timeout duration, the load balancer terminates the connection with the client and establishes a new one when the next request arrives.

                                                                                        The value ranges from 0 to 4000 (in seconds). The default value is 60.

                                                                                        No

                                                                                        Integer

                                                                                        +

                                                                                        String

                                                                                        Timeout duration for waiting for a response from a client. There are two situations:

                                                                                        -
                                                                                        • If the client fails to send a request header to the load balancer within the timeout duration, the request will be interrupted.
                                                                                        • If the interval between two consecutive request bodies reaching the load balancer is greater than the timeout duration, the connection will be disconnected.
                                                                                        +

                                                                                        Timeout for waiting for a request from a client. There are two situations:

                                                                                        +
• If the client fails to send a request header to the load balancer within the timeout duration, the request will be interrupted.
• If the interval between two consecutive request bodies reaching the load balancer exceeds the timeout duration, the connection will be closed.

                                                                                        The value ranges from 1 to 300 (in seconds). The default value is 60.

                                                                                        No

                                                                                        Integer

                                                                                        +

                                                                                        String

                                                                                        Timeout duration for waiting for a response from a backend server. After a request is forwarded to the backend server, if the backend server does not respond within the duration specified by member_timeout, the load balancer will stop waiting and return HTTP 504 Gateway Timeout.

                                                                                        The value ranges from 1 to 300 (in seconds). The default value is 60.
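Putting the three parameters together, an ELB ingress manifest with all timeout annotations could look like the sketch below. Only kubernetes.io/elb.member_timeout is named in the descriptions above; the keys kubernetes.io/elb.keepalive_timeout and kubernetes.io/elb.client_timeout, the elb.id and elb.port annotations, and the ingress class are assumptions, so verify them against the full example on this page.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: test
  annotations:
    kubernetes.io/elb.id: <elb-id>                # Assumed: ID of an existing dedicated load balancer
    kubernetes.io/elb.port: '80'                  # Assumed: listener port on the load balancer
    kubernetes.io/elb.keepalive_timeout: '300'    # Assumed key: idle timeout, 0-4000s, default 60
    kubernetes.io/elb.client_timeout: '50'        # Assumed key: request timeout, 1-300s, default 60
    kubernetes.io/elb.member_timeout: '50'        # Response timeout (member_timeout), 1-300s, default 60
spec:
  ingressClassName: cce                           # Assumed: ingress class used for ELB ingresses
  rules:
    - http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              service:
                name: nginx
                port:
                  number: 80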

                                                                                        @@ -83,7 +111,7 @@ spec:
                                                                                        diff --git a/docs/cce/umn/cce_10_0766.html b/docs/cce/umn/cce_10_0766.html index 176bed3b..eca3c0dc 100644 --- a/docs/cce/umn/cce_10_0766.html +++ b/docs/cce/umn/cce_10_0766.html @@ -1,25 +1,25 @@ -

                                                                                        Descheduler

                                                                                        -

                                                                                        Scheduling in a cluster is the process of binding pending pods to nodes, and is performed by a component called kube-scheduler or Volcano scheduler. The scheduler uses a series of algorithms to compute the optimal node for running pods. However, Kubernetes clusters are dynamic and their state changes over time. For example, if a node needs to be maintained, all pods on the node will be evicted to other nodes. After the maintenance is complete, the evicted pods will not automatically return back to the node because descheduling will not be triggered once a pod is bound to a node. Due to these changes, the load of a cluster may be unbalanced after the cluster runs for a period of time.

                                                                                        -

                                                                                        CCE has resolved this issue by using the Volcano scheduler to evict pods that do not comply with the configured policy so that pods can be rescheduled. In this way, the cluster load is balanced and resource fragmentation is minimized.

                                                                                        -

                                                                                        Descheduling Features

                                                                                        Load-aware Descheduling

                                                                                        +

                                                                                        Descheduling

                                                                                        +

Scheduling in a cluster is the process of binding pending pods to nodes, and it is performed by a component called kube-scheduler or Volcano Scheduler. The scheduler uses a series of algorithms to compute the optimal node for running pods. However, Kubernetes clusters are dynamic and their state changes over time. For example, if a node needs to be maintained, all pods on the node will be evicted to other nodes. After the maintenance is complete, the evicted pods will not automatically return to the node, because descheduling is not triggered once a pod is bound to a node. Due to these changes, the load of a cluster may become unbalanced after the cluster runs for a period of time.

                                                                                        +

                                                                                        CCE has resolved this issue by using Volcano Scheduler to evict pods that do not comply with the configured policy so that pods can be rescheduled. In this way, the cluster load is balanced and resource fragmentation is minimized.

                                                                                        +

                                                                                        Features

                                                                                        Load-aware Descheduling

During Kubernetes cluster management, nodes may become over-utilized due to high CPU or memory usage, which affects the stable running of pods on these nodes and increases the probability of node faults. To dynamically balance resource usage between nodes in a cluster, a cluster resource view is required based on node monitoring metrics. During cluster management, real-time monitoring can be used to detect issues such as high resource usage on a node, node faults, and an excessive number of pods on a node, so that the system can take measures promptly, for example, by migrating some pods from an over-utilized node to under-utilized nodes.

                                                                                        -
                                                                                        Figure 1 Load-aware descheduling
                                                                                        +
                                                                                        Figure 1 Load-aware descheduling

                                                                                        When using this add-on, ensure the highThresholds value is greater than the lowThresholds value. Otherwise, the descheduler cannot work.

                                                                                        • Appropriately utilized node: a node whose resource usage is greater than or equal to 30% and less than or equal to 80%. The resource usage of appropriately utilized nodes is within the expected range.
                                                                                        • Over-utilized node: a node whose resource usage is higher than 80%. Some pods will be evicted from over-utilized nodes to reduce its resource usage to be less than or equal to 80%. The descheduler will schedule the evicted pods to under-utilized nodes.
                                                                                        • Under-utilized node: a node whose resource usage is lower than 30%.
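To relate these utilization bands to the descheduling configuration, the 80% boundary corresponds to the high-water thresholds of the LoadAware plugin (shown as targetThresholds in the full configuration later in this section) and the 30% boundary to the low-water thresholds. The fragment below is only an assumed sketch: evictableNamespaces, metrics, and the targetThresholds cpu value appear in the full configuration, while the memory value and the low-water thresholds block follow the open-source descheduler convention and may use different key names (such as highThresholds/lowThresholds) depending on the add-on version.
            "name": "LoadAware",
            "args": {
              "evictableNamespaces": {
                "exclude": ["kube-system"]
              },
              "metrics": {
                "type": "prometheus_adaptor"
              },
              "targetThresholds": {
                "cpu": 80,
                "memory": 85
              },
              "thresholds": {
                "cpu": 30,
                "memory": 30
              }
            }
Nodes above the high-water values are treated as over-utilized and have pods evicted, while nodes below the low-water values are treated as under-utilized and receive the evicted pods.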

                                                                                        HighNodeUtilization

                                                                                        -

                                                                                        This policy finds nodes that are under-utilized and evicts pods from the nodes in the hope that these pods will be scheduled compactly into fewer nodes. This policy must be used with the binpack policy of the Volcano scheduler or the MostAllocated policy of the kube-scheduler scheduler. Thresholds can be configured for CPU and memory.

                                                                                        +

This policy finds under-utilized nodes and evicts pods from them in the hope that these pods will be scheduled compactly onto fewer nodes. This policy must be used with the bin packing policy of Volcano Scheduler or the MostAllocated policy of kube-scheduler. Thresholds can be configured for CPU and memory.
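For the Volcano side of this pairing, the open-source Volcano scheduler provides a binpack plugin that can be enabled in a tiers block of default_scheduler_conf, roughly as sketched below. The plugin name binpack and the binpack.weight argument come from the open-source Volcano project; the weight value and the exact place where CCE expects this block are assumptions, so treat this as an illustration only.
          {
            "plugins": [
              {
                "name": "binpack",
                "arguments": {
                  "binpack.weight": 10
                }
              }
            ]
          }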

                                                                                        -

                                                                                        Prerequisites

                                                                                        +

                                                                                        Prerequisites

                                                                                        -

                                                                                        Constraints

                                                                                        • Pods need to be rescheduled using a scheduler, and no scheduler can label pods or nodes. Therefore, an evicted pod might be rescheduled to the original node.
                                                                                        • Descheduling does not support anti-affinity between pods. An evicted pod is in anti-affinity relationship with other running pods. Therefore, the scheduler may still schedule the pod back to the node where the pod was evicted from.
                                                                                        • When configuring load-aware descheduling, you are required to enable load-aware scheduling on the Volcano scheduler. When configuring HighNodeUtilization, you are required to enable binpack scheduling on the Volcano scheduler.
                                                                                        +

                                                                                        Constraints

• Evicted pods must be rescheduled by a scheduler, and no scheduler labels pods or nodes during descheduling. Therefore, an evicted pod might be rescheduled to its original node.
• Descheduling does not take anti-affinity between pods into account. If an evicted pod has an anti-affinity relationship with other running pods, the scheduler may still schedule the pod back to the node it was evicted from.
• When configuring load-aware descheduling, you must enable load-aware scheduling on Volcano Scheduler. When configuring HighNodeUtilization, you must enable bin packing on Volcano Scheduler.
                                                                                        -

                                                                                        Configuring a Load-aware Descheduler Policy

                                                                                        When configuring a load-aware descheduler policy, do as follows to enable load-aware descheduling on the Volcano scheduler:

                                                                                        -
                                                                                        1. Log in to the CCE console and click the cluster name to access the cluster console. Choose Add-ons in the navigation pane, locate Volcano Scheduler on the right, and click Install or Edit.
                                                                                        2. In the Parameters area, modify Advanced Settings to configure the load-aware descheduler policy and enable the usage add-on (an open-source Volcano add-on).

                                                                                          {
                                                                                          +

                                                                                          Configuring a Load-aware Descheduling Policy

                                                                                          When configuring a load-aware descheduling policy, do as follows to enable load-aware scheduling on Volcano Scheduler:

                                                                                          +
                                                                                          1. Log in to the CCE console and click the cluster name to access the cluster console. In the navigation pane, choose Settings and click the Scheduling tab on the right side of the page. Then, enable load-aware scheduling.
                                                                                          2. In the navigation pane, choose Add-ons. Locate Volcano Scheduler on the right and click Install or Edit.
                                                                                          3. In the Parameters area, modify Advanced Settings to configure the load-aware descheduling policy. The following shows a configuration example for Volcano 1.11.21 or later:

                                                                                            {
                                                                                               "colocation_enable": "",
                                                                                               "default_scheduler_conf": {
                                                                                            -    "actions": "allocate, backfill",
                                                                                            +    "actions": "allocate, backfill, preempt",
                                                                                                 "tiers": [
                                                                                                   {
                                                                                                     "plugins": [
                                                                                            @@ -32,16 +32,6 @@
                                                                                                       },
                                                                                                       {
                                                                                                         "name": "conformance"
                                                                                            -          },
                                                                                            -          {
                                                                                            -            "name": "usage",
                                                                                            -            "arguments": {
                                                                                            -              "usage.weight": 5,
                                                                                            -              "thresholds": {
                                                                                            -                "CPUUsageAvg.5m": 60,
                                                                                            -                "MEMUsageAvg.5m": 65
                                                                                            -              }
                                                                                            -            }
                                                                                                       }
                                                                                                     ]
                                                                                                   },
                                                                                            @@ -56,6 +46,19 @@
                                                                                                       },
                                                                                                       {
                                                                                                         "name": "nodeorder"
                                                                                            +          },
                                                                                            +          {
                                                                                            +            "name": "usage",
                                                                                            +            "enablePredicate": true,
                                                                                            +            "arguments": {
                                                                                            +              "usage.weight": 5,
                                                                                            +              "cpu.weight": 1,
                                                                                            +              "memory.weight": 1,
                                                                                            +              "thresholds": {
                                                                                            +                "cpu": 80,
                                                                                            +                "mem": 80
                                                                                            +              }
                                                                                            +            }
                                                                                                       }
                                                                                                     ]
                                                                                                   },
                                                                                            @@ -111,6 +114,7 @@
                                                                                                             "exclude": ["kube-system"]
                                                                                                           },
                                                                                                           "metrics": {
                                                                                            +                "type": "prometheus_adaptor"
                                                                                                           },
                                                                                                           "targetThresholds": {
                                                                                                             "cpu": 80,
                                                                                            @@ -136,53 +140,53 @@
                                                                                               "deschedulingInterval": "10m"
                                                                                             }
                                                                                            -
                                                                                            Table 1 Key parameters of a cluster descheduler policy

                                                                                            Parameter

                                                                                            +
                                                                                            - - - - - - -
                                                                                            Table 1 Key parameters of a cluster descheduling policy

                                                                                            Parameter

                                                                                            Description

                                                                                            +

                                                                                            Description

                                                                                            descheduler_enable

                                                                                            +

                                                                                            descheduler_enable

                                                                                            Whether to enable a cluster descheduler policy.

                                                                                            -
                                                                                            • true: The cluster descheduler policy is enabled.
                                                                                            • false: The cluster descheduler policy is disabled.
                                                                                            +

                                                                                            Whether to enable a cluster descheduling policy.

                                                                                            +
                                                                                            • true: The cluster descheduling policy is enabled.
                                                                                            • false: The cluster descheduling policy is disabled.

                                                                                            deschedulingInterval

                                                                                            +

                                                                                            deschedulingInterval

                                                                                            Descheduling period.

                                                                                            +

                                                                                            Descheduling period.

                                                                                            deschedulerPolicy

                                                                                            +

                                                                                            deschedulerPolicy

                                                                                            Cluster descheduler policy. For details, see Table 2.

                                                                                            +

                                                                                            Cluster descheduling policy. For details, see Table 2.

                                                                                            -
                                                                                            Table 2 deschedulerPolicy parameters

                                                                                            Parameter

                                                                                            +
                                                                                            - - - - - - -
                                                                                            Table 2 deschedulerPolicy parameters

                                                                                            Parameter

                                                                                            Description

                                                                                            +

                                                                                            Description

profiles.[].plugins.balance.enable.[]

Descheduling policy for a cluster.

LoadAware: a load-aware descheduling policy is used.

profiles.[].pluginConfig.[].name

Configuration of a load-aware descheduling policy. Options:
• DefaultEvictor: default eviction policy
• LoadAware: a load-aware descheduling policy

profiles.[].pluginConfig.[].args

Descheduling policy configuration of a cluster.

                                                                                            • Configurations for the DefaultEvictor policy:
                                                                                              • ignorePvcPods: whether PVC pods should be ignored or evicted. Value true indicates that the pods are ignored, and value false indicates that the pods are evicted. This configuration does not differentiate PVC types (local PVs, SFS, or EVS).
                                                                                              • nodeFit: whether to consider the existing scheduling configurations such as node affinity and taint on the node during descheduling. Value true indicates that the existing scheduling configurations will be considered, and value false indicates that those will be ignored.
                                                                                              • priorityThreshold: priority setting. If the priority of a pod is greater than or equal to the value of this parameter, the pod will not be evicted. Example:
                                                                                                {
                                                                                                   "value": 100
                                                                                                 }
                                                                                              • Configurations for the LoadAware policy:
                                                                                                • evictableNamespaces: namespaces where the eviction policy takes effect. The default value is the namespaces other than kube-system. Example:
                                                                                                  {
                                                                                                     "exclude": ["kube-system"]
                                                                                                   }
• metrics: how monitoring data is obtained. Either the Custom Metrics API (prometheus_adaptor) or Prometheus can be used.
  For Volcano 1.11.17 and later versions, use the Custom Metrics API to obtain monitoring data. The following is an example:
  {
     "type": "prometheus_adaptor"
  }
  For Volcano 1.11.5 to 1.11.16, use Prometheus to obtain monitoring data. You need to enter the IP address of the Prometheus server. The following is an example:
  {
     "address": "http://10.247.119.103:9090",
     "type": "prometheus"
  }
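Putting these parameters together, the following is a minimal sketch of a load-aware descheduling configuration for the add-on. Only the descheduling-related parameters are shown (other add-on parameters such as default_scheduler_conf are omitted), the profile name default is an illustrative placeholder, and Volcano 1.11.17 or later is assumed so that the Custom Metrics API is used; the remaining values simply reuse the examples above:

  {
     "descheduler_enable": "true",
     "deschedulingInterval": "10m",
     "deschedulerPolicy": {
         "profiles": [
             {
                 "name": "default",
                 "plugins": {
                     "balance": {
                         "enable": ["LoadAware"]
                     }
                 },
                 "pluginConfig": [
                     {
                         "name": "DefaultEvictor",
                         "args": {
                             "ignorePvcPods": true,
                             "nodeFit": true,
                             "priorityThreshold": {
                                 "value": 100
                             }
                         }
                     },
                     {
                         "name": "LoadAware",
                         "args": {
                             "evictableNamespaces": {
                                 "exclude": ["kube-system"]
                             },
                             "metrics": {
                                 "type": "prometheus_adaptor"
                             }
                         }
                     }
                 ]
             }
         ]
     }
  }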

                                                                                                • Click OK.

Configuring a HighNodeUtilization Policy

When configuring a HighNodeUtilization policy, perform the following operations to enable the bin packing policy on Volcano Scheduler:

1. Log in to the CCE console and click the cluster name to access the cluster console. In the navigation pane, choose Settings and click the Scheduling tab on the right side of the page. Then, enable bin packing. For details, see Bin Packing.
2. In the navigation pane, choose Add-ons. Locate Volcano Scheduler on the right and click Install or Edit.
3. In the Parameters area, modify Advanced Settings to configure the HighNodeUtilization policy.

  {
     "colocation_enable": "",
     "default_scheduler_conf": {
         "actions": "allocate, backfill, preempt",
         "tiers": [
             {
                 "plugins": [
  ...
     "deschedulingInterval": "10m"
  }
Table 3 Key parameters of a cluster descheduling policy

Parameter

Description

descheduler_enable

Whether to enable a cluster descheduling policy.
• true: The cluster descheduling policy is enabled.
• false: The cluster descheduling policy is disabled.

deschedulingInterval

Descheduling period.

deschedulerPolicy

Cluster descheduling policy. For details, see Table 4.

Table 4 deschedulerPolicy parameters

Parameter

diff --git a/docs/cce/umn/cce_bestpractice_0003.html b/docs/cce/umn/cce_bestpractice_0003.html

                                                                                                      No recoding or re-architecting is required. You only need to pack the entire application into a container image and deploy the container image on CCE.

                                                                                                      Introduction

                                                                                                      In this example, the enterprise management application is developed by enterprise A. This application is provided for third-party enterprises for use, and enterprise A is responsible for application maintenance.

When a third-party enterprise needs to use this application, a dedicated set of the Tomcat application and MongoDB database must be deployed for that enterprise. The MySQL database, which stores the data of third-party enterprises, is provided by enterprise A.

Figure 1 Application architecture

As shown in Figure 1, the application is a standard Tomcat application, and its backend interconnects with MongoDB and MySQL databases. For this type of application, there is no need to split the architecture. The entire application is built as an image, and the MongoDB database is deployed in the same image as the Tomcat application. In this way, the application can be deployed or upgraded through the image.

                                                                                                      • Interconnecting with the MongoDB database for storing user files.
                                                                                                      • Interconnecting with the MySQL database for storing third-party enterprise data. The MySQL database is an external cloud database.
diff --git a/docs/cce/umn/cce_bestpractice_00035.html b/docs/cce/umn/cce_bestpractice_00035.html

                                                                                                      In containers, multiple types of proxy servers may exist between a client and the container servers. After an external request is forwarded for multiple times, the source IP address of the client cannot be transmitted to the containers. As a result, Services in the containers cannot obtain the real source IP addresses of the client.

                                                                                                      Description

                                                                                                      Layer-7 forwarding:

                                                                                                      Ingresses: If this access mode is used, the client's source IP address is saved in the X-Forwarded-For field of the HTTP header by default. No other configuration is required.

• LoadBalancer Ingresses use ELB for Layer 7 network access between the Internet and internal network (in the same VPC) based on the ELB service.

                                                                                                      Layer-4 forwarding:

• LoadBalancer: Use ELB to achieve load balancing. You can manually enable the Transfer Client IP Address option for TCP and UDP listeners of shared load balancers. By default, the Transfer Client IP Address option is enabled for TCP and UDP listeners of dedicated load balancers. You do not need to manually enable it.
• NodePort: The container port is mapped to the node port. If the cluster-level affinity is selected, access requests will be forwarded through the node and the client source IP address cannot be obtained. If the node-level affinity is selected, access requests will not be forwarded and the client source IP address can be obtained.

                                                                                                      ELB Ingress

For ELB Ingresses (HTTP or HTTPS), the function of obtaining the client's source IP address is enabled by default. No other operation is required.

                                                                                                      The real IP address is placed in the X-Forwarded-For HTTP header field by the load balancer in the following format:

                                                                                                      ...
                                                                                                       10.0.0.7 - - [17/Aug/2023:01:30:11 +0000] "GET / HTTP/1.1" 200 19 "http://114.114.114.114:9421/" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.203" "100.125.**.**"

                                                                                                      100.125.**.** specifies the CIDR block of the load balancer, indicating that the traffic is forwarded through the load balancer.

5. Go to the ELB console and enable the function of obtaining the client IP address of the listener corresponding to the load balancer. Transparent transmission of source IP addresses is enabled for dedicated load balancers by default. You do not need to manually enable this function.

  1. Log in to the ELB console.
  2. Click in the upper left corner of the management console and select a region and a project.
  3. Click Service List. Under Networking, click Elastic Load Balance.
  4. On the Load Balancers page, click the name of the load balancer.
  5. Click the Listeners tab, locate the row containing the target listener, and click Edit. If modification protection exists, disable the protection on the basic information page of the listener and try again.
  6. Enable Transfer Client IP Address.

6. (Perform this step only for Nginx ingresses.) Edit the nginx-ingress add-on. In the nginx configuration parameter area, configure the required fields. For details about the supported parameters, see the community documentation. After the configuration is complete, update the add-on.

                                                                                                      {
                                                                                                           "enable-real-ip": "true",
                                                                                                           "forwarded-for-header": "X-Forwarded-For",
     ...
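For reference, a complete parameter block might look like the following minimal sketch. enable-real-ip and forwarded-for-header come from the snippet above; compute-full-forwarded-for is an additional option from the community ConfigMap reference and is included here only as an assumption:

  {
     "enable-real-ip": "true",
     "forwarded-for-header": "X-Forwarded-For",
     "compute-full-forwarded-for": "true"
  }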
                                                                                                       
VPC and Container Tunnel Network Models

                                                                                                      To obtain source IP addresses, perform the following steps:

1. When creating a LoadBalancer Service on the CCE console, set Service Affinity to Node-level instead of Cluster-level.

2. Go to the ELB console and enable the function of obtaining the client IP address of the listener corresponding to the load balancer. Transparent transmission of source IP addresses is enabled for dedicated load balancers by default. You do not need to manually enable this function.

  1. Log in to the ELB console.
  2. Click in the upper left corner of the management console and select a region and a project.
  3. Click Service List. Under Networking, click Elastic Load Balance.
  4. On the Load Balancers page, click the name of the load balancer.
  5. Click the Listeners tab, locate the row containing the target listener, and click Edit. If modification protection exists, disable the protection on the basic information page of the listener and try again.
  6. Enable Transfer Client IP Address.

                                                                                                        Cloud Native Network 2.0 Model (CCE Turbo Clusters)

In the Cloud Native Network 2.0 model, when a shared load balancer is used for load balancing, the service affinity cannot be set to Node-level. As a result, source IP addresses cannot be obtained. To obtain a source IP address, you must use a dedicated load balancer. External access to the container does not need to pass through the forwarding plane.

By default, transparent transmission of source IP addresses is enabled for dedicated load balancers. You do not need to manually enable Transfer Client IP Address on the ELB console. Instead, you only need to select a dedicated load balancer when creating an ENI LoadBalancer Service on the CCE console.

NodePort

Set the service affinity of a NodePort Service to Node-level instead of Cluster-level. That is, set spec.externalTrafficPolicy of the Service to Local.

                                                                                                      When a node (using Cloud Native Network 2.0) accesses a NodePort Service, source IP addresses can be obtained only when hostNetwork is enabled for workloads.
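For reference, the following is a minimal sketch of a NodePort Service with spec.externalTrafficPolicy set to Local, written in JSON form (which kubectl also accepts); the Service name, selector, and port values are placeholders for illustration:

  {
     "apiVersion": "v1",
     "kind": "Service",
     "metadata": {
         "name": "nginx-nodeport"
     },
     "spec": {
         "type": "NodePort",
         "externalTrafficPolicy": "Local",
         "selector": {
             "app": "nginx"
         },
         "ports": [
             {
                 "name": "http",
                 "protocol": "TCP",
                 "port": 80,
                 "targetPort": 80
             }
         ]
     }
  }

Because the traffic policy is Local, requests are handled only by pods on the node that received them and are not forwarded to other nodes, so the client source IP address is preserved.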

diff --git a/docs/cce/umn/cce_bestpractice_0004.html b/docs/cce/umn/cce_bestpractice_0004.html

                                                                                                      Containerization Process

                                                                                                      The following figure illustrates the process of containerizing an application.

Figure 1 Process of containerizing an application

diff --git a/docs/cce/umn/cce_bestpractice_0009.html b/docs/cce/umn/cce_bestpractice_0009.html

                                                                                                      Procedure

                                                                                                      1. Log in as the root user to the device running Docker.
                                                                                                      2. Enter the apptest directory.

                                                                                                        cd apptest

                                                                                                        ll

                                                                                                        Ensure that files used to build the image are stored in the same directory.

                                                                                                      3. Build an image.

                                                                                                        docker build -t apptest .

                                                                                                      4. Upload the image to SWR. For details, see Uploading an Image Through the Client.
diff --git a/docs/cce/umn/cce_bestpractice_0010.html b/docs/cce/umn/cce_bestpractice_0010.html
Table 4 deschedulerPolicy parameters

Parameter

Description

profiles.[].plugins.balance.enable.[]

Descheduling policy for a cluster.

HighNodeUtilization: the policy for minimizing CPU and memory fragments is used.

profiles.[].pluginConfig.[].name

Configuration of the descheduling policy. Options:
• DefaultEvictor: default eviction policy
• HighNodeUtilization: policy for minimizing CPU and memory fragments

profiles.[].pluginConfig.[].args

Descheduling policy configuration of a cluster.

• Configurations for the DefaultEvictor policy:
  • ignorePvcPods: whether PVC pods should be ignored or evicted. Value true indicates that the pods are ignored, and value false indicates that the pods are evicted. This configuration does not differentiate PVC types (local PVs, SFS, or EVS).
  • nodeFit: whether to consider the existing scheduling configurations such as node affinity and taints on the node during descheduling. Value true indicates that the existing scheduling configurations will be considered, and value false indicates that those will be ignored.
  • priorityThreshold: priority setting. If the priority of a pod is greater than or equal to the value of this parameter, the pod will not be evicted. Example:
    {
       "value": 100
     }

                                                                                                      • Click OK.
Use Cases

HighNodeUtilization

1. Check the nodes in a cluster. It is found that some nodes are under-utilized.
2. Edit the Volcano parameters to enable the descheduler and set the CPU and memory usage thresholds to 25. When the CPU and memory usage of a node is less than 25%, pods on the node will be evicted. (A sketch of the corresponding parameters is provided after this list.)
3. After the policy takes effect, pods on the node with IP address 192.168.44.152 will be migrated to the node with IP address 192.168.54.65 for minimized resource fragments.
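A minimal sketch of the corresponding add-on parameters for this use case is shown below. Only the descheduling-related parameters are included, the profile name default is an illustrative placeholder, and the thresholds argument (CPU and memory usage percentages) follows the open-source descheduler's HighNodeUtilization plugin, so treat its exact structure as an assumption:

  {
     "descheduler_enable": "true",
     "deschedulingInterval": "10m",
     "deschedulerPolicy": {
         "profiles": [
             {
                 "name": "default",
                 "plugins": {
                     "balance": {
                         "enable": ["HighNodeUtilization"]
                     }
                 },
                 "pluginConfig": [
                     {
                         "name": "HighNodeUtilization",
                         "args": {
                             "thresholds": {
                                 "cpu": 25,
                                 "memory": 25
                             }
                         }
                     }
                 ]
             }
         ]
     }
  }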

Common Issues

If an input parameter is incorrect, for example, the entered value is beyond the accepted value range or in an incorrect format, an event will be generated. In this case, modify the parameter setting as prompted.

diff --git a/docs/cce/umn/cce_10_0767.html b/docs/cce/umn/cce_10_0767.html

                                                                                                        Parameters:

                                                                                                        • Weight: weight of the soft affinity add-on in the node pool.
                                                                                                        • MaxNodeScore: maximum score (100) of a node.
• haveLabel: whether the labels configured in the add-on are available on a node. If yes, the value is 1. If no, the value is 0. In other words, a node that carries the configured labels is scored Weight x MaxNodeScore, and a node without them is scored 0.
Prerequisites

Configuring Soft Affinity Scheduling for Volcano Node Pools

1. Configure labels for affinity scheduling in the node pool.

  1. Log in to the CCE console.
  2. Click the cluster name to access the cluster console. Choose Nodes in the navigation pane and click the Node Pools tab.
  3. Click Update of the target node pool. On the page that is displayed, configure labels in the Kubernetes Label area.

                                                                                                          2. Choose Add-ons in the navigation pane, locate Volcano Scheduler on the right, click Install or Edit, and configure Volcano scheduler parameters in the Parameters area.

  {
     "ca_cert": "",
     "default_scheduler_conf": {
         "actions": "allocate, backfill, preempt",
         "tiers": [
             {
                 "plugins": [
  ...
diff --git a/docs/cce/umn/cce_10_0768.html b/docs/cce/umn/cce_10_0768.html

Resource Usage-based Scheduling

                                                                                                      Reliability

                                                                                                      After the multi-master node mode is enabled, three master nodes will be created. If a master node is faulty, the cluster can still be available without affecting service functions. In commercial scenarios, it is advised to enable the multi-master node mode.

When creating a cluster, select a proper network model as needed.
• Select VPC network or Tunnel network for your CCE standard cluster.
• Select Cloud Native Network 2.0 for your CCE Turbo cluster.

                                                                                                      Deployment


                                                                                                      3. Create a cluster and a node.

                                                                                                        1. Log in to the CCE console. On the Clusters page, click Create Cluster and select the type for the cluster to be created.

                                                                                                          Configure cluster parameters and select the VPC created in 1.

                                                                                                          +
                                                                                                        2. Create a node and select the key pair created in 1 as the login option.
                                                                                                        +

                                                                                                      4. Deploy a workload on CCE.

                                                                                                        1. Log in to the CCE console and click the name of the cluster to access the cluster console. In the navigation pane, choose Workloads and click Create Workload.
                                                                                                        2. Configure the following parameters, and retain the default settings for other parameters:
                                                                                                          • Workload Name: Set it to apptest.
                                                                                                          • Pods: Set it to 1.
                                                                                                        3. In the Container Settings area, select the image uploaded in Building and Uploading an Image.
4. In the Container Settings area, choose Environment Variables and add the environment variables for interconnecting with the MySQL database. The environment variables are used by the startup script.

In this example, the connection to the MySQL database is configured through environment variables. Determine whether environment variables are needed based on your service requirements. (A YAML sketch equivalent to these console steps is provided after this procedure.)

                                                                                                          @@ -77,7 +77,7 @@
                                                                                                        5. In the Container Settings area, choose Data Storage and configure cloud storage for persistent data storage.

                                                                                                          In this example, the MongoDB database is used and persistent data storage is also needed, so you need to configure cloud storage. Determine whether to use cloud storage based on your service requirements.

                                                                                                          The mounted path must be the same as the MongoDB storage path in the Docker startup script. For details, see the startup script. In this example, the path is /usr/local/mongodb/data.

                                                                                                          -
                                                                                                        6. In the Service Settings area, click to add a service, configure workload access parameters, and click OK.

                                                                                                          In this example, the application will be accessible from public networks by using an elastic IP address.

                                                                                                          +
7. In the Service Settings area, click the add icon to add a Service, configure workload access parameters, and click OK.

                                                                                                          In this example, the application will be accessible from public networks by using an elastic IP address.

                                                                                                          • Service Name: name of the application that can be accessed externally. In this example, this parameter is set to apptest.
                                                                                                          • Service Type: In this example, select NodePort.
                                                                                                          • Service Affinity
                                                                                                            • Cluster-level: The IP addresses and access ports of all nodes in a cluster can be used to access the workload associated with the Service. Service access will cause performance loss due to route redirection, and the source IP address of the client cannot be obtained.
                                                                                                            • Node-level: Only the IP address and access port of the node where the workload is located can be used to access the workload associated with the Service. Service access will not cause performance loss due to route redirection, and the source IP address of the client can be obtained.
                                                                                                          • Port
                                                                                                            • Protocol: Set it to TCP.
                                                                                                            • Service Port: port for accessing the Service.
• Container Port: port on which the application listens in the container. In this example, this parameter is set to 8080.
                                                                                                            • Node Port: Set it to Auto. The system automatically opens a real port on all nodes in the current cluster and then maps the port number to the container port.
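The console steps above can also be written declaratively and applied with kubectl. The following is a minimal sketch rather than the documented procedure: the image path and the environment variable names and values are placeholders that must match what your startup script actually reads; only the workload name (apptest), the single pod, TCP, container port 8080, and the NodePort Service type come from the example above.

  cat <<'EOF' | kubectl apply -f -
  apiVersion: apps/v1
  kind: Deployment
  metadata:
    name: apptest
  spec:
    replicas: 1                      # Pods: 1
    selector:
      matchLabels:
        app: apptest
    template:
      metadata:
        labels:
          app: apptest
      spec:
        containers:
        - name: container-1
          image: swr.example.com/group/apptest:v1   # placeholder: the image uploaded in "Building and Uploading an Image"
          env:                                      # placeholder variables consumed by the startup script
          - name: MYSQL_DB_HOST
            value: "192.168.0.100"
          - name: MYSQL_DB_PORT
            value: "3306"
          ports:
          - containerPort: 8080                     # port the application listens on in the container
  ---
  apiVersion: v1
  kind: Service
  metadata:
    name: apptest                                   # Service Name
  spec:
    type: NodePort                                  # Service Type: NodePort; nodePort is omitted so it is auto-assigned
    selector:
      app: apptest
    ports:
    - protocol: TCP
      port: 8080                                    # Service port
      targetPort: 8080                              # container port
  EOF
  kubectl get svc apptest                           # shows the automatically assigned node port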
                                                                                                            diff --git a/docs/cce/umn/cce_bestpractice_0013.html b/docs/cce/umn/cce_bestpractice_0013.html new file mode 100644 index 00000000..6677214b --- /dev/null +++ b/docs/cce/umn/cce_bestpractice_0013.html @@ -0,0 +1,19 @@ + + +

                                                                                                            Migrating Kubernetes Clusters to CCE

                                                                                                            +
                                                                                                            + + diff --git a/docs/cce/umn/cce_bestpractice_0308.html b/docs/cce/umn/cce_bestpractice_0014.html similarity index 55% rename from docs/cce/umn/cce_bestpractice_0308.html rename to docs/cce/umn/cce_bestpractice_0014.html index 9486d399..2049cf96 100644 --- a/docs/cce/umn/cce_bestpractice_0308.html +++ b/docs/cce/umn/cce_bestpractice_0014.html @@ -1,71 +1,71 @@ - +

                                                                                                            Planning Resources for the Target Cluster

                                                                                                            -

                                                                                                            CCE allows you to customize cluster resources to meet various service requirements. Table 1 lists the key performance parameters of a cluster and provides the planned values. You can set the parameters based on your service requirements. It is recommended that the performance configuration be the same as that of the source cluster.

                                                                                                            -

                                                                                                            After a cluster is created, the resource parameters marked with asterisks (*) in Table 1 cannot be modified.

                                                                                                            +

                                                                                                            CCE allows you to customize cluster resources to meet various service requirements. Table 1 lists the key performance parameters of a cluster and provides the planned values. You can set the parameters based on your service requirements. It is recommended that the performance configuration be the same as that of the source cluster.

                                                                                                            +

                                                                                                            After a cluster is created, the resource parameters marked with asterisks (*) in Table 1 cannot be modified.

                                                                                                            -
                                                                                                            Table 1 CCE cluster planning

                                                                                                            Resource

                                                                                                            +
@@ -74,7 +74,7 @@

diff --git a/docs/cce/umn/cce_bestpractice_00162.html b/docs/cce/umn/cce_bestpractice_00162.html index 50c88a71..9ceaa560 100644 --- a/docs/cce/umn/cce_bestpractice_00162.html +++ b/docs/cce/umn/cce_bestpractice_00162.html @@ -4,93 +4,93 @@

                                                                                                            CCE uses proprietary, high-performance container networking add-ons to support the tunnel network, Cloud Native 2.0 network, and VPC network models.

                                                                                                            After a cluster is created, the network model cannot be changed. Exercise caution when selecting a network model.

                                                                                                            -
                                                                                                            • Tunnel network: The container network is an overlay tunnel network on top of a VPC network and uses the VXLAN technology. This network model is applicable when there is no high requirements on performance. VXLAN encapsulates Ethernet packets as UDP packets for tunnel transmission. Though at some cost of performance, the tunnel encapsulation enables higher interoperability and compatibility with advanced features (such as network policy-based isolation), meeting the requirements of most applications.
                                                                                                              Figure 1 Container tunnel network
                                                                                                              -
                                                                                                            • VPC network: The container network uses VPC routing to integrate with the underlying network. This network model is applicable to performance-intensive scenarios. The maximum number of nodes allowed in a cluster depends on the route quota in a VPC network. Each node is assigned a CIDR block of a fixed size. VPC networks are free from tunnel encapsulation overhead and outperform container tunnel networks. In addition, as VPC routing includes routes to node IP addresses and container network segment, container pods in the cluster can be directly accessed from outside the cluster.
                                                                                                              Figure 2 VPC network
                                                                                                              -
                                                                                                            • Cloud Native Network 2.0: The container network deeply integrates the elastic network interface (ENI) capability of VPC, uses the VPC CIDR block to allocate container addresses, and supports passthrough networking to containers through a load balancer.
                                                                                                              Figure 3 Cloud Native 2.0 Network
                                                                                                              +
• Tunnel network: The container network is an overlay tunnel network on top of a VPC network and uses the VXLAN technology. This network model is suitable when there are no high performance requirements. VXLAN encapsulates Ethernet packets as UDP packets for tunnel transmission. Though it incurs some performance overhead, tunnel encapsulation delivers better interoperability and compatibility with advanced features (such as network policy-based isolation), which meets the requirements of most applications.
                                                                                                                Figure 1 Container tunnel network
                                                                                                                +
• VPC network: The container network uses VPC routing to integrate with the underlying network. This network model is applicable to performance-intensive scenarios. The maximum number of nodes allowed in a cluster depends on the route quota in a VPC network. Each node is assigned a CIDR block of a fixed size. VPC networks are free from tunnel encapsulation overhead and outperform container tunnel networks. In addition, because VPC routing includes routes to node IP addresses and the container CIDR block, pods in the cluster can be directly accessed from outside the cluster.
                                                                                                                Figure 2 VPC network
                                                                                                                +
                                                                                                              • Cloud Native Network 2.0: The container network deeply integrates the elastic network interface (ENI) capability of VPC, uses the VPC CIDR block to allocate container addresses, and supports passthrough networking to containers through a load balancer.
                                                                                                                Figure 3 Cloud Native 2.0 network

                                                                                                              The following table lists the differences between the network models.

                                                                                                              -
Table 1 CCE cluster planning
(Columns: Resource | Key Performance Parameter | Description | Example Value)

Resource: Cluster

*Cluster Type
• CCE cluster: supports VM nodes. You can run your containers in a secure and stable container runtime environment based on a high-performance network model.
• CCE Turbo cluster: runs on a cloud native infrastructure that features software-hardware synergy to support passthrough networking, high security and reliability, intelligent scheduling, and BMS nodes.
Example value: CCE cluster

*Network Model
• VPC network: The container network uses VPC routing to integrate with the underlying network. This network model is applicable to performance-intensive scenarios. The maximum number of nodes allowed in a cluster depends on the route quota in a VPC network.
• Tunnel network: The container network is an overlay tunnel network on top of a VPC network and uses the VXLAN technology. This network model is suitable when there are no high performance requirements.
• Cloud Native Network 2.0: The container network deeply integrates the elastic network interface (ENI) capability of VPC, uses the VPC CIDR block to allocate container addresses, and supports passthrough networking to containers through a load balancer.
Example value: VPC network

*Number of master nodes
• 3: Three master nodes will be created to deliver better DR performance. If one master node is faulty, the cluster can still be available without affecting service functions.
• 1: A single master node will be created. This mode is not recommended in commercial scenarios.
Example value: 3

Resource: Node

OS
• EulerOS
• CentOS
• Ubuntu
Example value: EulerOS

Node Specifications (vary depending on the actual region)
• General-purpose: provides a balance of computing, memory, and network resources. It is a good choice for many applications. General-purpose nodes can be used for web servers, workload development, workload testing, and small-scale databases.
• Memory-optimized: provides higher memory capacity than general-purpose nodes and is suitable for relational databases, NoSQL, and other workloads that are both memory-intensive and data-intensive.
• GPU-accelerated: provides powerful floating-point computing and is suitable for real-time, highly concurrent massive computing. Graphical processing units (GPUs) of P series are suitable for deep learning, scientific computing, and CAE. GPUs of G series are suitable for 3D animation rendering and CAD. GPU-accelerated nodes can be added only to clusters of v1.11 or later.
• General computing-plus: provides stable performance and exclusive resources to enterprise-class workloads with high and stable computing performance.
• Disk-intensive: supports local disk storage and provides high networking performance. It is designed for workloads requiring high throughput and data switching, such as big data workloads.
Example value: General-purpose (node specifications: 4 vCPUs and 8 GiB memory)

System Disk
• Common I/O: The backend storage media is SATA disks.
• High I/O: The backend storage media is SAS disks.
• Ultra-high I/O: The backend storage media is SSD disks.
Example value: High I/O

Storage Type
• EVS volumes: Mount an EVS volume to a container path. When containers are migrated, the attached EVS volumes are migrated accordingly. This storage mode is suitable for data that needs to be permanently stored.
• SFS volumes: Create SFS volumes and mount them to a container path. The file system volumes created by the underlying SFS service can also be used. SFS volumes are applicable to persistent storage for frequent read/write in multiple workload scenarios, including media processing, content management, big data analysis, and workload analysis.
• OBS volumes: Create OBS volumes and mount them to a container path. OBS volumes are applicable to scenarios such as cloud workload, data analysis, content analysis, and hotspot objects.
• SFS Turbo volumes: Create SFS Turbo volumes and mount them to a container path. SFS Turbo volumes are fast, on-demand, and scalable, which makes them suitable for DevOps, containerized microservices, and enterprise office applications.
Example value: EVS volumes

Table 1 Networking model comparison

Dimension

diff --git a/docs/cce/umn/cce_bestpractice_00198.html b/docs/cce/umn/cce_bestpractice_00198.html index 36123ba3..97fa20ab 100644 --- a/docs/cce/umn/cce_bestpractice_00198.html +++ b/docs/cce/umn/cce_bestpractice_00198.html @@ -120,7 +120,7 @@ tmpfs tmpfs 1.8G 75M 1.8G 5% /tmp

                                                                                                          • Log in to the CCE console and click the cluster. In the navigation pane, choose Nodes. Click More > Sync Server Data in the row containing the target node.
                                                                                                          • Expanding the Capacity of a Data Disk Used by Container Engines

CCE divides the data disk space into two parts by default. One part stores the Docker/containerd working directories, container images, and image metadata. The other is reserved for kubelet and emptyDir volumes. The available container engine space affects image pulls as well as container startup and running. This section uses Docker as an example to describe how to expand the container engine capacity.

                                                                                                            -
                                                                                                            1. Expand the capacity of the data disk on the EVS console.
                                                                                                            2. Log in to the CCE console and click the cluster. In the navigation pane, choose Nodes. Click More > Sync Server Data in the row containing the target node.
                                                                                                            3. Log in to the target node.
                                                                                                            4. Run lsblk to view the block device information of the node.

                                                                                                              A data disk is divided depending on the container storage Rootfs:

                                                                                                              +
                                                                                                              1. Expand the capacity of the data disk on the EVS console.
                                                                                                              2. Log in to the CCE console and click the cluster. In the navigation pane, choose Nodes. Click More > Sync Server Data in the row containing the target node.
                                                                                                              3. Log in to the target node.
                                                                                                              4. Run the lsblk command to check the block device information of the node.

How a data disk is divided depends on the container storage rootfs:

                                                                                                                • Overlayfs: No independent thin pool is allocated. Image data is stored in the dockersys disk.
                                                                                                                  # lsblk
                                                                                                                   NAME                MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
                                                                                                                   vda                   8:0    0   50G  0 disk 
                                                                                                                  @@ -154,7 +154,7 @@ resize2fs /dev/vgpaas/dockersys
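After the EVS expansion and server data synchronization, the extra space still has to be handed to the container engine volume on the node. The following is a minimal sketch, assuming the data disk is /dev/vdb, the container engine space is the dockersys logical volume in the vgpaas volume group (consistent with the resize2fs command shown above), and the file system is ext4; verify the actual device and volume names with lsblk first.

  pvresize /dev/vdb                               # let LVM see the enlarged EVS data disk
  lvextend -l +100%FREE /dev/vgpaas/dockersys     # give the newly added free space to the container engine volume
  resize2fs /dev/vgpaas/dockersys                 # grow the ext4 file system to the new volume size
  # The kubelet space described in the next section is grown the same way, against /dev/vgpaas/kubernetes.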

                                                                                                            -

                                                                                                            Expanding the Capacity of a Data Disk Used by kubelet

                                                                                                            CCE divides the data disk space for two parts by default. One part is used to store the Docker/containerd working directories, container images, and image metadata. The other is reserved for kubelet and emptyDir volumes. To expand the kubelet space, perform the following steps:

                                                                                                            +

                                                                                                            Expanding the Capacity of a Data Disk Used by kubelet

CCE divides the data disk space between container engines and pods. The container engine space stores the Docker/containerd working directories, container images, and image metadata. The rest is reserved for kubelet and emptyDir volumes. To expand the kubelet space, perform the following steps:

                                                                                                            1. Expand the capacity of the data disk on the EVS console.
                                                                                                            2. Log in to the CCE console and click the cluster. In the navigation pane, choose Nodes. Click More > Sync Server Data in the row containing the target node.
                                                                                                            3. Log in to the target node.
                                                                                                            4. Run lsblk to view the block device information of the node.

                                                                                                              # lsblk
                                                                                                               NAME                MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
                                                                                                               vda                   8:0    0   50G  0 disk 
                                                                                                              @@ -170,20 +170,16 @@ resize2fs /dev/vgpaas/kubernetes

                                                                                                              Expanding the Capacity of a Data Disk Used by Pod (basesize)

                                                                                                              1. Log in to the CCE console and click the cluster name to access the cluster console.
                                                                                                              2. Choose Nodes from the navigation pane.
                                                                                                              3. Click the Nodes tab, locate the row containing the target node, and choose More > Reset Node in the Operation column.

Resetting a node may make node-specific resources (such as local storage and the workloads scheduled to this node) unavailable. Exercise caution when performing this operation to avoid affecting running services.

                                                                                                              4. Click Yes.
                                                                                                              5. Reconfigure node parameters.

                                                                                                                If you need to adjust the container storage space, pay attention to the following configurations:

                                                                                                                -

                                                                                                                -
                                                                                                                Storage Settings: Click Expand next to the data disk to set the following parameters:
                                                                                                                • Allocate Disk Space: storage space used by the container engine to store the Docker/containerd working directory, container image data, and image metadata. Defaults to 90% of the data disk.
                                                                                                                • Allocate Pod Basesize: CCE allows you to set an upper limit for the disk space occupied by each workload pod (including the space occupied by container images). This setting prevents the pods from taking all the disk space available, which may cause service exceptions. It is recommended that the value be smaller than or equal to 80% of the container engine space.
                                                                                                                  • The capability of customizing pod basesize is related to the node OS and container storage rootfs.
                                                                                                                    • When the rootfs uses Device Mapper, the node supports custom pod basesize. The default storage space of a single container is 10 GiB.
                                                                                                                    • When the rootfs uses OverlayFS, most nodes do not support custom pod basesize. The storage space of a single container is not limited and defaults to the container engine space.

                                                                                                                      Only EulerOS 2.9 nodes in clusters of 1.19.16, 1.21.3, 1.23.3, and later versions support custom pod basesize.

                                                                                                                      -
                                                                                                                    -
                                                                                                                  • In the case of using Docker on EulerOS 2.9 nodes, basesize will not take effect if CAP_SYS_RESOURCE or privileged is configured for a container.
                                                                                                                  -
                                                                                                                  -
                                                                                                                -
                                                                                                                +

                                                                                                                Storage Settings: Click Expand next to the data disk to set the following parameters:

                                                                                                                +

Space Allocation for Pods: indicates the base size of a pod, that is, the maximum disk space that a workload's pods (including the container images) can grow to. Proper settings prevent pods from taking up all the available disk space, which could cause service exceptions. It is recommended that the value be less than or equal to 80% of the container engine space. This parameter depends on the node OS and the container storage rootfs and is not supported in some scenarios (a quick way to check the rootfs is shown after this procedure). For details, see Data Disk Space Allocation.

                                                                                                                +

6. After the node is reset, log in to the node and run the following commands to access the container and check whether the container storage capacity has been expanded:

docker exec -it <container_id> /bin/sh or kubectl exec -it <pod_name> -- /bin/sh

                                                                                                                df -h

                                                                                                                -

                                                                                                                +
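Whether a custom basesize takes effect depends on the container storage rootfs mentioned above. A minimal check, assuming the node runs the Docker engine (execute it on the node):

  docker info --format '{{.Driver}}'    # "devicemapper" supports a custom basesize; "overlay2" generally does not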

                                                                                                              Expanding a PVC

                                                                                                              Cloud storage:

                                                                                                              -
                                                                                                              • OBS and SFS: There is no storage restriction and capacity expansion is not required.
                                                                                                              • EVS:
                                                                                                                • You can expand the capacity of automatically created pay-per-use volumes on the console. The procedure is as follows:
                                                                                                                  1. Choose Storage in the navigation pane and click the PersistentVolumeClaims (PVCs) tab. Click More in the Operation column of the target PVC and select Scale-out.
                                                                                                                  2. Enter the capacity to be added and click OK.
                                                                                                                  +
                                                                                                                  • OBS and SFS: There is no storage restriction and capacity expansion is not required.
                                                                                                                  • EVS:
• You can expand the capacity of automatically created volumes on the console (a kubectl-based alternative is sketched after this list). The procedure is as follows:
                                                                                                                      1. Choose Storage in the navigation pane and click the PVCs tab. Click More in the Operation column of the target PVC and select Scale-out.
                                                                                                                      2. Enter the capacity to be added and click OK.
                                                                                                                  • For SFS Turbo, expand the capacity on the SFS console and then change the capacity in the PVC.
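The console-based EVS expansion above can also be done with kubectl by patching the PVC's requested storage, assuming the StorageClass of the volume allows expansion; the PVC name, namespace, and target size below are placeholders:

  kubectl patch pvc pvc-evs-example -n default \
    -p '{"spec":{"resources":{"requests":{"storage":"30Gi"}}}}'
  kubectl get pvc pvc-evs-example -n default    # the capacity is updated once the EVS volume and file system are resized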
                                                                                                              diff --git a/docs/cce/umn/cce_bestpractice_00199.html b/docs/cce/umn/cce_bestpractice_00199.html index e2ca37ad..1258b637 100644 --- a/docs/cce/umn/cce_bestpractice_00199.html +++ b/docs/cce/umn/cce_bestpractice_00199.html @@ -3,14 +3,14 @@

                                                                                                              Mounting an Object Storage Bucket of a Third-Party Tenant

                                                                                                              This section describes how to mount OBS buckets and OBS parallel file systems (preferred) of third-party tenants.

                                                                                                              Application Scenarios

                                                                                                              The CCE cluster of a SaaS service provider needs to be mounted with the OBS bucket of a third-party tenant, as shown in Figure 1.

                                                                                                              -
                                                                                                              Figure 1 Mounting an OBS bucket of a third-party tenant
                                                                                                              +
                                                                                                              Figure 1 Mounting an OBS bucket of a third-party tenant
                                                                                                              1. The third-party tenant authorizes the SaaS service provider to access the OBS buckets or parallel file systems by setting the bucket policy and bucket ACL.
                                                                                                              2. The SaaS service provider statically imports the OBS buckets and parallel file systems of the third-party tenant.
                                                                                                              3. The SaaS service provider processes the service and writes the processing result (result file or result data) back to the OBS bucket of the third-party tenant.

                                                                                                              Precautions

                                                                                                              • Only parallel file systems and OBS buckets of third-party tenants in the same region can be mounted.
• OBS buckets of third-party tenants can be mounted only to clusters where the everest add-on of v1.1.11 or later has been installed (the cluster version must be v1.15 or later).
• The service platform of the SaaS service provider must manage the lifecycle of the third-party bucket PVs by calling the native Kubernetes APIs to create and delete static PVs. When a PVC is deleted separately, the PV is not deleted; it is retained.

                                                                                                              Authorizing the SaaS Service Provider to Access the OBS Buckets

                                                                                                              The following uses an OBS bucket as an example to describe how to set a bucket policy and bucket ACL to authorize the SaaS service provider. The configuration for an OBS parallel file system is the same.

1. Log in to the OBS console.
2. In the bucket list, click a bucket name to access the Overview page.
3. In the navigation pane, choose Permissions > Bucket Policies. On the displayed page, click Create to create a bucket policy.

   Figure 2 Creating a bucket policy

   • Policy Mode: Select Customized.
   • Effect: Select Allow.
   • Principal: Select Include, select Cloud service user, and enter the account ID and user ID. The bucket policy is applied to the specified user.
   • Resources: Select the resources that can be operated.
   • Actions: Select the actions that can be operated.

4. In the navigation pane, choose Permissions > Bucket ACLs. In the right pane, click Add. Enter the account ID or account name of the authorized user, select Read and Write for Access to Bucket, select Read and Write for Access to ACL, and click OK.

                                                                                                              Statically Importing OBS Buckets and Parallel File Systems

                                                                                                              • Static PV of an OBS bucket:
                                                                                                                apiVersion: v1
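The manifest above is cut off by the diff context. Below is a minimal sketch of a statically created PV and PVC for a third-party OBS bucket; the driver name, storage class, and secret reference follow common everest conventions but are assumptions here, and the bucket and secret names are placeholders:

apiVersion: v1
kind: PersistentVolume
metadata:
  name: third-party-obs-pv                   # placeholder name
spec:
  accessModes:
    - ReadWriteMany
  capacity:
    storage: 1Gi                             # nominal value; OBS capacity is not limited
  persistentVolumeReclaimPolicy: Retain      # keep the PV when the PVC is deleted
  storageClassName: csi-obs                  # assumed storage class name
  csi:
    driver: obs.csi.everest.io               # assumed everest CSI driver name for OBS
    volumeHandle: third-party-bucket         # placeholder: name of the third-party bucket
    fsType: s3fs                             # assumed; obsfs is typically used for parallel file systems
    nodePublishSecretRef:                    # assumed: secret holding access credentials for the bucket
      name: obs-access-secret                # placeholder secret name
      namespace: default
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: third-party-obs-pvc                  # placeholder name
  namespace: default
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-obs                  # must match the PV
  volumeName: third-party-obs-pv             # binds this PVC to the static PV above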
                                                                                                                diff --git a/docs/cce/umn/cce_bestpractice_00221.html b/docs/cce/umn/cce_bestpractice_00221.html
                                                                                                                index 638363b2..6ef7fab4 100644
                                                                                                                --- a/docs/cce/umn/cce_bestpractice_00221.html
                                                                                                                +++ b/docs/cce/umn/cce_bestpractice_00221.html
                                                                                                                @@ -10,9 +10,11 @@
                                                                                                                 

                                                                                                                Configuration Method

In the following example, only pods and Deployments in the test namespace can be viewed and added, and they cannot be deleted.

                                                                                                                1. Set the service account name to my-sa and namespace to test.

                                                                                                                  kubectl create sa my-sa -n test
                                                                                                                  -

                                                                                                                  +

2. Configure a Role and assign operation permissions for different resources.

                                                                                                                  vi role-test.yaml
The content is as follows:

In this example, the permission rules include the read-only permission (get/list/watch) on pods in the test namespace, and the read (get/list/watch) and create permissions on Deployments (see the sketch after this step).

apiVersion: rbac.authorization.k8s.io/v1
                                                                                                                   kind: Role
                                                                                                                   metadata:
                                                                                                                     annotations:
                                                                                                                  @@ -43,7 +45,7 @@ rules:
                                                                                                                   

                                                                                                                  Create a Role.

                                                                                                                  kubectl create -f role-test.yaml
                                                                                                                  -

                                                                                                                  +
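The role-test.yaml content is truncated by the diff context above. A minimal sketch matching the described permissions (read-only on pods, read and create on Deployments in the test namespace) could look as follows; the Role name is illustrative:

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: role-test                  # illustrative name
  namespace: test
rules:
- apiGroups: [""]                  # core API group
  resources: ["pods"]
  verbs: ["get", "list", "watch"]  # read-only access to pods
- apiGroups: ["apps"]
  resources: ["deployments"]
  verbs: ["get", "list", "watch", "create"]   # read and create access to Deployments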

                                                                                                                3. Create a RoleBinding and bind the service account to the role so that the user can obtain the corresponding permissions.

                                                                                                                  vi myrolebinding.yaml
                                                                                                                  The content is as follows:
                                                                                                                  apiVersion: rbac.authorization.k8s.io/v1
                                                                                                                   kind: RoleBinding
                                                                                                                  @@ -61,37 +63,46 @@ subjects:
                                                                                                                   

                                                                                                                  Create a RoleBinding.

                                                                                                                  kubectl create -f myrolebinding.yaml
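The myrolebinding.yaml content is also truncated by the diff context. A minimal sketch that binds the my-sa service account to the Role could look as follows; the RoleBinding and Role names are assumptions:

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: myrolebinding              # illustrative name
  namespace: test
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: role-test                  # assumed to match the Role created in the previous step
subjects:
- kind: ServiceAccount
  name: my-sa
  namespace: test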
                                                                                                                  -

                                                                                                                  -

                                                                                                                  The user information is configured. Now perform 4 to 6 to write the user information to the configuration file.

                                                                                                                  -

                                                                                                                4. Configure the cluster information.

                                                                                                                  1. Use the sa name my-sa to obtain the secret corresponding to the sa. In the following example, my-sa-token-z4967 in the first column is the secret name.
                                                                                                                  -
                                                                                                                  kubectl get secret -n test |grep my-sa
                                                                                                                  -

                                                                                                                  -
                                                                                                                  1. Decrypt the ca.crt file in the secret and export it.
                                                                                                                  -
                                                                                                                  kubectl get secret my-sa-token-5gpl4 -n test -oyaml |grep ca.crt: | awk '{print $2}' |base64 -d > /home/ca.crt
                                                                                                                  -
                                                                                                                  1. Set the cluster access mode. test-arm indicates the cluster to be accessed, 10.0.1.100 indicates the IP address of the API server in the cluster and /home/test.config indicates the path for storing the configuration file.
                                                                                                                    • If the internal API server address is used, run the following command:
                                                                                                                      kubectl config set-cluster test-arm --server=https://10.0.1.100:5443  --certificate-authority=/home/ca.crt  --embed-certs=true --kubeconfig=/home/test.config
                                                                                                                      -
                                                                                                                    • If the public API server address is used, run the following command:
                                                                                                                      kubectl config set-cluster test-arm --server=https://10.0.1.100:5443 --kubeconfig=/home/test.config --insecure-skip-tls-verify=true
                                                                                                                      +

                                                                                                                      +

                                                                                                                      The user information is configured. Now perform 5 to 7 to write the user information to the configuration file.

                                                                                                                      +

                                                                                                                    • Manually create a token that is valid for a long time for ServiceAccount.

                                                                                                                      vi my-sa-token.yaml
                                                                                                                      +
                                                                                                                      The content is as follows:
                                                                                                                      apiVersion: v1
                                                                                                                      +kind: Secret
                                                                                                                      +metadata:
                                                                                                                      +  name: my-sa-token-secret
                                                                                                                      +  namespace: test
                                                                                                                      +  annotations:
                                                                                                                      +    kubernetes.io/service-account.name: my-sa
                                                                                                                      +type: kubernetes.io/service-account-token
                                                                                                                      +
                                                                                                                      +

                                                                                                                      Create a token:

                                                                                                                      +
                                                                                                                      kubectl create -f my-sa-token.yaml
                                                                                                                      +

                                                                                                                    • Configure the cluster information.

                                                                                                                      1. Decrypt the ca.crt file in the secret and export it.
                                                                                                                      +
                                                                                                                      kubectl get secret my-sa-token-secret -n test -oyaml |grep ca.crt: | awk '{print $2}' |base64 -d > /home/ca.crt
                                                                                                                      +
                                                                                                                      1. Set a cluster access mode. test-arm specifies the cluster to be accessed. https://192.168.0.110:5443 specifies the apiserver IP address of the cluster. /home/test.config specifies the path for storing the configuration file.
                                                                                                                        • If the internal API server address is used, run the following command:
                                                                                                                          kubectl config set-cluster test-arm --server=https://192.168.0.110:5443  --certificate-authority=/home/ca.crt  --embed-certs=true --kubeconfig=/home/test.config
                                                                                                                          +
                                                                                                                        • If the public API server address is used, run the following command:
                                                                                                                          kubectl config set-cluster test-arm --server=https://192.168.0.110:5443 --kubeconfig=/home/test.config --insecure-skip-tls-verify=true
                                                                                                                        -

                                                                                                                        +

If you perform these operations on a node in the cluster, or if the node that will use this configuration is a cluster node, do not set the kubeconfig path to /root/.kube/config.

By default, the apiserver IP address of the cluster is a private IP address. After an EIP is bound to the cluster, you can also use the public IP address to access the apiserver.

                                                                                                                    • Configure the cluster authentication information.

1. Obtain the cluster token. (If the token is obtained in GET mode, run base64 -d to decode the token.)
                                                                                                                      -
                                                                                                                      token=$(kubectl describe secret my-sa-token-5gpl4 -n test | awk '/token:/{print $2}')
                                                                                                                      +
                                                                                                                      token=$(kubectl describe secret my-sa-token-secret -n test | awk '/token:/{print $2}')
                                                                                                                      1. Set the cluster user ui-admin.
                                                                                                                      kubectl config set-credentials ui-admin --token=$token --kubeconfig=/home/test.config
                                                                                                                      -

                                                                                                                      -


                                                                                                                    • Configure the context information for cluster authentication access. ui-admin@test specifies the context name.

                                                                                                                      kubectl config set-context ui-admin@test --cluster=test-arm --user=ui-admin --kubeconfig=/home/test.config
                                                                                                                      +

                                                                                                                      +

                                                                                                                    • Configure the context. For details about how to use the context, see Verification.

                                                                                                                      kubectl config use-context ui-admin@test --kubeconfig=/home/test.config
                                                                                                                      +

                                                                                                                      +

                                                                                                                      If you want to assign other users the above permissions to perform operations on the cluster, provide the generated configuration file /home/test.config to the user after performing step 7. The user must ensure that the host can access the API server address of the cluster. When performing step 8 on the host and using kubectl, the user must set the kubeconfig parameter to the path of the configuration file.
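For reference, the kubectl config commands above assemble /home/test.config into a kubeconfig roughly structured as follows (a sketch using the names from this example and assuming the internal API server address; certificate and token data are shortened to placeholders):

apiVersion: v1
kind: Config
clusters:
- name: test-arm
  cluster:
    server: https://192.168.0.110:5443
    certificate-authority-data: <base64 of /home/ca.crt>   # embedded by --embed-certs=true
users:
- name: ui-admin
  user:
    token: <service account token>                         # set by kubectl config set-credentials
contexts:
- name: ui-admin@test
  context:
    cluster: test-arm
    user: ui-admin
current-context: ui-admin@test                             # set by kubectl config use-context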

                                                                                                                Verification

1. Pods in the test namespace can be queried, but pods in other namespaces cannot be accessed.
                                                                                                                  kubectl get pod -n test --kubeconfig=/home/test.config
2. Pods in the test namespace cannot be deleted.

                                                                                                                Further Readings

                                                                                                                For more information about users and identity authentication in Kubernetes, see Authenticating.

                                                                                                                diff --git a/docs/cce/umn/cce_bestpractice_00226.html b/docs/cce/umn/cce_bestpractice_00226.html index aa3356a2..2e054242 100644 --- a/docs/cce/umn/cce_bestpractice_00226.html +++ b/docs/cce/umn/cce_bestpractice_00226.html @@ -145,7 +145,7 @@ spec: hostaliases-pod 1/1 Running 0 16m

• Check whether hostAliases functions properly.

  docker ps |grep hostaliases-pod

  docker exec -ti <container ID> /bin/sh

                                                                                                                -

                                                                                                                +
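For context on what is being checked, a pod that uses hostAliases generally follows the pattern below (a generic sketch; the IP address, hostname, and image are placeholders, not values from this guide). Inside the container, the configured entries should appear in /etc/hosts.

apiVersion: v1
kind: Pod
metadata:
  name: hostaliases-pod
spec:
  hostAliases:                       # entries written into the pod's /etc/hosts
  - ip: "192.168.0.12"               # placeholder IP address
    hostnames:
    - "example.internal"             # placeholder hostname
  containers:
  - name: app
    image: nginx:alpine              # placeholder image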

                                                                                                            diff --git a/docs/cce/umn/cce_bestpractice_00228.html b/docs/cce/umn/cce_bestpractice_00228.html index e90b7cbd..267235c7 100644 --- a/docs/cce/umn/cce_bestpractice_00228.html +++ b/docs/cce/umn/cce_bestpractice_00228.html @@ -49,7 +49,7 @@ spec:
                                                                                                            deployment.apps/mysql created

                                                                                                          • Query the created Docker container on the node where the workload is running.

                                                                                                            docker ps -a|grep mysql

                                                                                                            The init container will exit after it runs to completion. The query result Exited (0) shows the exit status of the init container.

                                                                                                            -

                                                                                                            +
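For context, a Deployment with an init container generally follows the pattern below (a generic sketch; the images, command, and environment values are placeholders rather than the exact manifest used in this example):

apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      initContainers:
      - name: init-task                        # placeholder init container
        image: busybox:1.36                    # placeholder image
        command: ['sh', '-c', 'echo preparing && sleep 5']   # runs to completion, then shows Exited (0)
      containers:
      - name: mysql
        image: mysql:5.7                       # placeholder image
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: "example"                     # placeholder; use a Secret in production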

                                                                                                          • diff --git a/docs/cce/umn/cce_bestpractice_00231.html b/docs/cce/umn/cce_bestpractice_00231.html index 44de5abe..02b9a236 100644 --- a/docs/cce/umn/cce_bestpractice_00231.html +++ b/docs/cce/umn/cce_bestpractice_00231.html @@ -1,42 +1,56 @@

                                                                                                            Implementing Sticky Session Through Load Balancing

Concepts

Sticky sessions (also called session persistence) ensure continuity and consistency when you access applications. If a load balancer is deployed between a client and backend servers, connections may be forwarded to different servers for processing. After sticky session is enabled, requests from the same client are continuously distributed to the same backend server through load balancing.

For example, in most online systems that require user identity authentication, a user needs to interact with the server multiple times to complete a session, and these interactions require continuity. If sticky session is not configured, the load balancer may allocate some requests to different backend servers. Because the user identity has not been authenticated on those servers, interaction exceptions such as login failures may occur.

In load balancing and sticky sessions, connection and session are two key concepts. Simply put, if a user needs to log in, the interaction can be regarded as a session; otherwise, it is a connection. Note that the sticky session mechanism works against the basic goal of load balancing, which is to forward requests from clients to multiple backend servers so that no single server is overloaded, whereas sticky sessions require that certain requests always be forwarded to the same server. Therefore, select a proper sticky session type based on the application environment.

Table 1 Network model comparison

• Application scenarios
  • Tunnel Network: Common container service scenarios; scenarios that do not have high requirements on network latency and bandwidth.
  • VPC Network: Scenarios that have high requirements on network latency and bandwidth; containers can communicate with VMs using a microservice registration framework, such as Dubbo and CSE.
  • Cloud Native Network 2.0: Scenarios that have high requirements on network latency, bandwidth, and performance; containers can communicate with VMs using a microservice registration framework, such as Dubbo and CSE.
• Core technology
  • Tunnel Network: OVS
  • VPC Network: IPvlan and VPC route
  • Cloud Native Network 2.0: VPC ENI/sub-ENI
• Applicable clusters
  • Tunnel Network: CCE standard cluster
  • VPC Network: CCE standard cluster
  • Cloud Native Network 2.0: CCE Turbo cluster
• Network isolation
  • Tunnel Network: Kubernetes native NetworkPolicy for pods
  • VPC Network: No
  • Cloud Native Network 2.0: Pods support security group isolation.
• Passthrough networking
  • Tunnel Network: No
  • VPC Network: No
  • Cloud Native Network 2.0: Yes
• IP address management
  • Tunnel Network: The container CIDR block is allocated separately. CIDR blocks are divided by node and can be dynamically allocated (CIDR blocks can be dynamically added after being allocated).
  • VPC Network: The container CIDR block is allocated separately. CIDR blocks are divided by node and statically allocated (the CIDR block cannot be changed after a node is created).
  • Cloud Native Network 2.0: The container CIDR block is divided from the VPC subnet and does not need to be allocated separately.
• Network performance
  • Tunnel Network: Performance loss due to VXLAN encapsulation.
  • VPC Network: No tunnel encapsulation. Cross-node packets are forwarded through VPC routers, delivering performance equivalent to that of the host network.
  • Cloud Native Network 2.0: The container network is integrated with the VPC network, eliminating performance loss.
• Networking scale
  • Tunnel Network: A maximum of 2000 nodes are supported.
  • VPC Network: Suitable for small- and medium-scale networks due to the limitation on VPC routing tables. It is recommended that the number of nodes be less than or equal to 1000. Each time a node is added to the cluster, a route is added to the VPC routing tables, so the cluster scale is limited by the VPC routing tables.
  • Cloud Native Network 2.0: A maximum of 2000 nodes are supported.
                                                                                                            Table 1 Sticky session types

                                                                                                            OSI Layer

                                                                                                            +

                                                                                                            Listener Protocol and Networking

                                                                                                            +

                                                                                                            Sticky Session Type

                                                                                                            +

                                                                                                            Stickiness Duration

                                                                                                            +

                                                                                                            Scenarios Where Sticky Sessions Become Invalid

                                                                                                            +

                                                                                                            Layer 4

                                                                                                            +

                                                                                                            TCP- or UDP-compliant Services

                                                                                                            +

                                                                                                            Source IP address: The source IP address of each request is calculated using the consistent hashing algorithm to obtain a unique hashing key, and all backend servers are numbered. The system allocates the client to a particular server based on the generated key. This allows requests from the same IP address are forwarded to the same backend server.

                                                                                                            +
                                                                                                            • Default: 20 minutes
                                                                                                            • Maximum: 60 minutes
                                                                                                            • Range: 1 minute to 60 minutes
                                                                                                            +
                                                                                                            • Source IP addresses of the clients have changed.
                                                                                                            • Requests from the clients exceed the session stickiness duration.
                                                                                                            +

                                                                                                            Layer 7

                                                                                                            +

                                                                                                            HTTP- or HTTPS-compliant ingresses

                                                                                                            +
                                                                                                            • Load balancer cookie: The load balancer generates a cookie after receiving a request from the client. All subsequent requests with the cookie will be routed to the same backend server.
                                                                                                            • Application cookie: The application deployed on the backend server generates a cookie after receiving the first request from the client. All subsequent requests with the same cookie will be routed to the same backend server.
                                                                                                            +
                                                                                                            • Default: 20 minutes
                                                                                                            • Maximum: 1440 minutes
                                                                                                            • Range: 1 minute to 1440 minutes
                                                                                                            +
                                                                                                            • If requests sent by the clients do not contain a cookie, sticky sessions will not take effect.
                                                                                                            • Requests from the clients exceed the session stickiness duration.
                                                                                                            +
                                                                                                            -
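The sketch below illustrates how the sticky session type and stickiness duration in Table 1 can map to Service annotations. It assumes that the stickiness duration is set through a persistence_timeout key (in minutes) of the kubernetes.io/elb.session-affinity-option annotation; that key is not shown on this page, so treat it as an assumption and verify it against the CCE annotation reference.

apiVersion: v1
kind: Service
metadata:
  name: svc-sticky-demo                                      # Hypothetical name, for illustration only
  namespace: default
  annotations:
    kubernetes.io/elb.class: union
    kubernetes.io/elb.id: "<load-balancer-id>"               # Placeholder for an existing load balancer ID
    kubernetes.io/elb.lb-algorithm: ROUND_ROBIN
    kubernetes.io/elb.session-affinity-mode: SOURCE_IP       # Layer 4: source IP address-based stickiness (Table 1)
    kubernetes.io/elb.session-affinity-option: '{"persistence_timeout": "30"}'   # Assumed key: stickiness duration in minutes (1 to 60 at Layer 4)
spec:
  selector:
    app: nginx
  externalTrafficPolicy: Local
  ports:
    - name: cce-service-0
      port: 80
      targetPort: 80
      protocol: TCP
  type: LoadBalancer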

                                                                                                            Layer-4 Load Balancing (Service)

In layer-4 load balancing, source IP address-based sticky sessions (hash routing based on the client IP address) can be enabled. To enable source IP address-based sticky sessions on a Service, the following conditions must be met:
1. Service Affinity of the Service is set to Node level (that is, the externalTrafficPolicy field of the Service is set to Local).

   You do not need to set this parameter for CCE Turbo clusters.

   When creating a load balancer, set kubernetes.io/elb.lb-algorithm to ROUND_ROBIN or LEAST_CONNECTIONS and then configure sticky sessions. If kubernetes.io/elb.lb-algorithm is set to SOURCE_IP, source IP address-based sticky sessions are already in effect, so you do not need to configure them again.

2. Source IP address-based sticky sessions are enabled in the load balancing configuration of the Service, as shown in the following example:
   apiVersion: v1
   kind: Service
   metadata:
     name: svc-example
     namespace: default
     annotations:
       kubernetes.io/elb.class: union
       kubernetes.io/elb.id: 56dcc1b4-8810-480c-940a-a44f7736f0dc
       kubernetes.io/elb.lb-algorithm: ROUND_ROBIN
       kubernetes.io/elb.session-affinity-mode: SOURCE_IP
   spec:
     selector: 
       app: nginx
     externalTrafficPolicy: Local   # You do not need to configure this parameter for CCE Turbo clusters.
     ports:
       - name: cce-service-0
         targetPort: 80
         nodePort: 32633
         port: 80
         protocol: TCP
     type: LoadBalancer
                                                                                                            3. Anti-affinity is enabled for the backend application corresponding to the Service.

                                                                                                            Layer 4 Sticky Sessions for Services

                                                                                                            In Layer 4 mode, source IP address-based sticky sessions can be enabled, where hash routing is performed based on the client IP address.


                                                                                                            Layer-7 Load Balancing (Ingress)

In layer-7 load balancing, sticky sessions based on HTTP cookies or application cookies can be enabled. To enable such sticky sessions, the following conditions must be met:
1. Workload anti-affinity is enabled for the application (workload) corresponding to the ingress.
2. Node affinity is enabled for the Service corresponding to the ingress.

                                                                                                            Procedure

1. Create an Nginx workload.

   Set the number of pods to 3 and configure podAntiAffinity for the workload. A sketch of such a Deployment follows this step.
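   A minimal sketch of such a Deployment, assuming the nginx image and the app: nginx label used by the Service examples on this page, with required anti-affinity on the node hostname (adjust names, image, and the anti-affinity rule to your environment):

   apiVersion: apps/v1
   kind: Deployment
   metadata:
     name: nginx
     namespace: default
   spec:
     replicas: 3                               # Three pods, as required by this step
     selector:
       matchLabels:
         app: nginx
     template:
       metadata:
         labels:
           app: nginx
       spec:
         affinity:
           podAntiAffinity:                    # Spread the pods across different nodes
             requiredDuringSchedulingIgnoredDuringExecution:
               - labelSelector:
                   matchLabels:
                     app: nginx
                 topologyKey: kubernetes.io/hostname
         containers:
           - name: container-0
             image: nginx:latest
             ports:
               - containerPort: 80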

                                                                                                              Layer 7 Sticky Sessions for Ingresses

                                                                                                              In Layer 7 mode, sticky sessions can be enabled using HTTP cookies or application cookies.
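For a load balancer-generated cookie, the session affinity mode is presumably HTTP_COOKIE; this value is not shown on this page, so treat it as an assumption and verify it against the CCE annotation reference. A minimal sketch of the Service annotations:

apiVersion: v1
kind: Service
metadata:
  name: nginx
  namespace: default
  annotations:
    kubernetes.io/elb.lb-algorithm: ROUND_ROBIN            # Weighted round robin allocation policy
    kubernetes.io/elb.session-affinity-mode: HTTP_COOKIE   # Assumed value for a load balancer-generated cookie
...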

You can also select APP_COOKIE, as shown in the following example:

apiVersion: v1
kind: Service
metadata:
  name: nginx
  namespace: default
  annotations:
    kubernetes.io/elb.lb-algorithm: ROUND_ROBIN             # Weighted round robin allocation policy
    kubernetes.io/elb.session-affinity-mode: APP_COOKIE     # Select APP_COOKIE.
    kubernetes.io/elb.session-affinity-option: '{"app_cookie_name":"test"}'  # Application cookie name
...

2. Create an ingress and associate it with a Service. The following example shows how to have a shared load balancer automatically created. For details about how to specify other types of load balancers, see Using kubectl to Create an ELB Ingress.
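   A sketch of such an ingress, combining the kubernetes.io/elb.autocreate annotation used elsewhere in this guide with the same rule as in the next step (adjust the bandwidth name and size to your needs):

   apiVersion: networking.k8s.io/v1
   kind: Ingress
   metadata:
     name: ingress-test
     namespace: default
     annotations:
       kubernetes.io/elb.class: union
       kubernetes.io/elb.port: '80'
       kubernetes.io/elb.autocreate: 
         '{
             "type":"public",
             "bandwidth_name":"cce-bandwidth-test",
             "bandwidth_chargemode":"traffic",
             "bandwidth_size":1,
             "bandwidth_sharetype":"PER",
             "eip_type":"5_bgp"
           }'
   spec:
     rules: 
     - host: 'www.example.com'
       http:
         paths:
         - path: '/'
           backend: 
             service:
               name: nginx      # Service name
               port: 
                 number: 80
           property:
             ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
           pathType: ImplementationSpecific
     ingressClassName: cce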

                                                                                                            3. Create an ingress and associate it with the Service. The following uses an existing load balancer as an example. For details about how to automatically create a load balancer, see Using kubectl to Create an ELB Ingress.

   apiVersion: networking.k8s.io/v1
   kind: Ingress 
   metadata: 
     name: ingress-test
     namespace: default
     annotations: 
       kubernetes.io/elb.class: union
       kubernetes.io/elb.port: '80'
       kubernetes.io/elb.id: *****    # ID of the existing load balancer
   spec:
     rules: 
     - host: 'www.example.com'
       http:
         paths:
         - path: '/'
           backend: 
             service:
               name: nginx     # Service name
               port: 
                 number: 80
           property:
             ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
           pathType: ImplementationSpecific
     ingressClassName: cce

4. Log in to the ELB console, access the details page of the target load balancer, and check whether the sticky session feature is enabled for the backend server group of the listener.