diff --git a/docs/cce/umn/ALL_META.TXT.json b/docs/cce/umn/ALL_META.TXT.json index e00f7a4d..b9d91c57 100644 --- a/docs/cce/umn/ALL_META.TXT.json +++ b/docs/cce/umn/ALL_META.TXT.json @@ -27,7 +27,7 @@ "node_id":"cce_productdesc_0001.xml", "product_code":"cce", "code":"2", - "des":"Cloud Container Engine (CCE) is a scalable, enterprise-class hosted Kubernetes service. With CCE, you can easily deploy, manage, and scale containerized applications in t", + "des":"Cloud Container Engine (CCE) is a hosted Kubernetes cluster service for enterprises. It offers complete lifecycle management for containerized applications and delivers s", "doc_type":"usermanual2", "kw":"What Is CCE?,Service Overview,User Guide", "search_title":"", @@ -45,7 +45,7 @@ "node_id":"cce_productdesc_0003.xml", "product_code":"cce", "code":"3", - "des":"CCE is a container service built on Docker and Kubernetes. A wealth of features enables you to run container clusters at scale. CCE eases containerization thanks to its r", + "des":"CCE is a container service built on Docker and Kubernetes. A wealth of features enable you to run container clusters at scale. CCE eases containerization thanks to its re", "doc_type":"usermanual2", "kw":"Product Advantages,Service Overview,User Guide", "search_title":"", @@ -99,7 +99,7 @@ "node_id":"cce_productdesc_0021.xml", "product_code":"cce", "code":"6", - "des":"Shopping apps and websites, especially during promotionsLive streaming, where service loads often fluctuateGames, where many players may go online in certain time periods", + "des":"Shopping apps and websites, especially during promotions and flash salesLive streaming, where service loads often fluctuateGames, where many players may go online in cert", "doc_type":"usermanual2", "kw":"Auto Scaling in Seconds,Application Scenarios,User Guide", "search_title":"", @@ -155,7 +155,7 @@ "code":"9", "des":"This section describes the notes and constraints on using CCE.After a cluster is created, the following items cannot be changed:Number of master nodes: For example, a non", "doc_type":"usermanual2", - "kw":"Storage Volumes,Data sharing,Constraints,Service Overview,User Guide", + "kw":"Storage Volumes,Data sharing,Notes and Constraints,Service Overview,User Guide", "search_title":"", "metedata":[ { @@ -163,7 +163,7 @@ "documenttype":"usermanual" } ], - "title":"Constraints", + "title":"Notes and Constraints", "githuburl":"" }, { @@ -241,23 +241,22 @@ "githuburl":"" }, { - "uri":"cce_bulletin_0003.html", - "node_id":"cce_bulletin_0003.xml", + "uri":"cce_bulletin_0033.html", + "node_id":"cce_bulletin_0033.xml", "product_code":"cce", "code":"14", - "des":"This section explains versioning in CCE, and the policies for Kubernetes version support.Version number: The format is x.y.z, where x.y is the major version and z is the ", + "des":"CCE provides highly scalable, high-performance, enterprise-class Kubernetes clusters. 
This section describes the Kubernetes version policy of CCE clusters.The CCE console", "doc_type":"usermanual2", - "kw":"Kubernetes Version Support Mechanism,Product Bulletin,User Guide", + "kw":"Kubernetes Version Policy,Product Bulletin,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", "opensource":"true", - "documenttype":"usermanual2", - "IsMulti":"Yes" + "documenttype":"usermanual" } ], - "title":"Kubernetes Version Support Mechanism", + "title":"Kubernetes Version Policy", "githuburl":"" }, { @@ -265,7 +264,7 @@ "node_id":"cce_bulletin_0061.xml", "product_code":"cce", "code":"15", - "des":"Dear users,We are pleased to announce that a brand-new CCE console is available. The new console is modern, visually appealing, and concise, providing a more comfortable ", + "des":"Released: Sep 3, 2023Dear users,We are pleased to announce that a brand-new CCE console is available. The new console is modern, visually appealing, and concise, providin", "doc_type":"usermanual2", "kw":"CCE Console Upgrade,Product Bulletin,User Guide", "search_title":"", @@ -388,7 +387,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Introduction", @@ -406,7 +405,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Preparations", @@ -424,7 +423,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a Kubernetes Cluster", @@ -442,7 +441,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a Deployment (Nginx)", @@ -460,7 +459,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Deploying WordPress and MySQL That Depend on Each Other", @@ -478,7 +477,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Overview", @@ -496,7 +495,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a MySQL Workload", @@ -514,7 +513,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a WordPress Workload", @@ -527,15 +526,15 @@ "code":"29", "des":"During service deployment or running, you may trigger high-risk operations at different levels, causing service faults or interruption. 
To help you better estimate and av", "doc_type":"usermanual2", - "kw":"High-Risk Operations and Solutions,User Guide", + "kw":"High-Risk Operations,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"High-Risk Operations and Solutions", + "title":"High-Risk Operations", "githuburl":"" }, { @@ -550,7 +549,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Clusters", @@ -568,7 +567,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Cluster Overview", @@ -586,7 +585,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Basic Cluster Information", @@ -604,17 +603,35 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Kubernetes Version Release Notes", "githuburl":"" }, + { + "uri":"cce_bulletin_0089.html", + "node_id":"cce_bulletin_0089.xml", + "product_code":"cce", + "code":"34", + "des":"CCE allows you to create Kubernetes clusters 1.29. This section describes the changes made in Kubernetes 1.29.New and Enhanced FeaturesAPI Changes and RemovalsEnhanced Ku", + "doc_type":"usermanual2", + "kw":"Kubernetes 1.29 Release Notes,Kubernetes Version Release Notes,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Kubernetes 1.29 Release Notes", + "githuburl":"" + }, { "uri":"cce_bulletin_0068.html", "node_id":"cce_bulletin_0068.xml", "product_code":"cce", - "code":"34", + "code":"35", "des":"CCE allows you to create Kubernetes clusters 1.28. This section describes the changes made in Kubernetes 1.28.Important NotesNew and Enhanced FeaturesAPI Changes and Remo", "doc_type":"usermanual2", "kw":"Kubernetes 1.28 Release Notes,Kubernetes Version Release Notes,User Guide", @@ -622,7 +639,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Kubernetes 1.28 Release Notes", @@ -632,7 +649,7 @@ "uri":"cce_bulletin_0059.html", "node_id":"cce_bulletin_0059.xml", "product_code":"cce", - "code":"35", + "code":"36", "des":"CCE allows you to create clusters of Kubernetes 1.27. This section describes the changes made in Kubernetes 1.27 compared with Kubernetes 1.25.New FeaturesDeprecations an", "doc_type":"usermanual2", "kw":"Kubernetes 1.27 Release Notes,Kubernetes Version Release Notes,User Guide", @@ -640,7 +657,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Kubernetes 1.27 Release Notes", @@ -650,7 +667,7 @@ "uri":"cce_bulletin_0058.html", "node_id":"cce_bulletin_0058.xml", "product_code":"cce", - "code":"36", + "code":"37", "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. 
This section describes the changes made in Kubernetes 1.25 compared wi", "doc_type":"usermanual2", "kw":"Kubernetes 1.25 Release Notes,Kubernetes Version Release Notes,User Guide", @@ -658,7 +675,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Kubernetes 1.25 Release Notes", @@ -668,7 +685,7 @@ "uri":"cce_bulletin_0027.html", "node_id":"cce_bulletin_0027.xml", "product_code":"cce", - "code":"37", + "code":"38", "des":"This section describes the updates in CCE Kubernetes 1.23.Kubernetes 1.23 Release NotesFlexVolume is deprecated. Use CSI.HorizontalPodAutoscaler v2 is promoted to GA, and", "doc_type":"usermanual2", "kw":"Kubernetes 1.23 Release Notes,Kubernetes Version Release Notes,User Guide", @@ -676,7 +693,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Kubernetes 1.23 Release Notes", @@ -686,25 +703,25 @@ "uri":"cce_bulletin_0026.html", "node_id":"cce_bulletin_0026.xml", "product_code":"cce", - "code":"38", + "code":"39", "des":"This section describes the updates in CCE Kubernetes 1.21.Kubernetes 1.21 Release NotesCronJob is now in the stable state, and the version number changes to batch/v1.The ", "doc_type":"usermanual2", - "kw":"Kubernetes 1.21 Release Notes,Kubernetes Version Release Notes,User Guide", + "kw":"Kubernetes 1.21 (EOM) Release Notes,Kubernetes Version Release Notes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Kubernetes 1.21 Release Notes", + "title":"Kubernetes 1.21 (EOM) Release Notes", "githuburl":"" }, { "uri":"cce_whsnew_0010.html", "node_id":"cce_whsnew_0010.xml", "product_code":"cce", - "code":"39", + "code":"40", "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.19.Kubernetes v", "doc_type":"usermanual2", "kw":"Kubernetes 1.19 (EOM) Release Notes,Kubernetes Version Release Notes,User Guide", @@ -712,7 +729,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Kubernetes 1.19 (EOM) Release Notes", @@ -722,7 +739,7 @@ "uri":"cce_whsnew_0007.html", "node_id":"cce_whsnew_0007.xml", "product_code":"cce", - "code":"40", + "code":"41", "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.17.All resource", "doc_type":"usermanual2", "kw":"Kubernetes 1.17 (EOM) Release Notes,Kubernetes Version Release Notes,User Guide", @@ -730,7 +747,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Kubernetes 1.17 (EOM) Release Notes", @@ -740,15 +757,15 @@ "uri":"cce_10_0405.html", "node_id":"cce_10_0405.xml", "product_code":"cce", - "code":"41", - "des":"In CCE v1.27 and later versions, all nodes support only the containerd container engine.All nodes in the CCE clusters of version 1.25, except the ones running EulerOS 2.5", + "code":"42", + "des":"dockershim has been removed since Kubernetes v1.24, and Docker is not supported in v1.24 and later versions by default. 
Use containerd.All nodes in the CCE clusters of ve", "doc_type":"usermanual2", "kw":"Patch Version Release Notes,Cluster Overview,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Patch Version Release Notes", @@ -758,7 +775,7 @@ "uri":"cce_10_0298.html", "node_id":"cce_10_0298.xml", "product_code":"cce", - "code":"42", + "code":"43", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Creating a Cluster", @@ -766,7 +783,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a Cluster", @@ -776,7 +793,7 @@ "uri":"cce_10_0342.html", "node_id":"cce_10_0342.xml", "product_code":"cce", - "code":"43", + "code":"44", "des":"CCE provides different types of clusters for you to select. The following table lists the differences between them.", "doc_type":"usermanual2", "kw":"Comparison Between Cluster Types,Creating a Cluster,User Guide", @@ -784,7 +801,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Comparison Between Cluster Types", @@ -794,7 +811,7 @@ "uri":"cce_10_0028.html", "node_id":"cce_10_0028.xml", "product_code":"cce", - "code":"44", + "code":"45", "des":"On the CCE console, you can easily create Kubernetes clusters. After a cluster is created, the master node is hosted by CCE. You only need to create worker nodes. In this", "doc_type":"usermanual2", "kw":"Creating a CCE Standard/Turbo Cluster,Creating a Cluster,User Guide", @@ -802,7 +819,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a CCE Standard/Turbo Cluster", @@ -812,7 +829,7 @@ "uri":"cce_10_0349.html", "node_id":"cce_10_0349.xml", "product_code":"cce", - "code":"45", + "code":"46", "des":"kube-proxy is a key component of a Kubernetes cluster. It is used for load balancing and forwarding data between a Service and its backend pods.CCE supports the iptables ", "doc_type":"usermanual2", "kw":"kube-proxy,iptables,IP Virtual Server (IPVS),forwarding modes,Comparing iptables and IPVS,Creating a", @@ -820,7 +837,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Comparing iptables and IPVS", @@ -830,7 +847,7 @@ "uri":"cce_10_0140.html", "node_id":"cce_10_0140.xml", "product_code":"cce", - "code":"46", + "code":"47", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Connecting to a Cluster", @@ -838,7 +855,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Connecting to a Cluster", @@ -848,7 +865,7 @@ "uri":"cce_10_0107.html", "node_id":"cce_10_0107.xml", "product_code":"cce", - "code":"47", + "code":"48", "des":"This section uses a CCE standard cluster as an example to describe how to access a CCE cluster using kubectl.When you access a cluster using kubectl, CCE uses kubeconfig ", "doc_type":"usermanual2", "kw":"kubectl,Intranet access,Two-Way Authentication for Domain Names,Error from server Forbidden,The conn", @@ -856,7 +873,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Connecting to a Cluster Using kubectl", @@ -866,25 +883,25 @@ "uri":"cce_10_0175.html", "node_id":"cce_10_0175.xml", "product_code":"cce", - "code":"48", - "des":"This section describes how to obtain the cluster certificate from the console and use it access Kubernetes clusters.The downloaded certificate contains three files: clien", + "code":"49", + "des":"This section describes how to obtain the cluster certificate from the console and use it to access Kubernetes clusters.The downloaded certificate contains three files: cl", "doc_type":"usermanual2", - "kw":"X.509 certificate,Connecting to a Cluster Using an X.509 Certificate,Connecting to a Cluster,User Gu", + "kw":"X.509 certificate,Accessing a Cluster Using an X.509 Certificate,Connecting to a Cluster,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Connecting to a Cluster Using an X.509 Certificate", + "title":"Accessing a Cluster Using an X.509 Certificate", "githuburl":"" }, { "uri":"cce_10_0367.html", "node_id":"cce_10_0367.xml", "product_code":"cce", - "code":"49", + "code":"50", "des":"Subject Alternative Name (SAN) allows multiple values (including IP addresses, domain names, and so on) to be associated with certificates. A SAN is usually used by the c", "doc_type":"usermanual2", "kw":"SAN,X.509 certificate,Accessing a Cluster Using a Custom Domain Name,Connecting to a Cluster,User Gu", @@ -892,1403 +909,35 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Accessing a Cluster Using a Custom Domain Name", "githuburl":"" }, { - "uri":"cce_10_0215.html", - "node_id":"cce_10_0215.xml", - "product_code":"cce", - "code":"50", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Upgrading a Cluster", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Upgrading a Cluster", - "githuburl":"" - }, - { - "uri":"cce_10_0197.html", - "node_id":"cce_10_0197.xml", + "uri":"cce_10_0864.html", + "node_id":"cce_10_0864.xml", "product_code":"cce", "code":"51", - "des":"CCE strictly complies with community consistency authentication. 
It releases three Kubernetes versions each year and offers a maintenance period of at least 24 months aft", + "des":"You can bind an EIP to an API server of a Kubernetes cluster so that the API server can access the Internet.Binding an EIP to an API server for Internet access can pose a", "doc_type":"usermanual2", - "kw":"cluster upgrade process,Node Priority,In-place upgrade,Upgrade Overview,Upgrading a Cluster,User Gui", + "kw":"Configuring a Cluster's API Server for Internet Access,Connecting to a Cluster,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Upgrade Overview", - "githuburl":"" - }, - { - "uri":"cce_10_0302.html", - "node_id":"cce_10_0302.xml", - "product_code":"cce", - "code":"52", - "des":"Before the upgrade, you can check whether your cluster can be upgraded and which versions are available on the CCE console. For details, see Upgrade Overview.Before upgra", - "doc_type":"usermanual2", - "kw":"Deprecated APIs,Before You Start,Upgrading a Cluster,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Before You Start", - "githuburl":"" - }, - { - "uri":"cce_10_0560.html", - "node_id":"cce_10_0560.xml", - "product_code":"cce", - "code":"53", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Performing Post-Upgrade Verification", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Performing Post-Upgrade Verification", - "githuburl":"" - }, - { - "uri":"cce_10_0568.html", - "node_id":"cce_10_0568.xml", - "product_code":"cce", - "code":"54", - "des":"After a cluster is upgraded, check whether the cluster is in the Running state.CCE automatically checks your cluster status. Go to the cluster list page and confirm the c", - "doc_type":"usermanual2", - "kw":"Cluster Status Check,Performing Post-Upgrade Verification,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Cluster Status Check", - "githuburl":"" - }, - { - "uri":"cce_10_0569.html", - "node_id":"cce_10_0569.xml", - "product_code":"cce", - "code":"55", - "des":"After a cluster is upgraded, check whether nodes in the cluster are in the Running state.CCE automatically checks your node statuses. Go to the node list page and confirm", - "doc_type":"usermanual2", - "kw":"Node Status Check,Performing Post-Upgrade Verification,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Status Check", - "githuburl":"" - }, - { - "uri":"cce_10_0567.html", - "node_id":"cce_10_0567.xml", - "product_code":"cce", - "code":"56", - "des":"After a cluster is upgraded, check whether there are any nodes that skip the upgrade in the cluster. 
These nodes may affect the proper running of the cluster.CCE automati", - "doc_type":"usermanual2", - "kw":"Node Skipping Check,Performing Post-Upgrade Verification,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Skipping Check", - "githuburl":"" - }, - { - "uri":"cce_10_0561.html", - "node_id":"cce_10_0561.xml", - "product_code":"cce", - "code":"57", - "des":"After a cluster is upgraded, check whether its services are running properly.Different services have different verification mode. Select a suitable one and verify the ser", - "doc_type":"usermanual2", - "kw":"Service Check,Performing Post-Upgrade Verification,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Service Check", - "githuburl":"" - }, - { - "uri":"cce_10_0565.html", - "node_id":"cce_10_0565.xml", - "product_code":"cce", - "code":"58", - "des":"Check whether nodes can be created in the cluster.If nodes cannot be created in your cluster after the cluster is upgraded, contact technical support.", - "doc_type":"usermanual2", - "kw":"New Node Check,Performing Post-Upgrade Verification,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"New Node Check", - "githuburl":"" - }, - { - "uri":"cce_10_0566.html", - "node_id":"cce_10_0566.xml", - "product_code":"cce", - "code":"59", - "des":"Check whether pods can be created on the existing nodes after the cluster is upgraded.Check whether pods can be created on new nodes after the cluster is upgraded.After c", - "doc_type":"usermanual2", - "kw":"New Pod Check,Performing Post-Upgrade Verification,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"New Pod Check", - "githuburl":"" - }, - { - "uri":"cce_10_0210.html", - "node_id":"cce_10_0210.xml", - "product_code":"cce", - "code":"60", - "des":"This section describes how to migrate services from a cluster of an earlier version to a cluster of a later version in CCE.This operation is applicable when a cross-versi", - "doc_type":"usermanual2", - "kw":"Migrating Services Across Clusters of Different Versions,Upgrading a Cluster,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Migrating Services Across Clusters of Different Versions", - "githuburl":"" - }, - { - "uri":"cce_10_0550.html", - "node_id":"cce_10_0550.xml", - "product_code":"cce", - "code":"61", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Troubleshooting for Pre-upgrade Check Exceptions", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Troubleshooting for Pre-upgrade Check Exceptions", - "githuburl":"" - }, - { - "uri":"cce_10_0549.html", - "node_id":"cce_10_0549.xml", - "product_code":"cce", - "code":"62", - "des":"The system automatically checks a cluster before its upgrade. If the cluster does not meet the pre-upgrade check conditions, the upgrade cannot continue. 
To avoid risks, ", - "doc_type":"usermanual2", - "kw":"Pre-upgrade Check,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Pre-upgrade Check", - "githuburl":"" - }, - { - "uri":"cce_10_0431.html", - "node_id":"cce_10_0431.xml", - "product_code":"cce", - "code":"63", - "des":"Check the following items:Check whether the node is available.Check whether the node OS supports the upgrade.Check whether the node is marked with unexpected node pool la", - "doc_type":"usermanual2", - "kw":"Node Restrictions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Restrictions", - "githuburl":"" - }, - { - "uri":"cce_10_0432.html", - "node_id":"cce_10_0432.xml", - "product_code":"cce", - "code":"64", - "des":"Check whether the target cluster is under upgrade management.CCE may temporarily restrict the cluster upgrade due to the following reasons:The cluster is identified as th", - "doc_type":"usermanual2", - "kw":"Upgrade Management,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Upgrade Management", - "githuburl":"" - }, - { - "uri":"cce_10_0433.html", - "node_id":"cce_10_0433.xml", - "product_code":"cce", - "code":"65", - "des":"Check the following items:Check whether the add-on status is normal.Check whether the add-on support the target version.Scenario 1: The add-on malfunctions.Log in to the ", - "doc_type":"usermanual2", - "kw":"Add-ons,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Add-ons", - "githuburl":"" - }, - { - "uri":"cce_10_0434.html", - "node_id":"cce_10_0434.xml", - "product_code":"cce", - "code":"66", - "des":"Check whether the current HelmRelease record contains discarded Kubernetes APIs that are not supported by the target cluster version. 
If yes, the Helm chart may be unavai", - "doc_type":"usermanual2", - "kw":"Helm Charts,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Helm Charts", - "githuburl":"" - }, - { - "uri":"cce_10_0435.html", - "node_id":"cce_10_0435.xml", - "product_code":"cce", - "code":"67", - "des":"Check whether CCE can connect to your master nodes.Contact technical support.", - "doc_type":"usermanual2", - "kw":"SSH Connectivity of Master Nodes,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"SSH Connectivity of Master Nodes", - "githuburl":"" - }, - { - "uri":"cce_10_0436.html", - "node_id":"cce_10_0436.xml", - "product_code":"cce", - "code":"68", - "des":"Check the node pool status.Check whether the node pool OS or container runtime is supported after the upgrade.Scenario: The node pool malfunctions.Log in to the CCE conso", - "doc_type":"usermanual2", - "kw":"Node Pools,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Pools", - "githuburl":"" - }, - { - "uri":"cce_10_0437.html", - "node_id":"cce_10_0437.xml", - "product_code":"cce", - "code":"69", - "des":"Check whether the Protocol & Port of the worker node security groups are set to ICMP: All and whether the security group with the source IP address set to the master node", - "doc_type":"usermanual2", - "kw":"Security Groups,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Security Groups", - "githuburl":"" - }, - { - "uri":"cce_10_0439.html", - "node_id":"cce_10_0439.xml", - "product_code":"cce", - "code":"70", - "des":"Check whether the node needs to be migrated.For the 1.15 cluster that is upgraded from 1.13 in rolling mode, migrate (reset or create and replace) all nodes before perfor", - "doc_type":"usermanual2", - "kw":"To-Be-Migrated Nodes,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"To-Be-Migrated Nodes", - "githuburl":"" - }, - { - "uri":"cce_10_0440.html", - "node_id":"cce_10_0440.xml", - "product_code":"cce", - "code":"71", - "des":"Check whether there are discarded resources in the clusters.Scenario: The Service in the clusters of v1.25 or later has discarded annotation: tolerate-unready-endpoints.E", - "doc_type":"usermanual2", - "kw":"Discarded Kubernetes Resources,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Discarded Kubernetes Resources", - "githuburl":"" - }, - { - "uri":"cce_10_0441.html", - "node_id":"cce_10_0441.xml", - "product_code":"cce", - "code":"72", - "des":"Read the version compatibility differences and ensure that they are not affected. 
The patch upgrade does not involve version compatibility differences.", - "doc_type":"usermanual2", - "kw":"Compatibility Risks,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Compatibility Risks", - "githuburl":"" - }, - { - "uri":"cce_10_0442.html", - "node_id":"cce_10_0442.xml", - "product_code":"cce", - "code":"73", - "des":"Check whether cce-agent on the current node is of the latest version.Scenario 1: The error message \"you cce-agent no update, please restart it\" is displayed.cce-agent doe", - "doc_type":"usermanual2", - "kw":"CCE Agent Versions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"CCE Agent Versions", - "githuburl":"" - }, - { - "uri":"cce_10_0443.html", - "node_id":"cce_10_0443.xml", - "product_code":"cce", - "code":"74", - "des":"Check whether the CPU usage of the node exceeds 90%.Upgrade the cluster during off-peak hours.Check whether too many pods are deployed on the node. If yes, reschedule pod", - "doc_type":"usermanual2", - "kw":"Node CPU Usage,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node CPU Usage", - "githuburl":"" - }, - { - "uri":"cce_10_0444.html", - "node_id":"cce_10_0444.xml", - "product_code":"cce", - "code":"75", - "des":"Check the following items:Check whether the key CRD packageversions.version.cce.io of the cluster is deleted.Check whether the cluster key CRD network-attachment-definiti", - "doc_type":"usermanual2", - "kw":"CRDs,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"CRDs", - "githuburl":"" - }, - { - "uri":"cce_10_0445.html", - "node_id":"cce_10_0445.xml", - "product_code":"cce", - "code":"76", - "des":"Check the following items:Check whether the key data disks on the node meet the upgrade requirements.Check whether the /tmp directory has 500 MB available space.During th", - "doc_type":"usermanual2", - "kw":"Node Disks,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Disks", - "githuburl":"" - }, - { - "uri":"cce_10_0446.html", - "node_id":"cce_10_0446.xml", - "product_code":"cce", - "code":"77", - "des":"Check the following items:Check whether the DNS configuration of the current node can resolve the OBS address.Check whether the current node can access the OBS address of", - "doc_type":"usermanual2", - "kw":"Node DNS,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node DNS", - "githuburl":"" - }, - { - "uri":"cce_10_0447.html", - "node_id":"cce_10_0447.xml", - "product_code":"cce", - "code":"78", - "des":"Check whether the owner and owner group of the files in the /var/paas directory used by the CCE are both paas.Scenario 1: The error message \"xx file permission has been c", - "doc_type":"usermanual2", - "kw":"Node Key Directory File Permissions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - 
"documenttype":"usermanual2" - } - ], - "title":"Node Key Directory File Permissions", - "githuburl":"" - }, - { - "uri":"cce_10_0448.html", - "node_id":"cce_10_0448.xml", - "product_code":"cce", - "code":"79", - "des":"Check whether the kubelet on the node is running properly.Scenario 1: The kubelet status is abnormal.If the kubelet malfunctions, the node is unavailable. Restore the nod", - "doc_type":"usermanual2", - "kw":"Kubelet,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Kubelet", - "githuburl":"" - }, - { - "uri":"cce_10_0449.html", - "node_id":"cce_10_0449.xml", - "product_code":"cce", - "code":"80", - "des":"Check whether the memory usage of the node exceeds 90%.Upgrade the cluster during off-peak hours.Check whether too many pods are deployed on the node. If yes, reschedule ", - "doc_type":"usermanual2", - "kw":"Node Memory,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Memory", - "githuburl":"" - }, - { - "uri":"cce_10_0450.html", - "node_id":"cce_10_0450.xml", - "product_code":"cce", - "code":"81", - "des":"Check whether the clock synchronization server ntpd or chronyd of the node is running properly.Scenario 1: ntpd is running abnormally.Log in to the node and run the syste", - "doc_type":"usermanual2", - "kw":"Node Clock Synchronization Server,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Clock Synchronization Server", - "githuburl":"" - }, - { - "uri":"cce_10_0451.html", - "node_id":"cce_10_0451.xml", - "product_code":"cce", - "code":"82", - "des":"Check whether the OS kernel version of the node is supported by CCE.Case 1: The node image is not a standard CCE image.CCE nodes run depending on the initial standard ker", - "doc_type":"usermanual2", - "kw":"Node OS,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node OS", - "githuburl":"" - }, - { - "uri":"cce_10_0452.html", - "node_id":"cce_10_0452.xml", - "product_code":"cce", - "code":"83", - "des":"Check whether the number of CPUs on the master node is greater than 2.If the number of CPUs on the master node is 2, contact technical support to expand the number to 4 o", - "doc_type":"usermanual2", - "kw":"Node CPUs,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node CPUs", - "githuburl":"" - }, - { - "uri":"cce_10_0453.html", - "node_id":"cce_10_0453.xml", - "product_code":"cce", - "code":"84", - "des":"Check whether the Python commands are available on a node.If the command output is not 0, the check fails.Install Python before the upgrade.", - "doc_type":"usermanual2", - "kw":"Node Python Commands,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Python Commands", - "githuburl":"" - }, - { - "uri":"cce_10_0455.html", - "node_id":"cce_10_0455.xml", - "product_code":"cce", - "code":"85", - "des":"Check whether the nodes in the cluster are ready.Scenario 1: The nodes are 
in the unavailable status.Log in to the CCE console and click the cluster name to access the cl", - "doc_type":"usermanual2", - "kw":"Node Readiness,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Readiness", - "githuburl":"" - }, - { - "uri":"cce_10_0456.html", - "node_id":"cce_10_0456.xml", - "product_code":"cce", - "code":"86", - "des":"Check whether journald of a node is normal.Log in to the node and run the systemctl is-active systemd-journald command to obtain the running status of journald. If the co", - "doc_type":"usermanual2", - "kw":"Node journald,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node journald", - "githuburl":"" - }, - { - "uri":"cce_10_0457.html", - "node_id":"cce_10_0457.xml", - "product_code":"cce", - "code":"87", - "des":"Check whether the containerd.sock file exists on the node. This file affects the startup of container runtime in the Euler OS.Scenario: The Docker used by the node is the", - "doc_type":"usermanual2", - "kw":"containerd.sock,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"containerd.sock", - "githuburl":"" - }, - { - "uri":"cce_10_0458.html", - "node_id":"cce_10_0458.xml", - "product_code":"cce", - "code":"88", - "des":"Before the upgrade, check whether an internal error occurs.If this check fails, contact technical support.", - "doc_type":"usermanual2", - "kw":"Internal Errors,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Internal Errors", - "githuburl":"" - }, - { - "uri":"cce_10_0459.html", - "node_id":"cce_10_0459.xml", - "product_code":"cce", - "code":"89", - "des":"Check whether inaccessible mount points exist on the node.Scenario: There are inaccessible mount points on the node.If NFS (such as obsfs or SFS) is used by the node and ", - "doc_type":"usermanual2", - "kw":"Node Mount Points,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Mount Points", - "githuburl":"" - }, - { - "uri":"cce_10_0460.html", - "node_id":"cce_10_0460.xml", - "product_code":"cce", - "code":"90", - "des":"Check whether the taint needed for cluster upgrade exists on the node.Scenario 1: The node is skipped during the cluster upgrade.If the version of the node is different f", - "doc_type":"usermanual2", - "kw":"Kubernetes Node Taints,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Kubernetes Node Taints", - "githuburl":"" - }, - { - "uri":"cce_10_0478.html", - "node_id":"cce_10_0478.xml", - "product_code":"cce", - "code":"91", - "des":"Check whether there are any compatibility restrictions on the current Everest add-on.There are compatibility restrictions on the current Everest add-on and it cannot be u", - "doc_type":"usermanual2", - "kw":"Everest Restrictions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } 
- ], - "title":"Everest Restrictions", - "githuburl":"" - }, - { - "uri":"cce_10_0479.html", - "node_id":"cce_10_0479.xml", - "product_code":"cce", - "code":"92", - "des":"Check whether the current cce-controller-hpa add-on has compatibility restrictions.The current cce-controller-hpa add-on has compatibility restrictions. An add-on that ca", - "doc_type":"usermanual2", - "kw":"cce-hpa-controller Restrictions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"cce-hpa-controller Restrictions", - "githuburl":"" - }, - { - "uri":"cce_10_0480.html", - "node_id":"cce_10_0480.xml", - "product_code":"cce", - "code":"93", - "des":"Check whether the current cluster version and the target version support enhanced CPU policy.Scenario: Only the current cluster version supports the enhanced CPU policy f", - "doc_type":"usermanual2", - "kw":"Enhanced CPU Policies,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Enhanced CPU Policies", - "githuburl":"" - }, - { - "uri":"cce_10_0484.html", - "node_id":"cce_10_0484.xml", - "product_code":"cce", - "code":"94", - "des":"Check whether the container runtime and network components on the worker nodes are healthy.If a worker node component malfunctions, log in to the node to check the status", - "doc_type":"usermanual2", - "kw":"Health of Worker Node Components,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Health of Worker Node Components", - "githuburl":"" - }, - { - "uri":"cce_10_0485.html", - "node_id":"cce_10_0485.xml", - "product_code":"cce", - "code":"95", - "des":"Check whether the Kubernetes, container runtime, and network components of the master nodes are healthy.If a master node component malfunctions, contact technical support", - "doc_type":"usermanual2", - "kw":"Health of Master Node Components,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Health of Master Node Components", - "githuburl":"" - }, - { - "uri":"cce_10_0486.html", - "node_id":"cce_10_0486.xml", - "product_code":"cce", - "code":"96", - "des":"Check whether the resources of Kubernetes components, such as etcd and kube-controller-manager, exceed the upper limit.Solution 1: Reduce Kubernetes resources that are ne", - "doc_type":"usermanual2", - "kw":"Memory Resource Limit of Kubernetes Components,Troubleshooting for Pre-upgrade Check Exceptions,User", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Memory Resource Limit of Kubernetes Components", - "githuburl":"" - }, - { - "uri":"cce_10_0487.html", - "node_id":"cce_10_0487.xml", - "product_code":"cce", - "code":"97", - "des":"The system scans the audit logs of the past day to check whether the user calls the deprecated APIs of the target Kubernetes version.Due to the limited time range of audi", - "doc_type":"usermanual2", - "kw":"Discarded Kubernetes APIs,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Discarded Kubernetes APIs", - "githuburl":"" - }, - { - 
"uri":"cce_10_0488.html", - "node_id":"cce_10_0488.xml", - "product_code":"cce", - "code":"98", - "des":"If IPv6 is enabled for a CCE Turbo cluster, check whether the target cluster version supports IPv6.CCE Turbo clusters support IPv6 since v1.23. This feature is available ", - "doc_type":"usermanual2", - "kw":"IPv6 Capabilities of a CCE Turbo Cluster,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"IPv6 Capabilities of a CCE Turbo Cluster", - "githuburl":"" - }, - { - "uri":"cce_10_0489.html", - "node_id":"cce_10_0489.xml", - "product_code":"cce", - "code":"99", - "des":"Check whether NetworkManager of a node is normal.Log in to the node and run the systemctl is-active NetworkManager command to obtain the running status of NetworkManager.", - "doc_type":"usermanual2", - "kw":"Node NetworkManager,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node NetworkManager", - "githuburl":"" - }, - { - "uri":"cce_10_0490.html", - "node_id":"cce_10_0490.xml", - "product_code":"cce", - "code":"100", - "des":"Check the ID file format.", - "doc_type":"usermanual2", - "kw":"Node ID File,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node ID File", - "githuburl":"" - }, - { - "uri":"cce_10_0491.html", - "node_id":"cce_10_0491.xml", - "product_code":"cce", - "code":"101", - "des":"When you upgrade a cluster to v1.19 or later, the system checks whether the following configuration files have been modified on the backend:/opt/cloud/cce/kubernetes/kube", - "doc_type":"usermanual2", - "kw":"Node Configuration Consistency,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Configuration Consistency", - "githuburl":"" - }, - { - "uri":"cce_10_0492.html", - "node_id":"cce_10_0492.xml", - "product_code":"cce", - "code":"102", - "des":"Check whether the configuration files of key components exist on the node.The following table lists the files to be checked.Contact technical support to restore the confi", - "doc_type":"usermanual2", - "kw":"Node Configuration File,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Configuration File", - "githuburl":"" - }, - { - "uri":"cce_10_0493.html", - "node_id":"cce_10_0493.xml", - "product_code":"cce", - "code":"103", - "des":"Check whether the current CoreDNS key configuration Corefile is different from the Helm release record. 
The difference may be overwritten during the add-on upgrade, affec", - "doc_type":"usermanual2", - "kw":"CoreDNS Configuration Consistency,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"CoreDNS Configuration Consistency", - "githuburl":"" - }, - { - "uri":"cce_10_0494.html", - "node_id":"cce_10_0494.xml", - "product_code":"cce", - "code":"104", - "des":"Whether the sudo commands and sudo-related files of the node are workingScenario 1: The sudo command fails to be executed.During the in-place cluster upgrade, the sudo co", - "doc_type":"usermanual2", - "kw":"sudo Commands of a Node,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"sudo Commands of a Node", - "githuburl":"" - }, - { - "uri":"cce_10_0495.html", - "node_id":"cce_10_0495.xml", - "product_code":"cce", - "code":"105", - "des":"Whether some key commands that the node upgrade depends on are workingScenario 1: Executing the package manager command failed.Executing the rpm or dpkg command failed. I", - "doc_type":"usermanual2", - "kw":"Key Commands of Nodes,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Key Commands of Nodes", - "githuburl":"" - }, - { - "uri":"cce_10_0496.html", - "node_id":"cce_10_0496.xml", - "product_code":"cce", - "code":"106", - "des":"Check whether the docker/containerd.sock file is directly mounted to the pods on a node. During an upgrade, Docker or containerd restarts and the sock file on the host ch", - "doc_type":"usermanual2", - "kw":"Mounting of a Sock File on a Node,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Mounting of a Sock File on a Node", - "githuburl":"" - }, - { - "uri":"cce_10_0497.html", - "node_id":"cce_10_0497.xml", - "product_code":"cce", - "code":"107", - "des":"Check whether the certificate used by an HTTPS load balancer has been modified on ELB.The certificate referenced by an HTTPS ingress created on CCE is modified on the ELB", - "doc_type":"usermanual2", - "kw":"HTTPS Load Balancer Certificate Consistency,Troubleshooting for Pre-upgrade Check Exceptions,User Gu", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"HTTPS Load Balancer Certificate Consistency", - "githuburl":"" - }, - { - "uri":"cce_10_0498.html", - "node_id":"cce_10_0498.xml", - "product_code":"cce", - "code":"108", - "des":"Check whether the default mount directory and soft link on the node have been manually mounted or modified.Non-shared diskBy default, /var/lib/docker, containerd, or /mnt", - "doc_type":"usermanual2", - "kw":"Node Mounting,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Mounting", - "githuburl":"" - }, - { - "uri":"cce_10_0499.html", - "node_id":"cce_10_0499.xml", - "product_code":"cce", - "code":"109", - "des":"Check whether user paas is allowed to log in to a node.Run the following command to check whether user paas is allowed to log in to a node:If the permissions assigned to ", - "doc_type":"usermanual2", - 
"kw":"Login Permissions of User paas on a Node,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Login Permissions of User paas on a Node", - "githuburl":"" - }, - { - "uri":"cce_10_0500.html", - "node_id":"cce_10_0500.xml", - "product_code":"cce", - "code":"110", - "des":"Check whether the load balancer associated with a Service is allocated with a private IPv4 address.Solution 1: Delete the Service that is associated with a load balancer ", - "doc_type":"usermanual2", - "kw":"Private IPv4 Addresses of Load Balancers,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Private IPv4 Addresses of Load Balancers", - "githuburl":"" - }, - { - "uri":"cce_10_0501.html", - "node_id":"cce_10_0501.xml", - "product_code":"cce", - "code":"111", - "des":"Check whether the source version of the cluster is earlier than v1.11 and the target version is later than v1.23.If the source version of the cluster is earlier than v1.1", - "doc_type":"usermanual2", - "kw":"Historical Upgrade Records,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Historical Upgrade Records", - "githuburl":"" - }, - { - "uri":"cce_10_0502.html", - "node_id":"cce_10_0502.xml", - "product_code":"cce", - "code":"112", - "des":"Check whether the CIDR block of the cluster management plane is the same as that configured on the backbone network.If the CIDR block of the cluster management plane is d", - "doc_type":"usermanual2", - "kw":"CIDR Block of the Cluster Management Plane,Troubleshooting for Pre-upgrade Check Exceptions,User Gui", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"CIDR Block of the Cluster Management Plane", - "githuburl":"" - }, - { - "uri":"cce_10_0503.html", - "node_id":"cce_10_0503.xml", - "product_code":"cce", - "code":"113", - "des":"The GPU add-on is involved in the upgrade, which may affect the GPU driver installation during the creation of a GPU node.The GPU add-on driver needs to be configured by ", - "doc_type":"usermanual2", - "kw":"GPU Add-on,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"GPU Add-on", - "githuburl":"" - }, - { - "uri":"cce_10_0504.html", - "node_id":"cce_10_0504.xml", - "product_code":"cce", - "code":"114", - "des":"Check whether the default system parameter settings on your nodes are modified.If the MTU value of the bond0 network on your BMS node is not the default value 1500, this ", - "doc_type":"usermanual2", - "kw":"Nodes' System Parameter Settings,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Nodes' System Parameter Settings", - "githuburl":"" - }, - { - "uri":"cce_10_0505.html", - "node_id":"cce_10_0505.xml", - "product_code":"cce", - "code":"115", - "des":"Check whether there are residual package version data in the current cluster.A message is displayed indicating that there are residual 10.12.1.109 CRD resources in your c", - "doc_type":"usermanual2", - "kw":"Residual Package Versions,Troubleshooting for 
Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Residual Package Versions", - "githuburl":"" - }, - { - "uri":"cce_10_0506.html", - "node_id":"cce_10_0506.xml", - "product_code":"cce", - "code":"116", - "des":"Check whether the commands required for the upgrade are available on the node.The cluster upgrade failure is typically caused by the lack of key node commands that are re", - "doc_type":"usermanual2", - "kw":"Node Commands,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Commands", - "githuburl":"" - }, - { - "uri":"cce_10_0507.html", - "node_id":"cce_10_0507.xml", - "product_code":"cce", - "code":"117", - "des":"Check whether swap has been enabled on cluster nodes.By default, swap is disabled on CCE nodes. Check the necessity of enabling swap manually and determine the impact of ", - "doc_type":"usermanual2", - "kw":"Node Swap,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Swap", - "githuburl":"" - }, - { - "uri":"cce_10_0510.html", - "node_id":"cce_10_0510.xml", - "product_code":"cce", - "code":"118", - "des":"Check whether the service container running on the node may restart when the containerd component is upgraded on the node that uses containerd in the current cluster.Ensu", - "doc_type":"usermanual2", - "kw":"Check containerd pod restart risk,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Check containerd pod restart risk", - "githuburl":"" - }, - { - "uri":"cce_10_0511.html", - "node_id":"cce_10_0511.xml", - "product_code":"cce", - "code":"119", - "des":"Check whether the configuration of the CCE AI Suite add-on in a cluster has been intrusively modified. 
If so, upgrading the cluster may fail.", - "doc_type":"usermanual2", - "kw":"Key GPU Add-on Parameters,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Key GPU Add-on Parameters", - "githuburl":"" - }, - { - "uri":"cce_10_0512.html", - "node_id":"cce_10_0512.xml", - "product_code":"cce", - "code":"120", - "des":"Check whether GPU or NPU service pods are rebuilt in a cluster when kubelet is restarted during the upgrade of the cluster.Upgrade the cluster when the impact on services", - "doc_type":"usermanual2", - "kw":"GPU or NPU Pod Rebuild Risks,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"GPU or NPU Pod Rebuild Risks", - "githuburl":"" - }, - { - "uri":"cce_10_0513.html", - "node_id":"cce_10_0513.xml", - "product_code":"cce", - "code":"121", - "des":"Check whether the access control of the ELB listener has been configured for the Service in the current cluster using annotations and whether the configurations are corre", - "doc_type":"usermanual2", - "kw":"ELB Listener Access Control,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"ELB Listener Access Control", - "githuburl":"" - }, - { - "uri":"cce_10_0514.html", - "node_id":"cce_10_0514.xml", - "product_code":"cce", - "code":"122", - "des":"Check whether the flavor of the master nodes in the cluster is the same as the actual flavor of these nodes.Flavor inconsistency is typically due to a modification made o", - "doc_type":"usermanual2", - "kw":"Master Node Flavor,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Master Node Flavor", - "githuburl":"" - }, - { - "uri":"cce_10_0515.html", - "node_id":"cce_10_0515.xml", - "product_code":"cce", - "code":"123", - "des":"Check whether the number of available IP addresses in the cluster subnet supports rolling upgrade.If the number of IP addresses in the selected cluster subnet is insuffic", - "doc_type":"usermanual2", - "kw":"Subnet Quota of Master Nodes,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Subnet Quota of Master Nodes", - "githuburl":"" - }, - { - "uri":"cce_10_0516.html", - "node_id":"cce_10_0516.xml", - "product_code":"cce", - "code":"124", - "des":"Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. Do not use Docker in clusters of versions later than 1.27.If the runtime on your node is", - "doc_type":"usermanual2", - "kw":"Node Runtime,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Runtime", - "githuburl":"" - }, - { - "uri":"cce_10_0517.html", - "node_id":"cce_10_0517.xml", - "product_code":"cce", - "code":"125", - "des":"Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. 
Do not use Docker in clusters of versions later than 1.27.If the runtime on your node po", - "doc_type":"usermanual2", - "kw":"Node Pool Runtime,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Pool Runtime", - "githuburl":"" - }, - { - "uri":"cce_10_0518.html", - "node_id":"cce_10_0518.xml", - "product_code":"cce", - "code":"126", - "des":"Check the number of images on your node. If the number is greater than 1000, Docker startup may be slow.Contact O&M personnel to check whether this issue affects the upgr", - "doc_type":"usermanual2", - "kw":"Number of Node Images,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Number of Node Images", + "title":"Configuring a Cluster's API Server for Internet Access", "githuburl":"" }, { "uri":"cce_10_0031.html", "node_id":"cce_10_0031.xml", "product_code":"cce", - "code":"127", + "code":"52", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Managing a Cluster", @@ -2296,7 +945,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Managing a Cluster", @@ -2306,69 +955,87 @@ "uri":"cce_10_0213.html", "node_id":"cce_10_0213.xml", "product_code":"cce", - "code":"128", - "des":"CCE allows you to manage cluster parameters, through which you can let core components work under your requirements.This function is supported only in clusters of v1.15 a", + "code":"53", + "des":"CCE allows you to manage cluster parameters, through which you can let core components work under your requirements.kube-apiserverkube-controller-managerkube-scheduler", "doc_type":"usermanual2", - "kw":"cluster parameters,kube-apiserver,kube-controller-manager,Cluster Configuration Management,Managing ", + "kw":"cluster parameters,kube-apiserver,kube-controller-manager,Modifying Cluster Configurations,Managing ", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Cluster Configuration Management", + "title":"Modifying Cluster Configurations", "githuburl":"" }, { "uri":"cce_10_0602.html", "node_id":"cce_10_0602.xml", "product_code":"cce", - "code":"129", - "des":"If enabled, concurrent requests are dynamically controlled based on the resource pressure of master nodes to keep them and the cluster available.The cluster version must ", + "code":"54", + "des":"After overload control is enabled, the number of simultaneous requests is dynamically regulated according to the resource pressure on the master nodes. 
This ensures that ", "doc_type":"usermanual2", - "kw":"Cluster Overload Control,Managing a Cluster,User Guide", + "kw":"overload control,Enabling Overload Control for a Cluster,Managing a Cluster,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Cluster Overload Control", + "title":"Enabling Overload Control for a Cluster", "githuburl":"" }, { "uri":"cce_10_0403.html", "node_id":"cce_10_0403.xml", "product_code":"cce", - "code":"130", - "des":"CCE allows you to change the number of nodes managed in a cluster.This function is supported for clusters of v1.15 and later versions.Starting from v1.15.11, the number o", + "code":"55", + "des":"CCE allows you to change the number of nodes managed in a cluster.A cluster that has only one master node supports fewer than 1000 worker nodes.The number of master nodes", "doc_type":"usermanual2", "kw":"Changing Cluster Scale,Managing a Cluster,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Changing Cluster Scale", "githuburl":"" }, + { + "uri":"cce_10_0426.html", + "node_id":"cce_10_0426.xml", + "product_code":"cce", + "code":"56", + "des":"When creating a cluster, you can customize a node security group to centrally manage network security policies. For a created cluster, you can change its default node sec", + "doc_type":"usermanual2", + "kw":"Changing the Default Security Group of a Node,Managing a Cluster,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Changing the Default Security Group of a Node", + "githuburl":"" + }, { "uri":"cce_10_0212.html", "node_id":"cce_10_0212.xml", "product_code":"cce", - "code":"131", - "des":"Deleting a cluster will delete the nodes in the cluster (excluding accepted nodes), data disks attached to the nodes, workloads, and Services. Related services cannot be ", + "code":"57", + "des":"Deleting a cluster will delete the workloads and Services in the cluster, and the deleted data cannot be recovered. 
Before performing this operation, ensure that related ", "doc_type":"usermanual2", "kw":"Deleting a Cluster,Managing a Cluster,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Deleting a Cluster", @@ -2378,25 +1045,1411 @@ "uri":"cce_10_0214.html", "node_id":"cce_10_0214.xml", "product_code":"cce", - "code":"132", - "des":"If you do not need to use a cluster temporarily, hibernate the cluster.After a cluster is hibernated, resources such as workloads cannot be created or managed in the clus", + "code":"58", + "des":"If a pay-per-use cluster is not needed temporarily, hibernate it to reduce costs.After a cluster is hibernated, resources such as workloads cannot be created or managed i", "doc_type":"usermanual2", - "kw":"Hibernating and Waking Up a Cluster,Managing a Cluster,User Guide", + "kw":"Hibernating or Waking Up a Cluster,Managing a Cluster,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Hibernating and Waking Up a Cluster", + "title":"Hibernating or Waking Up a Cluster", + "githuburl":"" + }, + { + "uri":"cce_10_0215.html", + "node_id":"cce_10_0215.xml", + "product_code":"cce", + "code":"59", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Upgrading a Cluster", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Upgrading a Cluster", + "githuburl":"" + }, + { + "uri":"cce_10_0197.html", + "node_id":"cce_10_0197.xml", + "product_code":"cce", + "code":"60", + "des":"CCE strictly complies with community consistency authentication. It releases three Kubernetes versions each year and offers a maintenance period of at least 24 months aft", + "doc_type":"usermanual2", + "kw":"cluster upgrade process,Node Priority,In-place upgrade,Process and Method of Upgrading a Cluster,Upg", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Process and Method of Upgrading a Cluster", + "githuburl":"" + }, + { + "uri":"cce_10_0302.html", + "node_id":"cce_10_0302.xml", + "product_code":"cce", + "code":"61", + "des":"Before the upgrade, you can check whether your cluster can be upgraded and which versions are available on the CCE console. For details, see Process and Method of Upgradi", + "doc_type":"usermanual2", + "kw":"Deprecated APIs,Before You Start,Upgrading a Cluster,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Before You Start", + "githuburl":"" + }, + { + "uri":"cce_10_0560.html", + "node_id":"cce_10_0560.xml", + "product_code":"cce", + "code":"62", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Performing Post-Upgrade Verification", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Performing Post-Upgrade Verification", + "githuburl":"" + }, + { + "uri":"cce_10_0568.html", + "node_id":"cce_10_0568.xml", + "product_code":"cce", + "code":"63", + "des":"After a cluster is upgraded, check whether the cluster is in the Running state.CCE automatically checks your cluster status. Go to the cluster list page and confirm the c", + "doc_type":"usermanual2", + "kw":"Cluster Status Check,Performing Post-Upgrade Verification,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Cluster Status Check", + "githuburl":"" + }, + { + "uri":"cce_10_0569.html", + "node_id":"cce_10_0569.xml", + "product_code":"cce", + "code":"64", + "des":"After a cluster is upgraded, check whether nodes in the cluster are in the Running state.CCE automatically checks your node statuses. Go to the node list page and confirm", + "doc_type":"usermanual2", + "kw":"Node Status Check,Performing Post-Upgrade Verification,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Status Check", + "githuburl":"" + }, + { + "uri":"cce_10_0567.html", + "node_id":"cce_10_0567.xml", + "product_code":"cce", + "code":"65", + "des":"After a cluster is upgraded, check whether there are any nodes that skip the upgrade in the cluster. These nodes may affect the proper running of the cluster.CCE automati", + "doc_type":"usermanual2", + "kw":"Node Skipping Check,Performing Post-Upgrade Verification,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Skipping Check", + "githuburl":"" + }, + { + "uri":"cce_10_0561.html", + "node_id":"cce_10_0561.xml", + "product_code":"cce", + "code":"66", + "des":"After a cluster is upgraded, check whether its services are running properly.Different services have different verification mode. 
Select a suitable one and verify the ser", + "doc_type":"usermanual2", + "kw":"Service Check,Performing Post-Upgrade Verification,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Service Check", + "githuburl":"" + }, + { + "uri":"cce_10_0565.html", + "node_id":"cce_10_0565.xml", + "product_code":"cce", + "code":"67", + "des":"Check whether nodes can be created in the cluster.If nodes cannot be created in your cluster after the cluster is upgraded, contact technical support.", + "doc_type":"usermanual2", + "kw":"New Node Check,Performing Post-Upgrade Verification,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"New Node Check", + "githuburl":"" + }, + { + "uri":"cce_10_0566.html", + "node_id":"cce_10_0566.xml", + "product_code":"cce", + "code":"68", + "des":"Check whether pods can be created on the existing nodes after the cluster is upgraded.Check whether pods can be created on new nodes after the cluster is upgraded.After c", + "doc_type":"usermanual2", + "kw":"New Pod Check,Performing Post-Upgrade Verification,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"New Pod Check", + "githuburl":"" + }, + { + "uri":"cce_10_0210.html", + "node_id":"cce_10_0210.xml", + "product_code":"cce", + "code":"69", + "des":"This section describes how to migrate services from a cluster of an earlier version to a cluster of a later version in CCE.This operation is applicable when a cross-versi", + "doc_type":"usermanual2", + "kw":"Migrating Services Across Clusters of Different Versions,Upgrading a Cluster,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Migrating Services Across Clusters of Different Versions", + "githuburl":"" + }, + { + "uri":"cce_10_0550.html", + "node_id":"cce_10_0550.xml", + "product_code":"cce", + "code":"70", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Troubleshooting for Pre-upgrade Check Exceptions", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Troubleshooting for Pre-upgrade Check Exceptions", + "githuburl":"" + }, + { + "uri":"cce_10_0549.html", + "node_id":"cce_10_0549.xml", + "product_code":"cce", + "code":"71", + "des":"The system automatically checks a cluster before its upgrade. If the cluster does not meet the pre-upgrade check conditions, the upgrade cannot continue. 
To avoid risks, ", + "doc_type":"usermanual2", + "kw":"Pre-upgrade Check,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Pre-upgrade Check", + "githuburl":"" + }, + { + "uri":"cce_10_0431.html", + "node_id":"cce_10_0431.xml", + "product_code":"cce", + "code":"72", + "des":"Check the following items:Check whether the node is available.Check whether the node OS supports the upgrade.Check whether the node is marked with unexpected node pool la", + "doc_type":"usermanual2", + "kw":"Node Restrictions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Restrictions", + "githuburl":"" + }, + { + "uri":"cce_10_0432.html", + "node_id":"cce_10_0432.xml", + "product_code":"cce", + "code":"73", + "des":"Check whether the target cluster is under upgrade management.CCE may temporarily restrict the cluster upgrade due to the following reasons:The cluster is identified as th", + "doc_type":"usermanual2", + "kw":"Upgrade Management,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Upgrade Management", + "githuburl":"" + }, + { + "uri":"cce_10_0433.html", + "node_id":"cce_10_0433.xml", + "product_code":"cce", + "code":"74", + "des":"Check the following items:Check whether the add-on status is normal.Check whether the add-on support the target version.Scenario 1: The add-on malfunctions.Log in to the ", + "doc_type":"usermanual2", + "kw":"Add-ons,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Add-ons", + "githuburl":"" + }, + { + "uri":"cce_10_0434.html", + "node_id":"cce_10_0434.xml", + "product_code":"cce", + "code":"75", + "des":"Check whether the current HelmRelease record contains discarded Kubernetes APIs that are not supported by the target cluster version. 
If yes, the Helm chart may be unavai", + "doc_type":"usermanual2", + "kw":"Helm Charts,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Helm Charts", + "githuburl":"" + }, + { + "uri":"cce_10_0435.html", + "node_id":"cce_10_0435.xml", + "product_code":"cce", + "code":"76", + "des":"Check whether CCE can connect to your master nodes.Contact technical support.", + "doc_type":"usermanual2", + "kw":"SSH Connectivity of Master Nodes,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"SSH Connectivity of Master Nodes", + "githuburl":"" + }, + { + "uri":"cce_10_0436.html", + "node_id":"cce_10_0436.xml", + "product_code":"cce", + "code":"77", + "des":"Check the node pool status.Check whether the node pool OS or container runtime is supported after the upgrade.Scenario: The node pool malfunctions.Log in to the CCE conso", + "doc_type":"usermanual2", + "kw":"Node Pools,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Pools", + "githuburl":"" + }, + { + "uri":"cce_10_0437.html", + "node_id":"cce_10_0437.xml", + "product_code":"cce", + "code":"78", + "des":"Check whether the Protocol & Port of the worker node security groups is set to ICMP: All and whether the security group with the source IP address set to the master node ", + "doc_type":"usermanual2", + "kw":"Security Groups,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Security Groups", + "githuburl":"" + }, + { + "uri":"cce_10_0439.html", + "node_id":"cce_10_0439.xml", + "product_code":"cce", + "code":"79", + "des":"Check whether nodes need to be migrated.For the 1.15 cluster that is upgraded from 1.13 in rolling mode, migrate (reset or create and replace) all nodes before performing", + "doc_type":"usermanual2", + "kw":"Residual Nodes,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Residual Nodes", + "githuburl":"" + }, + { + "uri":"cce_10_0440.html", + "node_id":"cce_10_0440.xml", + "product_code":"cce", + "code":"80", + "des":"Check whether there are discarded resources in the clusters.Scenario: The Service in the clusters of v1.25 or later has discarded annotation: tolerate-unready-endpoints.E", + "doc_type":"usermanual2", + "kw":"Discarded Kubernetes Resources,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Discarded Kubernetes Resources", + "githuburl":"" + }, + { + "uri":"cce_10_0441.html", + "node_id":"cce_10_0441.xml", + "product_code":"cce", + "code":"81", + "des":"Read the version compatibility differences and ensure that they are not affected. 
The patch upgrade does not involve version compatibility differences.", + "doc_type":"usermanual2", + "kw":"Compatibility Risks,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Compatibility Risks", + "githuburl":"" + }, + { + "uri":"cce_10_0442.html", + "node_id":"cce_10_0442.xml", + "product_code":"cce", + "code":"82", + "des":"Check whether cce-agent on the current node is of the latest version.Scenario 1: The error message \"you cce-agent no update, please restart it\" is displayed.cce-agent doe", + "doc_type":"usermanual2", + "kw":"CCE Agent Versions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"CCE Agent Versions", + "githuburl":"" + }, + { + "uri":"cce_10_0443.html", + "node_id":"cce_10_0443.xml", + "product_code":"cce", + "code":"83", + "des":"Check whether the CPU usage of the node exceeds 90%.Upgrade the cluster during off-peak hours.Check whether too many pods are deployed on the node. If yes, reschedule pod", + "doc_type":"usermanual2", + "kw":"Node CPU Usage,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node CPU Usage", + "githuburl":"" + }, + { + "uri":"cce_10_0444.html", + "node_id":"cce_10_0444.xml", + "product_code":"cce", + "code":"84", + "des":"Check the following items:Check whether the key CRD packageversions.version.cce.io of the cluster is deleted.Check whether the cluster key CRD network-attachment-definiti", + "doc_type":"usermanual2", + "kw":"CRDs,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"CRDs", + "githuburl":"" + }, + { + "uri":"cce_10_0445.html", + "node_id":"cce_10_0445.xml", + "product_code":"cce", + "code":"85", + "des":"Check the following items:Check whether the key data disks on the node meet the upgrade requirements.Check whether the /tmp directory has 500 MB available space.During th", + "doc_type":"usermanual2", + "kw":"Node Disks,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Disks", + "githuburl":"" + }, + { + "uri":"cce_10_0446.html", + "node_id":"cce_10_0446.xml", + "product_code":"cce", + "code":"86", + "des":"Check the following items:Check whether the DNS configuration of the current node can resolve the OBS address.Check whether the current node can access the OBS address of", + "doc_type":"usermanual2", + "kw":"Node DNS,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node DNS", + "githuburl":"" + }, + { + "uri":"cce_10_0447.html", + "node_id":"cce_10_0447.xml", + "product_code":"cce", + "code":"87", + "des":"Check whether the owner and owner group of the files in the /var/paas directory used by the CCE are both paas.Scenario 1: The error message \"xx file permission has been c", + "doc_type":"usermanual2", + "kw":"Node Key Directory File Permissions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + 
} + ], + "title":"Node Key Directory File Permissions", + "githuburl":"" + }, + { + "uri":"cce_10_0448.html", + "node_id":"cce_10_0448.xml", + "product_code":"cce", + "code":"88", + "des":"Check whether the kubelet on the node is running properly.Scenario 1: The kubelet status is abnormal.If the kubelet malfunctions, the node is unavailable. Restore the nod", + "doc_type":"usermanual2", + "kw":"kubelet,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"kubelet", + "githuburl":"" + }, + { + "uri":"cce_10_0449.html", + "node_id":"cce_10_0449.xml", + "product_code":"cce", + "code":"89", + "des":"Check whether the memory usage of the node exceeds 90%.Upgrade the cluster during off-peak hours.Check whether too many pods are deployed on the node. If yes, reschedule ", + "doc_type":"usermanual2", + "kw":"Node Memory,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Memory", + "githuburl":"" + }, + { + "uri":"cce_10_0450.html", + "node_id":"cce_10_0450.xml", + "product_code":"cce", + "code":"90", + "des":"Check whether the clock synchronization server ntpd or chronyd of the node is running properly.Scenario 1: ntpd is running abnormally.Log in to the node and run the syste", + "doc_type":"usermanual2", + "kw":"Node Clock Synchronization Server,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Clock Synchronization Server", + "githuburl":"" + }, + { + "uri":"cce_10_0451.html", + "node_id":"cce_10_0451.xml", + "product_code":"cce", + "code":"91", + "des":"Check whether the OS kernel version of the node is supported by CCE.Case 1: The node image is not a standard CCE image.CCE nodes run depending on the initial standard ker", + "doc_type":"usermanual2", + "kw":"Node OS,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node OS", + "githuburl":"" + }, + { + "uri":"cce_10_0452.html", + "node_id":"cce_10_0452.xml", + "product_code":"cce", + "code":"92", + "des":"Check whether the number of CPUs on the master node is greater than 2.If the number of CPUs on the master node is 2, contact technical support to expand the number to 4 o", + "doc_type":"usermanual2", + "kw":"Node CPUs,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node CPUs", + "githuburl":"" + }, + { + "uri":"cce_10_0453.html", + "node_id":"cce_10_0453.xml", + "product_code":"cce", + "code":"93", + "des":"Check whether the Python commands are available on a node.If the command output is not 0, the check fails.Install Python before the upgrade.", + "doc_type":"usermanual2", + "kw":"Node Python Commands,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Python Commands", + "githuburl":"" + }, + { + "uri":"cce_10_0455.html", + "node_id":"cce_10_0455.xml", + "product_code":"cce", + "code":"94", + "des":"Check whether the nodes in the cluster are ready.Scenario 1: The nodes are in the unavailable status.Log in to 
the CCE console and click the cluster name to access the cl", + "doc_type":"usermanual2", + "kw":"Node Readiness,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Readiness", + "githuburl":"" + }, + { + "uri":"cce_10_0456.html", + "node_id":"cce_10_0456.xml", + "product_code":"cce", + "code":"95", + "des":"Check whether journald of a node is normal.Log in to the node and run the systemctl is-active systemd-journald command to obtain the running status of journald. If the co", + "doc_type":"usermanual2", + "kw":"Node journald,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node journald", + "githuburl":"" + }, + { + "uri":"cce_10_0457.html", + "node_id":"cce_10_0457.xml", + "product_code":"cce", + "code":"96", + "des":"Check whether the containerd.sock file exists on the node. This file affects the startup of container runtime in the Euler OS.Scenario: The Docker used by the node is the", + "doc_type":"usermanual2", + "kw":"containerd.sock,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"containerd.sock", + "githuburl":"" + }, + { + "uri":"cce_10_0458.html", + "node_id":"cce_10_0458.xml", + "product_code":"cce", + "code":"97", + "des":"Before the upgrade, check whether an internal error occurs.If this check fails, contact technical support.", + "doc_type":"usermanual2", + "kw":"Internal Errors,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Internal Errors", + "githuburl":"" + }, + { + "uri":"cce_10_0459.html", + "node_id":"cce_10_0459.xml", + "product_code":"cce", + "code":"98", + "des":"Check whether inaccessible mount points exist on the node.Scenario: There are inaccessible mount points on the node.If NFS (such as obsfs or SFS) is used by the node and ", + "doc_type":"usermanual2", + "kw":"Node Mount Points,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Mount Points", + "githuburl":"" + }, + { + "uri":"cce_10_0460.html", + "node_id":"cce_10_0460.xml", + "product_code":"cce", + "code":"99", + "des":"Check whether the taint needed for cluster upgrade exists on the node.Scenario 1: The node is skipped during the cluster upgrade.If the version of the node is different f", + "doc_type":"usermanual2", + "kw":"Kubernetes Node Taints,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Kubernetes Node Taints", + "githuburl":"" + }, + { + "uri":"cce_10_0478.html", + "node_id":"cce_10_0478.xml", + "product_code":"cce", + "code":"100", + "des":"Check whether there are any compatibility restrictions on the current Everest add-on.There are compatibility restrictions on the current Everest add-on and it cannot be u", + "doc_type":"usermanual2", + "kw":"Everest Restrictions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Everest Restrictions", + 
"githuburl":"" + }, + { + "uri":"cce_10_0479.html", + "node_id":"cce_10_0479.xml", + "product_code":"cce", + "code":"101", + "des":"Check whether the current cce-controller-hpa add-on has compatibility restrictions.The current cce-controller-hpa add-on has compatibility restrictions. An add-on that ca", + "doc_type":"usermanual2", + "kw":"cce-hpa-controller Restrictions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"cce-hpa-controller Restrictions", + "githuburl":"" + }, + { + "uri":"cce_10_0480.html", + "node_id":"cce_10_0480.xml", + "product_code":"cce", + "code":"102", + "des":"Check whether the current cluster version and the target version support enhanced CPU policy.Scenario: Only the current cluster version supports the enhanced CPU policy f", + "doc_type":"usermanual2", + "kw":"Enhanced CPU Policies,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Enhanced CPU Policies", + "githuburl":"" + }, + { + "uri":"cce_10_0484.html", + "node_id":"cce_10_0484.xml", + "product_code":"cce", + "code":"103", + "des":"Check whether the container runtime and network components on the worker nodes are healthy.If a worker node component malfunctions, log in to the node to check the status", + "doc_type":"usermanual2", + "kw":"Health of Worker Node Components,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Health of Worker Node Components", + "githuburl":"" + }, + { + "uri":"cce_10_0485.html", + "node_id":"cce_10_0485.xml", + "product_code":"cce", + "code":"104", + "des":"Check whether the Kubernetes, container runtime, and network components of the master nodes are healthy.If a master node component malfunctions, contact technical support", + "doc_type":"usermanual2", + "kw":"Health of Master Node Components,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Health of Master Node Components", + "githuburl":"" + }, + { + "uri":"cce_10_0486.html", + "node_id":"cce_10_0486.xml", + "product_code":"cce", + "code":"105", + "des":"Check whether the resources of Kubernetes components, such as etcd and kube-controller-manager, exceed the upper limit.Solution 1: Reduce Kubernetes resources that are ne", + "doc_type":"usermanual2", + "kw":"Memory Resource Limit of Kubernetes Components,Troubleshooting for Pre-upgrade Check Exceptions,User", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Memory Resource Limit of Kubernetes Components", + "githuburl":"" + }, + { + "uri":"cce_10_0487.html", + "node_id":"cce_10_0487.xml", + "product_code":"cce", + "code":"106", + "des":"The system scans the audit logs of the past day to check whether the user calls the deprecated APIs of the target Kubernetes version.Due to the limited time range of audi", + "doc_type":"usermanual2", + "kw":"Discarded Kubernetes APIs,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Discarded Kubernetes APIs", + "githuburl":"" + }, + { + "uri":"cce_10_0488.html", + 
"node_id":"cce_10_0488.xml", + "product_code":"cce", + "code":"107", + "des":"If IPv6 is enabled for a CCE Turbo cluster, check whether the target cluster version supports IPv6.CCE Turbo clusters support IPv6 since v1.23. This feature is available ", + "doc_type":"usermanual2", + "kw":"IPv6 Support in CCE Turbo Clusters,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"IPv6 Support in CCE Turbo Clusters", + "githuburl":"" + }, + { + "uri":"cce_10_0489.html", + "node_id":"cce_10_0489.xml", + "product_code":"cce", + "code":"108", + "des":"Check whether NetworkManager of a node is normal.Log in to the node and run the systemctl is-active NetworkManager command to obtain the running status of NetworkManager.", + "doc_type":"usermanual2", + "kw":"NetworkManager,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"NetworkManager", + "githuburl":"" + }, + { + "uri":"cce_10_0490.html", + "node_id":"cce_10_0490.xml", + "product_code":"cce", + "code":"109", + "des":"Check the ID file format.", + "doc_type":"usermanual2", + "kw":"Node ID File,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node ID File", + "githuburl":"" + }, + { + "uri":"cce_10_0491.html", + "node_id":"cce_10_0491.xml", + "product_code":"cce", + "code":"110", + "des":"When you upgrade a cluster to v1.19 or later, the system checks whether the following configuration files have been modified on the backend:/opt/cloud/cce/kubernetes/kube", + "doc_type":"usermanual2", + "kw":"Node Configuration Consistency,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Configuration Consistency", + "githuburl":"" + }, + { + "uri":"cce_10_0492.html", + "node_id":"cce_10_0492.xml", + "product_code":"cce", + "code":"111", + "des":"Check whether the configuration files of key components exist on the node.The following table lists the files to be checked.Contact technical support to restore the confi", + "doc_type":"usermanual2", + "kw":"Node Configuration File,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Configuration File", + "githuburl":"" + }, + { + "uri":"cce_10_0493.html", + "node_id":"cce_10_0493.xml", + "product_code":"cce", + "code":"112", + "des":"Check whether the current CoreDNS key configuration Corefile is different from the Helm release record. 
The difference may be overwritten during the add-on upgrade, affec", + "doc_type":"usermanual2", + "kw":"CoreDNS Configuration Consistency,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"CoreDNS Configuration Consistency", + "githuburl":"" + }, + { + "uri":"cce_10_0494.html", + "node_id":"cce_10_0494.xml", + "product_code":"cce", + "code":"113", + "des":"Check whether the sudo commands and sudo-related files of the node are working.Scenario 1: The sudo command fails to be executed.During the in-place cluster upgrade, the ", + "doc_type":"usermanual2", + "kw":"sudo,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"sudo", + "githuburl":"" + }, + { + "uri":"cce_10_0495.html", + "node_id":"cce_10_0495.xml", + "product_code":"cce", + "code":"114", + "des":"Whether some key commands that the node upgrade depends on are workingScenario 1: Executing the package manager command failed.Executing the rpm or dpkg command failed. I", + "doc_type":"usermanual2", + "kw":"Key Node Commands,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Key Node Commands", + "githuburl":"" + }, + { + "uri":"cce_10_0496.html", + "node_id":"cce_10_0496.xml", + "product_code":"cce", + "code":"115", + "des":"Check whether the docker/containerd.sock file is directly mounted to the pods on a node. During an upgrade, Docker or containerd restarts and the sock file on the host ch", + "doc_type":"usermanual2", + "kw":"Mounting of a Sock File on a Node,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Mounting of a Sock File on a Node", + "githuburl":"" + }, + { + "uri":"cce_10_0497.html", + "node_id":"cce_10_0497.xml", + "product_code":"cce", + "code":"116", + "des":"Check whether the certificate used by an HTTPS load balancer has been modified on ELB.The certificate referenced by an HTTPS ingress created on CCE is modified on the ELB", + "doc_type":"usermanual2", + "kw":"HTTPS Load Balancer Certificate Consistency,Troubleshooting for Pre-upgrade Check Exceptions,User Gu", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"HTTPS Load Balancer Certificate Consistency", + "githuburl":"" + }, + { + "uri":"cce_10_0498.html", + "node_id":"cce_10_0498.xml", + "product_code":"cce", + "code":"117", + "des":"Check whether the default mount directory and soft link on the node have been manually mounted or modified.Non-shared diskBy default, /var/lib/docker, containerd, or /mnt", + "doc_type":"usermanual2", + "kw":"Node Mounting,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Mounting", + "githuburl":"" + }, + { + "uri":"cce_10_0499.html", + "node_id":"cce_10_0499.xml", + "product_code":"cce", + "code":"118", + "des":"Check whether user paas is allowed to log in to a node.Run the following command to check whether user paas is allowed to log in to a node:If the permissions assigned to ", + "doc_type":"usermanual2", + "kw":"Login Permissions of User paas on a 
Node,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Login Permissions of User paas on a Node", + "githuburl":"" + }, + { + "uri":"cce_10_0500.html", + "node_id":"cce_10_0500.xml", + "product_code":"cce", + "code":"119", + "des":"Check whether the load balancer associated with a Service is allocated with a private IPv4 address.Solution 1: Delete the Service that is associated with a load balancer ", + "doc_type":"usermanual2", + "kw":"Private IPv4 Addresses of Load Balancers,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Private IPv4 Addresses of Load Balancers", + "githuburl":"" + }, + { + "uri":"cce_10_0501.html", + "node_id":"cce_10_0501.xml", + "product_code":"cce", + "code":"120", + "des":"Check whether the source version of the cluster is earlier than v1.11 and the target version is later than v1.23.If the source version of the cluster is earlier than v1.1", + "doc_type":"usermanual2", + "kw":"Historical Upgrade Records,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Historical Upgrade Records", + "githuburl":"" + }, + { + "uri":"cce_10_0502.html", + "node_id":"cce_10_0502.xml", + "product_code":"cce", + "code":"121", + "des":"Check whether the CIDR block of the cluster management plane is the same as that configured on the backbone network.If the CIDR block of the cluster management plane is d", + "doc_type":"usermanual2", + "kw":"CIDR Block of the Cluster Management Plane,Troubleshooting for Pre-upgrade Check Exceptions,User Gui", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"CIDR Block of the Cluster Management Plane", + "githuburl":"" + }, + { + "uri":"cce_10_0503.html", + "node_id":"cce_10_0503.xml", + "product_code":"cce", + "code":"122", + "des":"The GPU add-on is involved in the upgrade, which may affect the GPU driver installation during the creation of a GPU node.The GPU add-on driver needs to be configured by ", + "doc_type":"usermanual2", + "kw":"GPU Add-on,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"GPU Add-on", + "githuburl":"" + }, + { + "uri":"cce_10_0504.html", + "node_id":"cce_10_0504.xml", + "product_code":"cce", + "code":"123", + "des":"Check whether the default system parameter settings on your nodes are modified.If the MTU value of the bond0 network on your BMS node is not the default value 1500, this ", + "doc_type":"usermanual2", + "kw":"Nodes' System Parameters,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Nodes' System Parameters", + "githuburl":"" + }, + { + "uri":"cce_10_0505.html", + "node_id":"cce_10_0505.xml", + "product_code":"cce", + "code":"124", + "des":"Check whether there are residual package version data in the current cluster.A message is displayed indicating that there are residual 10.12.1.109 CRD resources in your c", + "doc_type":"usermanual2", + "kw":"Residual Package Version Data,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + 
"metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Residual Package Version Data", + "githuburl":"" + }, + { + "uri":"cce_10_0506.html", + "node_id":"cce_10_0506.xml", + "product_code":"cce", + "code":"125", + "des":"Check whether the commands required for the upgrade are available on the node.The cluster upgrade failure is typically caused by the lack of key node commands that are re", + "doc_type":"usermanual2", + "kw":"Node Commands,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Commands", + "githuburl":"" + }, + { + "uri":"cce_10_0507.html", + "node_id":"cce_10_0507.xml", + "product_code":"cce", + "code":"126", + "des":"Check whether swap has been enabled on cluster nodes.By default, swap is disabled on CCE nodes. Check the necessity of enabling swap manually and determine the impact of ", + "doc_type":"usermanual2", + "kw":"Node Swap,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Swap", + "githuburl":"" + }, + { + "uri":"cce_10_0510.html", + "node_id":"cce_10_0510.xml", + "product_code":"cce", + "code":"127", + "des":"Check whether the service pods running on a containerd node are restarted when containerd is upgraded.Upgrade the cluster when the impact on services is controllable (for", + "doc_type":"usermanual2", + "kw":"containerd Pod Restart Risks,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"containerd Pod Restart Risks", + "githuburl":"" + }, + { + "uri":"cce_10_0511.html", + "node_id":"cce_10_0511.xml", + "product_code":"cce", + "code":"128", + "des":"Check whether the configuration of the CCE AI Suite add-on in a cluster has been intrusively modified. 
If so, upgrading the cluster may fail.", + "doc_type":"usermanual2", + "kw":"Key GPU Add-on Parameters,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Key GPU Add-on Parameters", + "githuburl":"" + }, + { + "uri":"cce_10_0512.html", + "node_id":"cce_10_0512.xml", + "product_code":"cce", + "code":"129", + "des":"Check whether GPU service pods are rebuilt in a cluster when kubelet is restarted during the upgrade of the cluster.Upgrade the cluster when the impact on services is con", + "doc_type":"usermanual2", + "kw":"GPU Pod Rebuild Risks,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"GPU Pod Rebuild Risks", + "githuburl":"" + }, + { + "uri":"cce_10_0513.html", + "node_id":"cce_10_0513.xml", + "product_code":"cce", + "code":"130", + "des":"Check whether the access control of the ELB listener has been configured for the Service in the current cluster using annotations and whether the configurations are corre", + "doc_type":"usermanual2", + "kw":"ELB Listener Access Control,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"ELB Listener Access Control", + "githuburl":"" + }, + { + "uri":"cce_10_0514.html", + "node_id":"cce_10_0514.xml", + "product_code":"cce", + "code":"131", + "des":"Check whether the flavor of the master nodes in the cluster is the same as the actual flavor of these nodes.Flavor inconsistency is typically due to a modification made o", + "doc_type":"usermanual2", + "kw":"Master Node Flavor,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Master Node Flavor", + "githuburl":"" + }, + { + "uri":"cce_10_0515.html", + "node_id":"cce_10_0515.xml", + "product_code":"cce", + "code":"132", + "des":"Check whether the number of available IP addresses in the cluster subnet supports rolling upgrade.If the number of IP addresses in the selected cluster subnet is insuffic", + "doc_type":"usermanual2", + "kw":"Subnet Quota of Master Nodes,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Subnet Quota of Master Nodes", + "githuburl":"" + }, + { + "uri":"cce_10_0516.html", + "node_id":"cce_10_0516.xml", + "product_code":"cce", + "code":"133", + "des":"Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. Do not use Docker in clusters of versions later than 1.27.If the runtime on your node is", + "doc_type":"usermanual2", + "kw":"Node Runtime,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Runtime", + "githuburl":"" + }, + { + "uri":"cce_10_0517.html", + "node_id":"cce_10_0517.xml", + "product_code":"cce", + "code":"134", + "des":"Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. 
Do not use Docker in clusters of versions later than 1.27.If the runtime on your node po", + "doc_type":"usermanual2", + "kw":"Node Pool Runtime,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Pool Runtime", + "githuburl":"" + }, + { + "uri":"cce_10_0518.html", + "node_id":"cce_10_0518.xml", + "product_code":"cce", + "code":"135", + "des":"Check the number of images on your node. If there are more than 1000 images, it takes a long time for Docker to start, affecting the standard Docker output and functions ", + "doc_type":"usermanual2", + "kw":"Number of Node Images,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Number of Node Images", "githuburl":"" }, { "uri":"cce_10_0183.html", "node_id":"cce_10_0183.xml", "product_code":"cce", - "code":"133", + "code":"136", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Nodes", @@ -2404,7 +2457,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Nodes", @@ -2414,7 +2467,7 @@ "uri":"cce_10_0180.html", "node_id":"cce_10_0180.xml", "product_code":"cce", - "code":"134", + "code":"137", "des":"A container cluster consists of a set of worker machines, called nodes, that run containerized applications. A node can be a virtual machine (VM) or a physical machine (P", "doc_type":"usermanual2", "kw":"paas,user group,Node Overview,Nodes,User Guide", @@ -2422,7 +2475,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Node Overview", @@ -2432,51 +2485,51 @@ "uri":"cce_10_0462.html", "node_id":"cce_10_0462.xml", "product_code":"cce", - "code":"135", + "code":"138", "des":"Container engines, one of the most important components of Kubernetes, manage the lifecycle of images and containers. 
The kubelet interacts with a container runtime throu", "doc_type":"usermanual2", - "kw":"Container Engine,Nodes,User Guide", + "kw":"Container Engines,Nodes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Container Engine", + "title":"Container Engines", "githuburl":"" }, { "uri":"cce_10_0476.html", "node_id":"cce_10_0476.xml", "product_code":"cce", - "code":"136", + "code":"139", "des":"This section describes the mappings between released cluster versions and OS versions.", "doc_type":"usermanual2", - "kw":"Node OS,Nodes,User Guide", + "kw":"Node OSs,Nodes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Node OS", + "title":"Node OSs", "githuburl":"" }, { "uri":"cce_10_0363.html", "node_id":"cce_10_0363.xml", "product_code":"cce", - "code":"137", - "des":"At least one cluster has been created.A key pair has been created for identity authentication upon remote node login.The node has at least 2 vCPUs and 4 GiB of memory.To ", + "code":"140", + "des":"At least one cluster has been created.A key pair has been created for identity authentication upon remote node login.The DNS configuration of a subnet where a node is loc", "doc_type":"usermanual2", "kw":"Creating a Node,Nodes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a Node", @@ -2486,15 +2539,15 @@ "uri":"cce_10_0198.html", "node_id":"cce_10_0198.xml", "product_code":"cce", - "code":"138", - "des":"In CCE, you can create a node (Creating a Node) or add existing nodes (ECSs) to your cluster for management.While an ECS is being accepted into a cluster, the operating s", + "code":"141", + "des":"In CCE, you can create a node (Creating a Node) or add existing nodes (ECSs) to your cluster for management.When accepting an ECS, you can reset the ECS OS to a standard ", "doc_type":"usermanual2", "kw":"Accepting Nodes for Management,Nodes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Accepting Nodes for Management", @@ -2504,7 +2557,7 @@ "uri":"cce_10_0185.html", "node_id":"cce_10_0185.xml", "product_code":"cce", - "code":"139", + "code":"142", "des":"If you use SSH to log in to a node (an ECS), ensure that the ECS already has an EIP (a public IP address).Only login to a running ECS is allowed.Only the user linux can l", "doc_type":"usermanual2", "kw":"Logging In to a Node,Nodes,User Guide", @@ -2512,7 +2565,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Logging In to a Node", @@ -2522,7 +2575,7 @@ "uri":"cce_10_0672.html", "node_id":"cce_10_0672.xml", "product_code":"cce", - "code":"140", + "code":"143", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"node labels", @@ -2530,7 +2583,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Management Nodes", @@ -2540,7 +2593,7 @@ "uri":"cce_10_0004.html", "node_id":"cce_10_0004.xml", "product_code":"cce", - "code":"141", + "code":"144", "des":"You can add different labels to nodes and define different attributes for labels. By using these node labels, you can quickly understand the characteristics of each node.", "doc_type":"usermanual2", "kw":"node labels,Inherent Label of a Node,Managing Node Labels,Management Nodes,User Guide", @@ -2548,7 +2601,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Managing Node Labels", @@ -2558,7 +2611,7 @@ "uri":"cce_10_0352.html", "node_id":"cce_10_0352.xml", "product_code":"cce", - "code":"142", + "code":"145", "des":"Taints enable a node to repel specific pods to prevent these pods from being scheduled to the node.On the CCE console, you can also batch manage nodes' taints.Enter the k", "doc_type":"usermanual2", "kw":"NoSchedule,PreferNoSchedule,NoExecute,System Taints,Managing Node Taints,Management Nodes,User Guide", @@ -2566,7 +2619,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Managing Node Taints", @@ -2576,7 +2629,7 @@ "uri":"cce_10_0003.html", "node_id":"cce_10_0003.xml", "product_code":"cce", - "code":"143", + "code":"146", "des":"You can reset a node to modify the node configuration, such as the node OS and login mode.Resetting a node will reinstall the node OS and the Kubernetes software on the n", "doc_type":"usermanual2", "kw":"reset a node,Resetting a Node,Management Nodes,User Guide", @@ -2584,7 +2637,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Resetting a Node", @@ -2594,7 +2647,7 @@ "uri":"cce_10_0338.html", "node_id":"cce_10_0338.xml", "product_code":"cce", - "code":"144", + "code":"147", "des":"Removing a node from a cluster will re-install the node OS and clear CCE components on the node.Removing a node will not delete the server corresponding to the node. You ", "doc_type":"usermanual2", "kw":"Removing a Node,Management Nodes,User Guide", @@ -2602,7 +2655,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Removing a Node", @@ -2612,7 +2665,7 @@ "uri":"cce_10_0184.html", "node_id":"cce_10_0184.xml", "product_code":"cce", - "code":"145", + "code":"148", "des":"Each node in a cluster is a cloud server or physical machine. After a cluster node is created, you can change the cloud server name or specifications as required. 
Modifyi", "doc_type":"usermanual2", "kw":"synchronize the ECS,Synchronizing the Data of Cloud Servers,Management Nodes,User Guide", @@ -2620,7 +2673,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Synchronizing the Data of Cloud Servers", @@ -2630,15 +2683,15 @@ "uri":"cce_10_0605.html", "node_id":"cce_10_0605.xml", "product_code":"cce", - "code":"146", - "des":"After you enable nodal drainage on the console, CCE configures the node to be non-schedulable and securely evicts all pods that comply with Nodal Drainage Rules on the no", + "code":"149", + "des":"After you enable nodal drainage on the console, CCE configures the node to be non-schedulable and securely evicts all pods that comply with Rules for Draining Nodes on th", "doc_type":"usermanual2", "kw":"nodal drainage,nodal drainage,Draining a Node,Management Nodes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Draining a Node", @@ -2648,15 +2701,15 @@ "uri":"cce_10_0186.html", "node_id":"cce_10_0186.xml", "product_code":"cce", - "code":"147", - "des":"When a node in a CCE cluster is deleted, services running on the node will also be deleted. Exercise caution when performing this operation.VM nodes that are being used b", + "code":"150", + "des":"You can delete a pay-per-use node that is not needed from the node list.Deleting or unsubscribing from a node in a CCE cluster will release the node and services running ", "doc_type":"usermanual2", "kw":"Deleting a Node,Management Nodes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Deleting a Node", @@ -2666,15 +2719,15 @@ "uri":"cce_10_0036.html", "node_id":"cce_10_0036.xml", "product_code":"cce", - "code":"148", - "des":"After a node in the cluster is stopped, services on the node are also stopped. Before stopping a node, ensure that discontinuity of the services on the node will not resu", + "code":"151", + "des":"When a node in the cluster is stopped, all services on that node will also be stopped, and the node will no longer be available for scheduling. Check if your services wil", "doc_type":"usermanual2", "kw":"Stopping a Node,Management Nodes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Stopping a Node", @@ -2684,7 +2737,7 @@ "uri":"cce_10_0276.html", "node_id":"cce_10_0276.xml", "product_code":"cce", - "code":"149", + "code":"152", "des":"In a rolling upgrade, a new node is created, existing workloads are migrated to the new node, and then the old node is deleted. Figure 1 shows the migration process.The o", "doc_type":"usermanual2", "kw":"Performing Rolling Upgrade for Nodes,Management Nodes,User Guide", @@ -2692,7 +2745,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Performing Rolling Upgrade for Nodes", @@ -2702,7 +2755,7 @@ "uri":"cce_10_0704.html", "node_id":"cce_10_0704.xml", "product_code":"cce", - "code":"150", + "code":"153", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Node O&M", @@ -2710,7 +2763,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Node O&M", @@ -2720,7 +2773,7 @@ "uri":"cce_10_0178.html", "node_id":"cce_10_0178.xml", "product_code":"cce", - "code":"151", + "code":"154", "des":"Some node resources are used to run mandatory Kubernetes system components and resources to make the node as part of your cluster. Therefore, the total number of node res", "doc_type":"usermanual2", "kw":"total number of node resources,Node Resource Reservation Policy,Node O&M,User Guide", @@ -2728,7 +2781,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Node Resource Reservation Policy", @@ -2738,15 +2791,15 @@ "uri":"cce_10_0341.html", "node_id":"cce_10_0341.xml", "product_code":"cce", - "code":"152", + "code":"155", "des":"This section describes how to allocate data disk space to nodes so that you can configure the data disk space accordingly.When creating a node, configure data disks for t", "doc_type":"usermanual2", - "kw":"data disk space allocation,Container engine and container image space,basesize,basesize,Container St", + "kw":"data disk space allocation,Container engine and container image space,container engine and container", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Data Disk Space Allocation", @@ -2756,25 +2809,43 @@ "uri":"cce_10_0348.html", "node_id":"cce_10_0348.xml", "product_code":"cce", - "code":"153", - "des":"The maximum number of pods that can be created on a node is calculated based on the cluster type:For a cluster using the container tunnel network model, the value depends", + "code":"156", + "des":"The maximum number of pods that can be created on a node is calculated based on the cluster type:When creating a cluster using a VPC network, you need to configure the nu", "doc_type":"usermanual2", - "kw":"Maximum Number of Pods on a Node,alpha.cce/fixPoolMask,maximum number of pods,Maximum Number of Pods", + "kw":"Maximum Number of Pods on a Node,maximum number of pods,Maximum Number of Pods That Can Be Created o", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Maximum Number of Pods That Can Be Created on a Node", "githuburl":"" }, + { + "uri":"cce_10_0883.html", + "node_id":"cce_10_0883.xml", + "product_code":"cce", + "code":"157", + "des":"To maintain the stability of nodes, CCE stores Kubernetes and container runtime components on separate data disks. Kubernetes uses the /mnt/paas/kubernetes directory, and", + "doc_type":"usermanual2", + "kw":"Differences Between CCE Node mountPath Configurations and Community Native Configurations,Node O&M,U", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Differences Between CCE Node mountPath Configurations and Community Native Configurations", + "githuburl":"" + }, { "uri":"cce_10_0601.html", "node_id":"cce_10_0601.xml", "product_code":"cce", - "code":"154", + "code":"158", "des":"Kubernetes has removed dockershim from v1.24 and does not support Docker by default. CCE is going to stop the support for Docker. 
Change the node container engine from Do", "doc_type":"usermanual2", "kw":"Migrating Nodes from Docker to containerd,Node O&M,User Guide", @@ -2782,7 +2853,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Migrating Nodes from Docker to containerd", @@ -2792,25 +2863,25 @@ "uri":"cce_10_0659.html", "node_id":"cce_10_0659.xml", "product_code":"cce", - "code":"155", + "code":"159", "des":"The node fault detection function depends on the NPD add-on. The add-on instances run on nodes and monitor nodes. This section describes how to enable node fault detectio", "doc_type":"usermanual2", - "kw":"Node Fault Detection,Check Items,Node Fault Detection Policy,Node O&M,User Guide", + "kw":"Node Fault Detection,Check Items,Configuring Node Fault Detection Policies,Node O&M,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Node Fault Detection Policy", + "title":"Configuring Node Fault Detection Policies", "githuburl":"" }, { "uri":"cce_10_0035.html", "node_id":"cce_10_0035.xml", "product_code":"cce", - "code":"156", + "code":"160", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Node Pools", @@ -2818,7 +2889,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Node Pools", @@ -2828,7 +2899,7 @@ "uri":"cce_10_0081.html", "node_id":"cce_10_0081.xml", "product_code":"cce", - "code":"157", + "code":"161", "des":"CCE introduces node pools to help you better manage nodes in Kubernetes clusters. A node pool contains one node or a group of nodes with identical configuration in a clus", "doc_type":"usermanual2", "kw":"DefaultPool,DefaultPool,Deploying a Workload in a Specified Node Pool,Node Pool Overview,Node Pools,", @@ -2836,7 +2907,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Node Pool Overview", @@ -2846,25 +2917,43 @@ "uri":"cce_10_0012.html", "node_id":"cce_10_0012.xml", "product_code":"cce", - "code":"158", - "des":"This section describes how to create a node pool and perform operations on the node pool. For details about how a node pool works, see Node Pool Overview.The Autoscaler a", + "code":"162", + "des":"This section describes how to create a node pool and perform operations on the node pool. For details about how a node pool works, see Node Pool Overview.Basic SettingsCo", "doc_type":"usermanual2", "kw":"Creating a Node Pool,Node Pools,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a Node Pool", "githuburl":"" }, + { + "uri":"cce_10_0658.html", + "node_id":"cce_10_0658.xml", + "product_code":"cce", + "code":"163", + "des":"You can specify a specification in a node pool for scaling.The default node pool does not support scaling. 
Use Creating a Node to add a node.Number of Scaling Targets: Th", + "doc_type":"usermanual2", + "kw":"Scaling a Node Pool,Node Pools,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Scaling a Node Pool", + "githuburl":"" + }, { "uri":"cce_10_0222.html", "node_id":"cce_10_0222.xml", "product_code":"cce", - "code":"159", + "code":"164", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Managing a Node Pool", @@ -2872,7 +2961,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Managing a Node Pool", @@ -2882,15 +2971,15 @@ "uri":"cce_10_0653.html", "node_id":"cce_10_0653.xml", "product_code":"cce", - "code":"160", - "des":"The modification of resource tags of a node pool takes effect only on new nodes. To synchronize the modification onto existing nodes, manually reset the existing nodes.Ch", + "code":"165", + "des":"Changes to the container engine, OS, or pre-/post-installation script in a node pool take effect only on new nodes. To synchronize the modification onto existing nodes, m", "doc_type":"usermanual2", "kw":"Updating a Node Pool,Managing a Node Pool,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Updating a Node Pool", @@ -2900,7 +2989,7 @@ "uri":"cce_10_0727.html", "node_id":"cce_10_0727.xml", "product_code":"cce", - "code":"161", + "code":"166", "des":"Auto Scaling (AS) enables elastic scaling of nodes in a node pool based on scaling policies. 
Without this function, you have to manually adjust the number of nodes in a n", "doc_type":"usermanual2", "kw":"Updating an AS Configuration,Managing a Node Pool,User Guide", @@ -2908,7 +2997,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Updating an AS Configuration", @@ -2918,25 +3007,43 @@ "uri":"cce_10_0652.html", "node_id":"cce_10_0652.xml", "product_code":"cce", - "code":"162", - "des":"The default node pool DefaultPool does not support the following management operations.CCE allows you to highly customize Kubernetes parameter settings on core components", + "code":"167", + "des":"The default node pool does not support the following management operations.CCE allows you to highly customize Kubernetes parameter settings on core components in a cluste", "doc_type":"usermanual2", - "kw":"Configuring a Node Pool,Managing a Node Pool,User Guide", + "kw":"Modifying Node Pool Configurations,Managing a Node Pool,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Configuring a Node Pool", + "title":"Modifying Node Pool Configurations", + "githuburl":"" + }, + { + "uri":"cce_10_0886.html", + "node_id":"cce_10_0886.xml", + "product_code":"cce", + "code":"168", + "des":"If you want to add a newly created ECS to a node pool in a cluster, or remove a node from a node pool and add it to the node pool again, accept the node.When an ECS is ac", + "doc_type":"usermanual2", + "kw":"Accepting Nodes in a Node Pool,Managing a Node Pool,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Accepting Nodes in a Node Pool", "githuburl":"" }, { "uri":"cce_10_0655.html", "node_id":"cce_10_0655.xml", "product_code":"cce", - "code":"163", + "code":"169", "des":"You can copy the configuration of an existing node pool on the CCE console to create new node pools.", "doc_type":"usermanual2", "kw":"Copying a Node Pool,Managing a Node Pool,User Guide", @@ -2944,7 +3051,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Copying a Node Pool", @@ -2954,7 +3061,7 @@ "uri":"cce_10_0654.html", "node_id":"cce_10_0654.xml", "product_code":"cce", - "code":"164", + "code":"170", "des":"After the configuration of a node pool is updated, some configurations cannot be automatically synchronized for existing nodes. You can manually synchronize configuration", "doc_type":"usermanual2", "kw":"Synchronizing Node Pools,Managing a Node Pool,User Guide", @@ -2962,7 +3069,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Synchronizing Node Pools", @@ -2972,15 +3079,15 @@ "uri":"cce_10_0660.html", "node_id":"cce_10_0660.xml", "product_code":"cce", - "code":"165", - "des":"When CCE releases a new OS image, existing nodes cannot be automatically upgraded. 
You can manually upgrade them in batches.This section describes how to upgrade an OS by", + "code":"171", + "des":"After CCE releases a new OS image, if existing nodes cannot be automatically upgraded, you can manually upgrade them in batches.This section describes how to upgrade an O", "doc_type":"usermanual2", "kw":"Upgrading an OS,Managing a Node Pool,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Upgrading an OS", @@ -2990,7 +3097,7 @@ "uri":"cce_10_0656.html", "node_id":"cce_10_0656.xml", "product_code":"cce", - "code":"166", + "code":"172", "des":"Nodes in a node pool can be migrated to the default node pool. Nodes in the default node pool or a custom node pool cannot be migrated to other custom node pools.The migr", "doc_type":"usermanual2", "kw":"Migrating a Node,Managing a Node Pool,User Guide", @@ -2998,7 +3105,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Migrating a Node", @@ -3008,7 +3115,7 @@ "uri":"cce_10_0657.html", "node_id":"cce_10_0657.xml", "product_code":"cce", - "code":"167", + "code":"173", "des":"Deleting a node pool will delete nodes in the pool. Pods on these nodes will be automatically migrated to available nodes in other node pools.Deleting a node pool will de", "doc_type":"usermanual2", "kw":"Deleting a Node Pool,Managing a Node Pool,User Guide", @@ -3016,7 +3123,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Deleting a Node Pool", @@ -3026,7 +3133,7 @@ "uri":"cce_10_0046.html", "node_id":"cce_10_0046.xml", "product_code":"cce", - "code":"168", + "code":"174", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Deployments,StatefulSets,DaemonSets,jobs,cron jobs", @@ -3034,7 +3141,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Workloads", @@ -3044,7 +3151,7 @@ "uri":"cce_10_0006.html", "node_id":"cce_10_0006.xml", "product_code":"cce", - "code":"169", + "code":"175", "des":"A workload is an application running on Kubernetes. No matter how many components are there in your workload, you can run it in a group of Kubernetes pods. A workload is ", "doc_type":"usermanual2", "kw":"Deployments,StatefulSets,DaemonSets,jobs,cron jobs,Overview,Workloads,User Guide", @@ -3052,7 +3159,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Overview", @@ -3062,7 +3169,7 @@ "uri":"cce_10_0673.html", "node_id":"cce_10_0673.xml", "product_code":"cce", - "code":"170", + "code":"176", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Creating a Workload", @@ -3070,7 +3177,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a Workload", @@ -3080,7 +3187,7 @@ "uri":"cce_10_0047.html", "node_id":"cce_10_0047.xml", "product_code":"cce", - "code":"171", + "code":"177", "des":"Deployments are workloads (for example, Nginx) that do not store any data or status. You can create Deployments on the CCE console or by running kubectl commands.Before c", "doc_type":"usermanual2", "kw":"create a workload using kubectl,Creating a Deployment,Creating a Workload,User Guide", @@ -3088,7 +3195,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a Deployment", @@ -3098,7 +3205,7 @@ "uri":"cce_10_0048.html", "node_id":"cce_10_0048.xml", "product_code":"cce", - "code":"172", + "code":"178", "des":"StatefulSets are a type of workloads whose data or status is stored while they are running. For example, MySQL is a StatefulSet because it needs to store new data.A conta", "doc_type":"usermanual2", "kw":"Using kubectl,Creating a StatefulSet,Creating a Workload,User Guide", @@ -3106,7 +3213,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a StatefulSet", @@ -3116,7 +3223,7 @@ "uri":"cce_10_0216.html", "node_id":"cce_10_0216.xml", "product_code":"cce", - "code":"173", + "code":"179", "des":"CCE provides deployment and management capabilities for multiple types of containers and supports features of container workloads, including creation, configuration, moni", "doc_type":"usermanual2", "kw":"create a workload using kubectl,Creating a DaemonSet,Creating a Workload,User Guide", @@ -3124,7 +3231,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a DaemonSet", @@ -3134,7 +3241,7 @@ "uri":"cce_10_0150.html", "node_id":"cce_10_0150.xml", "product_code":"cce", - "code":"174", + "code":"180", "des":"Jobs are short-lived and run for a certain time to completion. They can be executed immediately after being deployed. It is completed after it exits normally (exit 0).A j", "doc_type":"usermanual2", "kw":"Creating a Job,Creating a Workload,User Guide", @@ -3142,7 +3249,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a Job", @@ -3152,7 +3259,7 @@ "uri":"cce_10_0151.html", "node_id":"cce_10_0151.xml", "product_code":"cce", - "code":"175", + "code":"181", "des":"A cron job runs on a repeating schedule. You can perform time synchronization for all active nodes at a fixed time point.A cron job runs periodically at the specified tim", "doc_type":"usermanual2", "kw":"time synchronization,Creating a Cron Job,Creating a Workload,User Guide", @@ -3160,7 +3267,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a Cron Job", @@ -3170,33 +3277,51 @@ "uri":"cce_10_0130.html", "node_id":"cce_10_0130.xml", "product_code":"cce", - "code":"176", + "code":"182", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", - "kw":"Configuring a Container", + "kw":"Configuring a Workload", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Configuring a Container", + "title":"Configuring a Workload", + "githuburl":"" + }, + { + "uri":"cce_10_0463.html", + "node_id":"cce_10_0463.xml", + "product_code":"cce", + "code":"183", + "des":"The most significant difference is that each Kata container (pod) runs on an independent micro-VM, has an independent OS kernel, and is securely isolated at the virtualiz", + "doc_type":"usermanual2", + "kw":"Secure Runtime and Common Runtime,Configuring a Workload,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Secure Runtime and Common Runtime", "githuburl":"" }, { "uri":"cce_10_0354.html", "node_id":"cce_10_0354.xml", "product_code":"cce", - "code":"177", + "code":"184", "des":"When creating a workload, you can configure containers to use the same time zone as the node. You can enable time zone synchronization when creating a workload.The time z", "doc_type":"usermanual2", - "kw":"Configuring Time Zone Synchronization,Configuring a Container,User Guide", + "kw":"Configuring Time Zone Synchronization,Configuring a Workload,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Configuring Time Zone Synchronization", @@ -3206,15 +3331,15 @@ "uri":"cce_10_0353.html", "node_id":"cce_10_0353.xml", "product_code":"cce", - "code":"178", + "code":"185", "des":"When a workload is created, the container image is pulled from the image repository to the node. The image is also pulled when the workload is restarted or upgraded.By de", "doc_type":"usermanual2", - "kw":"Configuring an Image Pull Policy,Configuring a Container,User Guide", + "kw":"Configuring an Image Pull Policy,Configuring a Workload,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Configuring an Image Pull Policy", @@ -3224,15 +3349,15 @@ "uri":"cce_10_0009.html", "node_id":"cce_10_0009.xml", "product_code":"cce", - "code":"179", + "code":"186", "des":"CCE allows you to create workloads using images pulled from third-party image repositories.Generally, a third-party image repository can be accessed only after authentica", "doc_type":"usermanual2", - "kw":"Using Third-Party Images,Configuring a Container,User Guide", + "kw":"Using Third-Party Images,Configuring a Workload,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Using Third-Party Images", @@ -3242,15 +3367,15 @@ "uri":"cce_10_0163.html", "node_id":"cce_10_0163.xml", "product_code":"cce", - "code":"180", + "code":"187", "des":"CCE allows you to set resource requirements and limits, such as CPU and RAM, for added containers during workload creation. 
Kubernetes also allows using YAML to set requi", "doc_type":"usermanual2", - "kw":"ephemeral storage,Configuring Container Specifications,Configuring a Container,User Guide", + "kw":"ephemeral storage,Configuring Container Specifications,Configuring a Workload,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Configuring Container Specifications", @@ -3260,15 +3385,15 @@ "uri":"cce_10_0105.html", "node_id":"cce_10_0105.xml", "product_code":"cce", - "code":"181", + "code":"188", "des":"CCE provides callback functions for the lifecycle management of containerized applications. For example, if you want a container to perform a certain operation before sto", "doc_type":"usermanual2", - "kw":"Startup Command,Post-Start,Pre-Stop,Configuring Container Lifecycle Parameters,Configuring a Contain", + "kw":"Startup Command,Post-Start,Pre-Stop,Configuring Container Lifecycle Parameters,Configuring a Workloa", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Configuring Container Lifecycle Parameters", @@ -3278,15 +3403,15 @@ "uri":"cce_10_0112.html", "node_id":"cce_10_0112.xml", "product_code":"cce", - "code":"182", + "code":"189", "des":"Health check regularly checks the health status of containers during container running. If the health check function is not configured, a pod cannot detect application ex", "doc_type":"usermanual2", - "kw":"Health check,HTTP request,TCP port,CLI,Configuring Container Health Check,Configuring a Container,Us", + "kw":"Health check,HTTP request,TCP port,CLI,Configuring Container Health Check,Configuring a Workload,Use", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Configuring Container Health Check", @@ -3296,15 +3421,15 @@ "uri":"cce_10_0113.html", "node_id":"cce_10_0113.xml", "product_code":"cce", - "code":"183", + "code":"190", "des":"An environment variable is a variable whose value can affect the way a running container will behave. You can modify environment variables even after workloads are deploy", "doc_type":"usermanual2", - "kw":"Configuring Environment Variables,Configuring a Container,User Guide", + "kw":"Configuring Environment Variables,Configuring a Workload,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Configuring Environment Variables", @@ -3314,33 +3439,33 @@ "uri":"cce_10_0397.html", "node_id":"cce_10_0397.xml", "product_code":"cce", - "code":"184", + "code":"191", "des":"In actual applications, upgrade is a common operation. A Deployment, StatefulSet, or DaemonSet can easily support application upgrade.You can set different upgrade polici", "doc_type":"usermanual2", - "kw":"Workload Upgrade Policies,Configuring a Container,User Guide", + "kw":"Configuring Workload Upgrade Policies,Configuring a Workload,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Workload Upgrade Policies", + "title":"Configuring Workload Upgrade Policies", "githuburl":"" }, { "uri":"cce_10_0232.html", "node_id":"cce_10_0232.xml", "product_code":"cce", - "code":"185", + "code":"192", "des":"Kubernetes supports node affinity and pod affinity/anti-affinity. You can configure custom rules to achieve affinity and anti-affinity scheduling. 
For example, you can de", "doc_type":"usermanual2", - "kw":"Scheduling Policies (Affinity/Anti-affinity),Configuring a Container,User Guide", + "kw":"Scheduling Policies (Affinity/Anti-affinity),Configuring a Workload,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Scheduling Policies (Affinity/Anti-affinity)", @@ -3350,79 +3475,79 @@ "uri":"cce_10_0728.html", "node_id":"cce_10_0728.xml", "product_code":"cce", - "code":"186", + "code":"193", "des":"Tolerations allow the scheduler to schedule pods to nodes with target taints. Tolerances work with node taints. Each node allows one or more taints. If no tolerance is co", "doc_type":"usermanual2", - "kw":"Taints and Tolerations,Configuring a Container,User Guide", + "kw":"Configuring Tolerance Policies,Configuring a Workload,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Taints and Tolerations", + "title":"Configuring Tolerance Policies", "githuburl":"" }, { "uri":"cce_10_0386.html", "node_id":"cce_10_0386.xml", "product_code":"cce", - "code":"187", + "code":"194", "des":"CCE allows you to add annotations to a YAML file to realize some advanced pod functions. The following table describes the annotations you can add.When you create a workl", "doc_type":"usermanual2", - "kw":"Labels and Annotations,Configuring a Container,User Guide", + "kw":"Configuring Labels and Annotations,Configuring a Workload,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Labels and Annotations", + "title":"Configuring Labels and Annotations", "githuburl":"" }, { "uri":"cce_10_00356.html", "node_id":"cce_10_00356.xml", "product_code":"cce", - "code":"188", + "code":"195", "des":"If you encounter unexpected problems when using a container, you can log in to the container to debug it.The example output is as follows:NAME ", "doc_type":"usermanual2", - "kw":"Accessing a Container,Workloads,User Guide", + "kw":"Logging In to a Container,Workloads,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Accessing a Container", + "title":"Logging In to a Container", "githuburl":"" }, { "uri":"cce_10_0007.html", "node_id":"cce_10_0007.xml", "product_code":"cce", - "code":"189", + "code":"196", "des":"After a workload is created, you can upgrade, monitor, roll back, or delete the workload, as well as edit its YAML file.Workload/Job managementOperationDescriptionMonitor", "doc_type":"usermanual2", - "kw":"Managing Workloads and Jobs,Workloads,User Guide", + "kw":"Managing Workloads,Workloads,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Managing Workloads and Jobs", + "title":"Managing Workloads", "githuburl":"" }, { "uri":"cce_10_0833.html", "node_id":"cce_10_0833.xml", "product_code":"cce", - "code":"190", + "code":"197", "des":"Custom Resource Definition (CRD) is an extension of Kubernetes APIs. 
When default Kubernetes resources cannot meet service requirements, you can use CRDs to define new re", "doc_type":"usermanual2", "kw":"Managing Custom Resources,Workloads,User Guide", @@ -3430,3023 +3555,17 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Managing Custom Resources", "githuburl":"" }, - { - "uri":"cce_10_0463.html", - "node_id":"cce_10_0463.xml", - "product_code":"cce", - "code":"191", - "des":"The most significant difference is that each Kata container (pod) runs on an independent micro-VM, has an independent OS kernel, and is securely isolated at the virtualiz", - "doc_type":"usermanual2", - "kw":"Kata Runtime and Common Runtime,Workloads,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Kata Runtime and Common Runtime", - "githuburl":"" - }, - { - "uri":"cce_10_0674.html", - "node_id":"cce_10_0674.xml", - "product_code":"cce", - "code":"192", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Scheduling", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Scheduling", - "githuburl":"" - }, - { - "uri":"cce_10_0702.html", - "node_id":"cce_10_0702.xml", - "product_code":"cce", - "code":"193", - "des":"CCE supports different types of resource scheduling and task scheduling, improving application performance and overall cluster resource utilization. This section describe", - "doc_type":"usermanual2", - "kw":"Overview,Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Overview", - "githuburl":"" - }, - { - "uri":"cce_10_0551.html", - "node_id":"cce_10_0551.xml", - "product_code":"cce", - "code":"194", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"CPU Scheduling", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"CPU Scheduling", - "githuburl":"" - }, - { - "uri":"cce_10_0351.html", - "node_id":"cce_10_0351.xml", - "product_code":"cce", - "code":"195", - "des":"By default, kubelet uses CFS quotas to enforce pod CPU limits. 
When the node runs many CPU-bound pods, the workload can move to different CPU cores depending on whether t", - "doc_type":"usermanual2", - "kw":"CPU Policy,CPU Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"CPU Policy", - "githuburl":"" - }, - { - "uri":"cce_10_0552.html", - "node_id":"cce_10_0552.xml", - "product_code":"cce", - "code":"196", - "des":"Kubernetes provides two CPU policies: none and static.none: The CPU policy is disabled by default, indicating the existing scheduling behavior.static: The static CPU core", - "doc_type":"usermanual2", - "kw":"Enhanced CPU Policy,CPU Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Enhanced CPU Policy", - "githuburl":"" - }, - { - "uri":"cce_10_0720.html", - "node_id":"cce_10_0720.xml", - "product_code":"cce", - "code":"197", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"GPU Scheduling", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"GPU Scheduling", - "githuburl":"" - }, - { - "uri":"cce_10_0345.html", - "node_id":"cce_10_0345.xml", - "product_code":"cce", - "code":"198", - "des":"You can use GPUs in CCE containers.A GPU node has been created. For details, see Creating a Node.The gpu-device-plugin (previously gpu-beta add-on) has been installed. Du", - "doc_type":"usermanual2", - "kw":"Default GPU Scheduling in Kubernetes,GPU Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Default GPU Scheduling in Kubernetes", - "githuburl":"" - }, - { - "uri":"cce_10_0423.html", - "node_id":"cce_10_0423.xml", - "product_code":"cce", - "code":"199", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Volcano Scheduling", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Volcano Scheduling", - "githuburl":"" - }, - { - "uri":"cce_10_0721.html", - "node_id":"cce_10_0721.xml", - "product_code":"cce", - "code":"200", - "des":"Volcano is a Kubernetes-based batch processing platform that supports machine learning, deep learning, bioinformatics, genomics, and other big data applications. 
It provi", - "doc_type":"usermanual2", - "kw":"Overview,Volcano Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Overview", - "githuburl":"" - }, - { - "uri":"cce_10_0722.html", - "node_id":"cce_10_0722.xml", - "product_code":"cce", - "code":"201", - "des":"Volcano is a Kubernetes-based batch processing platform with high-performance general computing capabilities like task scheduling engine, heterogeneous chip management, a", - "doc_type":"usermanual2", - "kw":"Scheduling Workloads,Volcano Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Scheduling Workloads", - "githuburl":"" - }, - { - "uri":"cce_10_0768.html", - "node_id":"cce_10_0768.xml", - "product_code":"cce", - "code":"202", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Resource Usage-based Scheduling", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Resource Usage-based Scheduling", - "githuburl":"" - }, - { - "uri":"cce_10_0773.html", - "node_id":"cce_10_0773.xml", - "product_code":"cce", - "code":"203", - "des":"Bin packing is an optimization algorithm that aims to properly allocate resources to each job and get the jobs done using the minimum amount of resources. After bin packi", - "doc_type":"usermanual2", - "kw":"Bin Packing,Resource Usage-based Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Bin Packing", - "githuburl":"" - }, - { - "uri":"cce_10_0766.html", - "node_id":"cce_10_0766.xml", - "product_code":"cce", - "code":"204", - "des":"Scheduling in a cluster is the process of binding pending pods to nodes, and is performed by a component called kube-scheduler or Volcano Scheduler. The scheduler uses a ", - "doc_type":"usermanual2", - "kw":"Descheduling,Resource Usage-based Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Descheduling", - "githuburl":"" - }, - { - "uri":"cce_10_0767.html", - "node_id":"cce_10_0767.xml", - "product_code":"cce", - "code":"205", - "des":"In scenarios such as node pool replacement and rolling node upgrade, an old resource pool needs to be replaced with a new one. To prevent the node pool replacement from a", - "doc_type":"usermanual2", - "kw":"Node Pool Affinity,Resource Usage-based Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Pool Affinity", - "githuburl":"" - }, - { - "uri":"cce_10_0774.html", - "node_id":"cce_10_0774.xml", - "product_code":"cce", - "code":"206", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Priority-based Scheduling", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Priority-based Scheduling", - "githuburl":"" - }, - { - "uri":"cce_10_0775.html", - "node_id":"cce_10_0775.xml", - "product_code":"cce", - "code":"207", - "des":"A pod priority indicates the importance of a pod relative to other pods. Volcano supports pod PriorityClasses in Kubernetes. After PriorityClasses are configured, the sch", - "doc_type":"usermanual2", - "kw":"Priority-based Scheduling,Priority-based Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Priority-based Scheduling", - "githuburl":"" - }, - { - "uri":"cce_10_0776.html", - "node_id":"cce_10_0776.xml", - "product_code":"cce", - "code":"208", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"AI Performance-based Scheduling", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"AI Performance-based Scheduling", - "githuburl":"" - }, - { - "uri":"cce_10_0777.html", - "node_id":"cce_10_0777.xml", - "product_code":"cce", - "code":"209", - "des":"Dominant Resource Fairness (DRF) is a scheduling algorithm based on the dominant resource of a container group. DRF scheduling can be used to enhance the service throughp", - "doc_type":"usermanual2", - "kw":"DRF,AI Performance-based Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"DRF", - "githuburl":"" - }, - { - "uri":"cce_10_0778.html", - "node_id":"cce_10_0778.xml", - "product_code":"cce", - "code":"210", - "des":"Gang scheduling is a scheduling algorithm that schedules correlated processes or threads to run simultaneously on different processors. It meets the scheduling requiremen", - "doc_type":"usermanual2", - "kw":"Gang,AI Performance-based Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Gang", - "githuburl":"" - }, - { - "uri":"cce_10_0425.html", - "node_id":"cce_10_0425.xml", - "product_code":"cce", - "code":"211", - "des":"When a node runs many CPU-bound pods, the workload can move to different CPU cores depending on whether the pod is throttled and which CPU cores are available at scheduli", - "doc_type":"usermanual2", - "kw":"NUMA Affinity Scheduling,Volcano Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"NUMA Affinity Scheduling", - "githuburl":"" - }, - { - "uri":"cce_10_0709.html", - "node_id":"cce_10_0709.xml", - "product_code":"cce", - "code":"212", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Cloud Native Hybrid Deployment", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Cloud Native Hybrid Deployment", - "githuburl":"" - }, - { - "uri":"cce_10_0384.html", - "node_id":"cce_10_0384.xml", - "product_code":"cce", - "code":"213", - "des":"Many services see surges in traffic. To ensure performance and stability, resources are often requested at the maximum needed. However, the surges may ebb very shortly an", - "doc_type":"usermanual2", - "kw":"Dynamic Resource Oversubscription,Cloud Native Hybrid Deployment,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Dynamic Resource Oversubscription", - "githuburl":"" - }, - { - "uri":"cce_10_0020.html", - "node_id":"cce_10_0020.xml", - "product_code":"cce", - "code":"214", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Network", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Network", - "githuburl":"" - }, - { - "uri":"cce_10_0010.html", - "node_id":"cce_10_0010.xml", - "product_code":"cce", - "code":"215", - "des":"You can learn about a cluster network from the following two aspects:What is a cluster network like? A cluster consists of multiple nodes, and pods (or containers) are ru", - "doc_type":"usermanual2", - "kw":"Overview,Network,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Overview", - "githuburl":"" - }, - { - "uri":"cce_10_0280.html", - "node_id":"cce_10_0280.xml", - "product_code":"cce", - "code":"216", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Container Network Models", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Container Network Models", - "githuburl":"" - }, - { - "uri":"cce_10_0281.html", - "node_id":"cce_10_0281.xml", - "product_code":"cce", - "code":"217", - "des":"The container network assigns IP addresses to pods in a cluster and provides networking services. In CCE, you can select the following network models for your cluster:Tun", - "doc_type":"usermanual2", - "kw":"Overview,Container Network Models,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Overview", - "githuburl":"" - }, - { - "uri":"cce_10_0282.html", - "node_id":"cce_10_0282.xml", - "product_code":"cce", - "code":"218", - "des":"The container tunnel network is constructed on but independent of the node network through tunnel encapsulation. 
This network model uses VXLAN to encapsulate Ethernet pac", - "doc_type":"usermanual2", - "kw":"Container Tunnel Network,Container Network Models,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Container Tunnel Network", - "githuburl":"" - }, - { - "uri":"cce_10_0283.html", - "node_id":"cce_10_0283.xml", - "product_code":"cce", - "code":"219", - "des":"The VPC network uses VPC routing to integrate with the underlying network. This network model is suitable for performance-intensive scenarios. The maximum number of nodes", - "doc_type":"usermanual2", - "kw":"VPC Network,Container Network Models,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"VPC Network", - "githuburl":"" - }, - { - "uri":"cce_10_0284.html", - "node_id":"cce_10_0284.xml", - "product_code":"cce", - "code":"220", - "des":"Developed by CCE, Cloud Native 2.0 network deeply integrates Elastic Network Interfaces (ENIs) and sub-ENIs of Virtual Private Cloud (VPC). Container IP addresses are all", - "doc_type":"usermanual2", - "kw":"Cloud Native 2.0 Network,Container Network Models,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Cloud Native 2.0 Network", - "githuburl":"" - }, - { - "uri":"cce_10_0247.html", - "node_id":"cce_10_0247.xml", - "product_code":"cce", - "code":"221", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Service", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Service", - "githuburl":"" - }, - { - "uri":"cce_10_0249.html", - "node_id":"cce_10_0249.xml", - "product_code":"cce", - "code":"222", - "des":"After a pod is created, the following problems may occur if you directly access the pod:The pod can be deleted and recreated at any time by a controller such as a Deploym", - "doc_type":"usermanual2", - "kw":"Overview,Service,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Overview", - "githuburl":"" - }, - { - "uri":"cce_10_0011.html", - "node_id":"cce_10_0011.xml", - "product_code":"cce", - "code":"223", - "des":"ClusterIP Services allow workloads in the same cluster to use their cluster-internal domain names to access each other.The cluster-internal domain name format is -

(Change history table: entries dated 2024-08-30, 2024-08-15, 2024-08-07, 2024-06-26, and 2024-05-30, with Add/Update items whose detailed descriptions are not included here.)

diff --git a/docs/cce/umn/cce_10_0003.html b/docs/cce/umn/cce_10_0003.html index a39d3d97..cf00e839 100644 --- a/docs/cce/umn/cce_10_0003.html +++ b/docs/cce/umn/cce_10_0003.html @@ -4,40 +4,38 @@

Scenario

You can reset a node to modify the node configuration, such as the node OS and login mode.

Resetting a node will reinstall the node OS and the Kubernetes software on the node. If a node becomes unavailable because you modified its configuration, you can reset the node to rectify the fault.
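Before resetting a node, you can confirm its status from the command line. The following is a minimal sketch using standard kubectl commands; <node-name> is a placeholder for the name shown in the node list:

    # List the nodes in the cluster; an unavailable node is typically shown as NotReady.
    kubectl get nodes -o wide

    # Check the conditions and recent events of the node you plan to reset.
    kubectl describe node <node-name>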

-

Constraints

  • For CCE standard clusters and CCE Turbo clusters to support node resetting, the version must be v1.13 or later.
+

Notes and Constraints

  • Node resetting is supported only by CCE standard and CCE Turbo clusters of v1.13 or later.
-

Precautions

  • Only worker nodes can be reset. If the node is still unavailable after the resetting, delete the node and create a new one.
  • After a node is reset, the node OS will be reinstalled. Before resetting a node, drain the node to gracefully evict the pods running on the node to other available nodes. Perform this operation during off-peak hours.
  • After a node is reset, its system disk and data disks will be cleared. Back up important data before resetting a node.
  • After a worker node with an extra data disk attached is reset, the attachment will be cleared. In this case, attach the disk again and data will be retained.
  • The IP addresses of the workload pods on the node will change, but the container network access is not affected.
  • There is remaining EVS disk quota.
  • While the node is being deleted, the backend will set the node to the unschedulable state.
  • Resetting a node will clear the Kubernetes labels and taints you added (those added by editing a node pool will not be lost). As a result, node-specific resources (such as local storage and workloads scheduled to this node) may be unavailable.
  • Resetting a node will cause PVC/PV data loss for the local PV associated with the node. These PVCs and PVs cannot be restored or used again. In this scenario, the pod that uses the local PV is evicted from the reset node. A new pod is created and stays in the pending state. This is because the PVC used by the pod has a node label, due to which the pod cannot be scheduled. After the node is reset, the pod may be scheduled to the reset node. In this case, the pod remains in the creating state because the underlying logical volume corresponding to the PVC does not exist.
+

Precautions

  • Only worker nodes can be reset. If a node is still unavailable after being reset, delete it and create a new one.
  • After a node is reset, the node OS will be reinstalled. Before resetting a node, drain it to gracefully evict the pods running on it to other available nodes (see the kubectl sketch after this list). Perform this operation during off-peak hours.
  • After a node is reset, its system disk and data disks will be cleared. Back up important data before resetting a node.
  • After you reset a worker node that has an extra data disk attached on the ECS console, the disk attachment will be cleared. In this case, attach the disk again; its data will be retained.
  • The IP addresses of the workload pods on the node will change, but the container network access is not affected.
  • Ensure that there is remaining EVS disk quota.
  • While the node is being deleted, the backend will set the node to the unschedulable state.
  • Resetting a node will clear the Kubernetes labels and taints you added (those added by editing a node pool will not be lost). As a result, node-specific resources (such as local storage and workloads scheduled to this node) may be unavailable.
  • Resetting a node will cause PVC/PV data loss for the local PV associated with the node. These PVCs and PVs cannot be restored or used again. In this scenario, the pod that uses the local PV is evicted from the reset node. A new pod is created and stays in the pending state. This is because the PVC used by the pod has a node label, due to which the pod cannot be scheduled. After the node is reset, the pod may be scheduled to the reset node. In this case, the pod remains in the creating state because the underlying logical volume corresponding to the PVC does not exist.
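The drain operation described above can also be performed with kubectl. The following is a minimal sketch, assuming a recent kubectl version and a placeholder node name <node-name>:

    # Cordon the node and evict its pods to other available nodes.
    # DaemonSet-managed pods are skipped; emptyDir data on the node is deleted.
    kubectl drain <node-name> --ignore-daemonsets --delete-emptydir-data

    # After the node has been reset and is Ready again, allow pods to be scheduled onto it.
    kubectl uncordon <node-name>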
-

Procedure

You can batch reset nodes using private images.

-
  1. Log in to the CCE console and click the cluster name to access the cluster console.
  2. In the navigation pane, choose Nodes. On the displayed page, click the Nodes tab.
  3. In the node list, select one or more nodes to be reset and choose More > Reset Node in the Operation column.
  4. In the displayed dialog box, click Next.

    • For nodes in the DefaultPool node pool, the parameter setting page is displayed. Set the parameters by referring to 5.
    • For a node you create in a node pool, resetting the node does not support parameter configuration. You can directly use the configuration image of the node pool to reset the node.
    -

  5. Specify node parameters.

    Compute Settings -
    Table 1 Configuration parameters

    Parameter

    +

    Resetting Nodes in the Default Pool

    1. Log in to the CCE console and click the cluster name to access the cluster console.
    2. In the navigation pane, choose Nodes. On the displayed page, click the Nodes tab.
    3. In the node list of the default pool, select one or more nodes to be reset and choose More > Reset Node in the Operation column.
    4. In the displayed dialog box, click Next.
    5. Specify node parameters.

      Compute Settings +
      - - - - - - - - - @@ -48,20 +46,20 @@

      Storage Settings

      Configure storage resources on a node for the containers running on it. -
      Table 1 Configuration parameters

      Parameter

      Description

      +

      Description

      Specifications

      +

      Specifications

      Specifications cannot be modified when you reset a node.

      +

      Specifications cannot be modified when you reset a node.

      Container Engine

      +

      Container Engine

      The container engines supported by CCE include Docker and containerd, which may vary depending on cluster types, cluster versions, and OSs. Select a container engine based on the information displayed on the CCE console. For details, see Mapping between Node OSs and Container Engines.

      +

      The container engines supported by CCE include Docker and containerd, which may vary depending on cluster types, cluster versions, and OSs. Select a container engine based on the information displayed on the CCE console. For details, see Mapping between Node OSs and Container Engines.

      OS

      +

      OS

      Select an OS type. Different types of nodes support different OSs.
      • Public image: Select a public image for the node.
      • Private image: Select a private image for the node.
      +
      Select an OS type. Different types of nodes support different OSs.
      • Public image: Select a public image for the node.
      • Private image: Select a private image for the node.
      NOTE:

      Service container runtimes share the kernel and underlying calls of nodes. To ensure compatibility, select a Linux distribution version that is the same as or close to that of the final service container image for the node OS.

      Login Mode

      +

      Login Mode

      • Key Pair

        Select the key pair used to log in to the node. You can select a shared key.

        +
      • Key Pair

        Select the key pair used to log in to the node. You can select a shared key.

        A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create Key Pair.

      Table 2 Configuration parameters

      Parameter

      +
      - - - - - @@ -71,48 +69,48 @@
      Advanced Settings -
      Table 2 Storage configuration parameters

      Parameter

      Description

      +

      Description

      System Disk

      +

      System Disk

      Directly use the system disk of the cloud server.

      +

      Directly use the system disk of the cloud server.

      Data Disk

      +

      Data Disk

      At least one data disk is required for the container runtime and kubelet. The data disk cannot be deleted or uninstalled. Otherwise, the node will be unavailable.

      +

      At least one data disk is required for the container runtime and kubelet. This data disk cannot be deleted or detached. Otherwise, the node will be unavailable.

      Click Expand to configure Data Disk Space Allocation, which is used to allocate space for container engines, images, and ephemeral storage for them to run properly. For details about how to allocate data disk space, see Data Disk Space Allocation.

      For other data disks, a raw disk is created without any processing by default. You can also click Expand and select Mount Disk to mount the data disk to a specified directory.

      + + + - - -
      Table 3 Advanced configuration parameters

      Parameter

      +
      - - - - - - - - - - - - - @@ -121,7 +119,46 @@

    6. Click Next: Confirm.
    7. Click Submit.
    8. -

      + +

      Resetting Nodes in a Node Pool

      Parameter configuration is not supported when you reset a node created in a node pool. The node is reset using the image configured for the node pool. After the reset, you can verify that the node is available again (see the kubectl sketch after the steps below).

      +
      +
      1. Log in to the CCE console and click the cluster name to access the cluster console.
      2. In the navigation pane, choose Nodes. On the displayed page, click the Nodes tab.
      3. In the node list of the target node pool, select a node to be reset and choose More > Reset Node in the Operation column.
      4. In the displayed dialog box, click Yes.
      +
      +
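      After the reset completes and the node is accepted back into the cluster, you can verify from the command line that it is available for scheduling again. A minimal sketch with standard kubectl commands; <node-name> and the timeout value are placeholders:

          # Wait until the reset node reports the Ready condition.
          kubectl wait --for=condition=Ready node/<node-name> --timeout=15m

          # Confirm that the node is schedulable (no SchedulingDisabled status) and check its labels.
          kubectl get node <node-name> --show-labels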

      Resetting Nodes in a Batch

      The procedure for resetting nodes in a batch varies depending on the application scenario.

      + +
      Table 3 Advanced configuration parameters

Parameter

Description

Resource Tag

You can add resource tags to classify resources. A maximum of eight resource tags can be added.

You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the efficiency of tag creation and resource migration.

CCE will automatically create the "CCE-Dynamic-Provisioning-Node=Node ID" tag.

Kubernetes Label

Click Add Label to set the key-value pairs attached to Kubernetes objects (such as pods). A maximum of 20 labels can be added.

Labels can be used to distinguish nodes. With workload affinity settings, pods can be scheduled to a specified node. For more information, see Labels and Selectors.
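For example, a workload can target nodes that carry such a label by using a nodeSelector, so that its pods run only on the labeled nodes. The following Deployment is a minimal sketch for illustration; the deploy_qa=true label and the nginx:alpine image are assumed examples, not values required by CCE.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      nodeSelector:
        deploy_qa: "true"        # Pods are scheduled only to nodes that carry this label.
      containers:
      - name: nginx
        image: nginx:alpine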

Taint

This parameter is left blank by default. You can add taints to configure anti-affinity for the node. A maximum of 20 taints are allowed for each node. Each taint contains the following parameters:
• Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
• Value: A value must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed.
• Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
      NOTICE:
      • If taints are used, you must configure tolerations in the YAML files of pods. Otherwise, scale-up may fail or pods cannot be scheduled onto the added nodes.
      • After a node pool is created, you can click Edit to modify its configuration. The modification will be synchronized to all nodes in the node pool.
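As the notice above states, pods must tolerate a node's taints to be scheduled onto it. The following pod spec is a minimal sketch of such a toleration; the key1=value1:NoSchedule taint is an assumed example.

apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    image: nginx:alpine
  tolerations:
  - key: "key1"            # Must match the taint key configured on the node.
    operator: "Equal"
    value: "value1"        # Must match the taint value.
    effect: "NoSchedule"   # Must match the taint effect.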

      Max. Pods

Maximum number of pods that can run on the node, including the default system pods.

      This limit prevents the node from being overloaded with pods.

      Pre-installation Command

Pre-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded. The characters of the pre-installation and post-installation scripts are counted together, and the total number of characters after transcoding cannot exceed 10240.

      The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed.

      Post-installation Command

Post-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded. The characters of the pre-installation and post-installation scripts are counted together, and the total number of characters after transcoding cannot exceed 10240.

      The script will be executed after Kubernetes software is installed, which does not affect the installation.
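For reference, a pre-installation or post-installation command is ordinary shell code that runs on the node. The following minimal sketch only records a timestamp and tunes one kernel parameter; it is an illustrative assumption, not a script required by CCE.

#!/bin/bash
# Record when the custom script ran (illustrative only).
echo "custom node setup finished at $(date)" >> /var/log/custom-node-setup.log
# Example tuning step; adjust or remove according to your own requirements.
sysctl -w net.ipv4.tcp_tw_reuse=1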

Scenario

Supported or Not

Description

Resetting nodes in the default pool in a batch

Supported in some scenarios

This operation can be performed only if the flavors, AZs, and disk configurations of all nodes are the same.

Resetting nodes in a node pool in a batch

Supported in some scenarios

This operation can be performed only if the disk configurations of all nodes are the same.

Resetting nodes in different node pools in a batch

Not supported

Only nodes in the same node pool can be reset in a batch.

      diff --git a/docs/cce/umn/cce_10_0004.html b/docs/cce/umn/cce_10_0004.html index 4a2ed4e0..5ddb441b 100644 --- a/docs/cce/umn/cce_10_0004.html +++ b/docs/cce/umn/cce_10_0004.html @@ -33,6 +33,12 @@

      false indicates that the node is not a bare metal node.

node.kubernetes.io/container-engine

Container engine

Example: docker or containerd

      node.kubernetes.io/instance-type

      Node specifications

      @@ -74,11 +80,6 @@

      Node OS kernel version


      accelerator

      GPU node labels.

      @@ -93,7 +94,7 @@
Adding or Deleting a Node Label

1. Log in to the CCE console and click the cluster name to access the cluster console.
2. In the navigation pane, choose Nodes. On the displayed page, click the Nodes tab, select the target node, and click Labels and Taints in the upper left corner.
3. In the displayed dialog box, click Add operation under Batch Operation, and then choose Add/Update or Delete.

          Enter the key and value of the label to be added or deleted, and click OK.

          For example, the key is deploy_qa and the value is true, indicating that the node is used to deploy the QA (test) environment.

        4. After the label is added, check the added label in node data.
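If kubectl access to the cluster has been configured, the same label can also be managed from the command line. The following commands are a minimal sketch that reuses the deploy_qa=true example above; the node name 192.168.0.100 is an assumed placeholder.

# Add or update the label on a node.
kubectl label node 192.168.0.100 deploy_qa=true --overwrite
# Check the labels of the node.
kubectl get node 192.168.0.100 --show-labels
# Delete the label (note the trailing hyphen).
kubectl label node 192.168.0.100 deploy_qa-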
        diff --git a/docs/cce/umn/cce_10_0006.html b/docs/cce/umn/cce_10_0006.html index b3d3578e..d55f867c 100644 --- a/docs/cce/umn/cce_10_0006.html +++ b/docs/cce/umn/cce_10_0006.html @@ -25,8 +25,8 @@

        DaemonSets are closely related to nodes. If a node becomes faulty, the DaemonSet will not create the same pods on other nodes.

        Figure 3 DaemonSet
Overview of Job and CronJob

Jobs and CronJobs allow you to run short-lived, one-off tasks in batches. They ensure that the task pods run to completion.

• A Job is a resource object used by Kubernetes to control batch tasks. Jobs differ from long-running workloads such as Deployments and StatefulSets: a Job starts and terminates at specific times, while a long-running workload keeps running unless it is terminated. The pods managed by a Job are automatically removed after they successfully complete their tasks, based on user configurations.
• A CronJob runs a Job periodically on a specified schedule. A CronJob object is similar to a line of a crontab file in Linux.

      This run-to-completion feature of jobs is especially suitable for one-off tasks, such as continuous integration (CI).
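For reference, the following manifests are a minimal sketch of a one-off Job and a CronJob that runs every minute; the busybox image and the commands are assumed examples. The batch/v1 CronJob API requires a cluster of v1.21 or later; earlier clusters use batch/v1beta1.

apiVersion: batch/v1
kind: Job
metadata:
  name: one-off-task
spec:
  completions: 1                  # Run the task once to completion.
  template:
    spec:
      containers:
      - name: task
        image: busybox
        command: ["sh", "-c", "echo task finished"]
      restartPolicy: Never
---
apiVersion: batch/v1
kind: CronJob
metadata:
  name: print-date
spec:
  schedule: "*/1 * * * *"         # Crontab-style schedule: every minute.
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: task
            image: busybox
            command: ["sh", "-c", "date"]
          restartPolicy: Never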

      Workload Lifecycle

      diff --git a/docs/cce/umn/cce_10_0007.html b/docs/cce/umn/cce_10_0007.html index 22595b6d..15e58c92 100644 --- a/docs/cce/umn/cce_10_0007.html +++ b/docs/cce/umn/cce_10_0007.html @@ -1,6 +1,6 @@ -


      Managing Workloads

      Scenario

      After a workload is created, you can upgrade, monitor, roll back, or delete the workload, as well as edit its YAML file.
      @@ -25,8 +25,8 @@ - @@ -76,7 +76,7 @@

      Viewing Logs

      You can view logs of Deployments, StatefulSets, DaemonSets, and jobs. This section uses a Deployment as an example to describe how to view logs.

      Before viewing logs, ensure that the time of the browser is the same as that on the backend server.

        1. Log in to the CCE console, go to an existing cluster, and choose Workloads in the navigation pane.
        2. Click the Deployments tab and click View Log of the target workload.

          In the displayed View Log window, you can view logs.

          The displayed logs are standard output logs of containers and do not have persistence and advanced O&M capabilities. To use more comprehensive log capabilities, see Logs. If the function of collecting standard output is enabled for the workload (enabled by default), you can go to AOM to view more workload logs. For details, see Collecting Container Logs Using ICAgent.
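If kubectl access has been configured, the same standard output logs can also be read from the command line. A minimal sketch; the app=nginx label, the pod name placeholder, and the default namespace are assumed examples.

# Print the last 100 lines of logs from the pods selected by label.
kubectl logs -l app=nginx -n default --tail=100
# Stream (follow) the logs of a single pod.
kubectl logs -f <pod-name> -n default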

        @@ -88,11 +88,11 @@

9. Upgrade the workload based on service requirements. The method for setting parameters is the same as that for creating a workload.
    10. After the update is complete, click Upgrade Workload, manually confirm the YAML file, and submit the upgrade.

      Editing a YAML file

      You can modify and download YAML files of Deployments, StatefulSets, DaemonSets, CronJobs, and pods on the CCE console. YAML files of jobs can only be viewed, copied, and downloaded. This section uses a Deployment as an example to describe how to edit the YAML file.

      1. Log in to the CCE console, go to an existing cluster, and choose Workloads in the navigation pane.
      2. Click the Deployments tab and choose More > Edit YAML in the Operation column of the target workload. In the dialog box that is displayed, modify the YAML file.
      3. Click OK.
      4. (Optional) In the Edit YAML window, click Download to download the YAML file.
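The console operation above roughly corresponds to editing the live object with kubectl. A minimal sketch; the Deployment name nginx and the default namespace are assumed examples.

# Open the live Deployment definition in an editor; changes are applied when the file is saved.
kubectl edit deployment nginx -n default
# Download the current YAML to a local file.
kubectl get deployment nginx -n default -o yaml > nginx-deployment.yaml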

      Rolling Back a Workload (Available Only for Deployments)

      CCE records the release history of all Deployments. You can roll back a Deployment to a specified version.

      1. Log in to the CCE console, go to an existing cluster, and choose Workloads in the navigation pane.
      2. Click the Deployments tab and choose More > Roll Back in the Operation column of the target workload.
      3. Switch to the Change History tab page, click Roll Back to This Version of the target version, manually confirm the YAML file, and click OK.
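If you work with kubectl, the release history and rollback described above roughly correspond to the kubectl rollout commands. A minimal sketch; the Deployment name nginx and revision 2 are assumed examples.

# List the recorded revisions of the Deployment.
kubectl rollout history deployment/nginx -n default
# Roll back to the previous revision, or to a specific one with --to-revision.
kubectl rollout undo deployment/nginx -n default --to-revision=2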

      Redeploying a Workload

      After you redeploy a workload, all pods in the workload will be restarted. This section uses Deployments as an example to illustrate how to redeploy a workload.

      1. Log in to the CCE console, go to an existing cluster, and choose Workloads in the navigation pane.
      2. Click the Deployments tab and choose More > Redeploy in the Operation column of the target workload.
      3. In the dialog box that is displayed, click Yes to redeploy the workload.
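With kubectl, a comparable effect (restarting all pods of a workload) can be achieved by triggering a new rollout. A minimal sketch; the Deployment name nginx is an assumed example.

# Restart all pods of the Deployment by triggering a new rollout.
kubectl rollout restart deployment/nginx -n default
# Wait until the restart is complete.
kubectl rollout status deployment/nginx -n default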
      @@ -109,13 +109,13 @@


      Deleting a Workload/Job

      You can delete a workload or job that is no longer needed. Deleted workloads or jobs cannot be recovered. This section uses a Deployment as an example to describe how to delete a workload.

      1. Log in to the CCE console, go to an existing cluster, and choose Workloads in the navigation pane.
      2. In the same row as the workload you will delete, choose Operation > More > Delete.

        Read the system prompts carefully. A workload cannot be recovered after it is deleted. Exercise caution when performing this operation.

      3. Click Yes.

        • If the node where the pod is located is unavailable or shut down and the workload cannot be deleted, you can forcibly delete the pod from the pod list on the workload details page.
        • Ensure that the storage volumes to be deleted are not used by other workloads. If these volumes are imported or have snapshots, you can only unbind them.
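For reference, deleting the same objects with kubectl looks as follows; the names are assumed examples, and the deletion is just as irreversible as on the console.

# Delete a Deployment; its pods are deleted with it.
kubectl delete deployment nginx -n default
# Delete a job.
kubectl delete job one-off-task -n default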


      Events

      This section uses a Deployment as an example to describe how to view events of a workload. To view the event of a job or CronJob, click View Event in the Operation column of the target workload.

      1. Log in to the CCE console, go to an existing cluster, and choose Workloads in the navigation pane.
2. On the Deployments tab page, click the target workload. On the Pods tab page, click View Events to view the event name, event type, number of occurrences, Kubernetes event, first occurrence time, and last occurrence time.

        Event data will be retained for one hour and then automatically deleted.
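The same events can also be queried with kubectl; as noted above, they are only retained for one hour. A minimal sketch; the pod name is a placeholder and the default namespace is an assumed example.

# List recent events in the namespace, oldest first.
kubectl get events -n default --sort-by=.metadata.creationTimestamp
# Show the events related to a single pod.
kubectl describe pod <pod-name> -n default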

      diff --git a/docs/cce/umn/cce_10_0009.html b/docs/cce/umn/cce_10_0009.html index 99b00079..6caddac7 100644 --- a/docs/cce/umn/cce_10_0009.html +++ b/docs/cce/umn/cce_10_0009.html @@ -10,7 +10,7 @@

      Enter the username and password used to access the third-party image repository.

    12. When creating a workload, enter a private image path in the format of domainname/namespace/imagename:tag in Image Name and select the key created in 1.
    13. Set other parameters and click Create Workload.

        Using kubectl

        1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
        2. Use kubectl to create a secret of the kubernetes.io/dockerconfigjson.

          kubectl create secret docker-registry myregistrykey  -n default --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL

          In the preceding command, myregistrykey indicates the key name, default indicates the namespace where the key is located, and other parameters are as follows:

          • DOCKER_REGISTRY_SERVER: address of a third-party image repository, for example, www.3rdregistry.com or 10.10.10.10:443
          • DOCKER_USER: account used for logging in to a third-party image repository
          • DOCKER_PASSWORD: password used for logging in to a third-party image repository
          • DOCKER_EMAIL: email of a third-party image repository

        3. Use a third-party image to create a workload.

          A kubernetes.io/dockerconfigjson secret is used for authentication when you obtain a private image. The following is an example of using the myregistrykey for authentication.
          apiVersion: v1
          @@ -30,7 +30,7 @@ spec:
           
          diff --git a/docs/cce/umn/cce_10_0010.html b/docs/cce/umn/cce_10_0010.html index dbbe0ae1..8fe2b3c3 100644 --- a/docs/cce/umn/cce_10_0010.html +++ b/docs/cce/umn/cce_10_0010.html @@ -4,11 +4,11 @@

          You can learn about a cluster network from the following two aspects:

          • What is a cluster network like? A cluster consists of multiple nodes, and pods (or containers) are running on the nodes. Nodes and containers need to communicate with each other. For details about the cluster network types and their functions, see Cluster Network Structure.
          • How is pod access implemented in a cluster? Accessing a pod or container is a process of accessing services of a user. Kubernetes provides Service and Ingress to address pod access issues. This section summarizes common network access scenarios. You can select the proper scenario based on site requirements. For details about the network access scenarios, see Access Scenarios.

          Cluster Network Structure

          All nodes in the cluster are located in a VPC and use the VPC network. The container network is managed by dedicated network add-ons.


          • Node Network

            A node network assigns IP addresses to hosts (nodes in the figure above) in a cluster. Select a VPC subnet as the node network of the CCE cluster. The number of available IP addresses in a subnet determines the maximum number of nodes (including master nodes and worker nodes) that can be created in a cluster. This quantity is also affected by the container network. For details, see the container network model.

          • Container Network

            A container network assigns IP addresses to pods in a cluster. CCE inherits the IP-Per-Pod-Per-Network network model of Kubernetes. That is, each pod has an independent IP address on a network plane and all containers in a pod share the same network namespace. All pods in a cluster exist in a directly connected flat network. They can access each other through their IP addresses without using NAT. Kubernetes only provides a network mechanism for pods, but does not directly configure pod networks. The configuration of pod networks is implemented by specific container network add-ons. The container network add-ons are responsible for configuring networks for pods and managing container IP addresses.

            Currently, CCE supports the following container network models:

            • Container tunnel network: The container tunnel network is constructed on but independent of the node network through tunnel encapsulation. This network model uses VXLAN to encapsulate Ethernet packets into UDP packets and transmits them in tunnels. Open vSwitch serves as the backend virtual switch.
            • VPC network: The VPC network model seamlessly combines VPC routing with the underlying network, making it ideal for high-performance scenarios. However, the maximum number of nodes allowed in a cluster is determined by the VPC route quota. Each node is assigned a CIDR block of a fixed size. The VPC network model outperforms the container tunnel network model in terms of performance because it does not have tunnel encapsulation overhead. In addition, as VPC routing includes routes to node IP addresses and the container CIDR block, container pods in a cluster can be directly accessed from outside the cluster.
            • Developed by CCE, Cloud Native 2.0 network deeply integrates Elastic Network Interfaces (ENIs) and Sub Network Interfaces (sub-ENIs) of VPC. Container IP addresses are allocated from the VPC CIDR block. ELB passthrough networking is supported to direct access requests to containers. Security groups and EIPs are bound to deliver high performance.

            The performance, networking scale, and application scenarios of a container network vary according to the container network model. For details about the functions and features of different container network models, see Overview.

          • Service Network

            Service is also a Kubernetes object. Each Service has a static IP address. When creating a cluster on CCE, you can specify the Service CIDR block. The Service CIDR block cannot overlap with the node or container CIDR block. The Service CIDR block can be used only within a cluster.

          @@ -27,7 +27,7 @@
          • Intra-cluster access: A ClusterIP Service is used for workloads in the same cluster to access each other.
          • Access from outside a cluster: A Service (NodePort or LoadBalancer type) or an ingress is recommended for a workload outside a cluster to access workloads in the cluster.
            • Access through the public network: An EIP should be bound to the node or load balancer.
            • Access through the private network: The workload can be accessed through the internal IP address of the node or load balancer. If workloads are located in different VPCs, a peering connection is required to enable communication between different VPCs.
          • The workload can access the external network as follows:
            • Accessing an intranet: The workload accesses the intranet address, but the implementation method varies depending on container network models. Ensure that the peer security group allows the access requests from the container CIDR block.
            • Accessing a public network: Assign an EIP to the node where the workload runs (when the VPC network or tunnel network model is used), bind an EIP to the pod IP address (when the Cloud Native Network 2.0 model is used), or configure SNAT rules through the NAT gateway. For details, see Accessing the Internet from a Container.
          Figure 3 Network access diagram
          diff --git a/docs/cce/umn/cce_10_0011.html b/docs/cce/umn/cce_10_0011.html index 54cfdbfd..89d12a2e 100644 --- a/docs/cce/umn/cce_10_0011.html +++ b/docs/cce/umn/cce_10_0011.html @@ -4,14 +4,14 @@

          Scenario

          ClusterIP Services allow workloads in the same cluster to use their cluster-internal domain names to access each other.

          The cluster-internal domain name format is <Service name>.<Namespace of the workload>.svc.cluster.local:<Port>, for example, nginx.default.svc.cluster.local:80.

          Figure 1 shows the mapping relationships between access channels, container ports, and access ports.

          Figure 1 Intra-cluster access (ClusterIP)

              Creating a ClusterIP Service

              1. Log in to the CCE console and click the cluster name to access the cluster console.
              2. In the navigation pane, choose Services & Ingresses. In the upper right corner, click Create Service.
              3. Configure intra-cluster access parameters.

                • Service Name: Specify a Service name, which can be the same as the workload name.
                • Service Type: Select ClusterIP.
                • Namespace: namespace that the workload belongs to.
                • Selector: Add a label and click Confirm. The Service will use this label to select pods. You can also click Reference Workload Label to use the label of an existing workload. In the dialog box that is displayed, select a workload and click OK.
                • IPv6: This function is disabled by default. After this function is enabled, the cluster IP address of the Service changes to an IPv6 address. This parameter is available only in clusters of v1.15 or later with IPv6 enabled (set during cluster creation).
                • Ports
                  • Protocol: protocol used by the Service.
                  • Service Port: port used by the Service. The port number ranges from 1 to 65535.
                  • Container Port: listener port of the workload. For example, Nginx uses port 80 by default.

              4. Click OK.

                Setting the Access Type Using kubectl

                You can configure Service access using kubectl. This section uses an Nginx workload as an example to describe how to implement intra-cluster access using kubectl.

                +
                1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
                2. Create and edit the nginx-deployment.yaml and nginx-clusterip-svc.yaml files.

                  The file names are user-defined. nginx-deployment.yaml and nginx-clusterip-svc.yaml are merely example file names.

                  vi nginx-deployment.yaml
                  apiVersion: apps/v1
                   kind: Deployment
                   metadata:
                  @@ -64,7 +64,7 @@ spec:
                   NAME              TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)    AGE
                   kubernetes        ClusterIP   10.247.0.1     <none>        443/TCP    4d6h
                   nginx-clusterip   ClusterIP   10.247.74.52   <none>        8080/TCP   14m

                4. Access the Service.

                  A Service can be accessed from containers or nodes in a cluster.

                  Create a pod, access the pod, and run the curl command to access IP address:Port or the domain name of the Service, as shown in the following figure.

                  The domain name suffix can be omitted. In the same namespace, you can directly use nginx-clusterip:8080 for access. In other namespaces, you can use nginx-clusterip.default:8080 for access.

                  # kubectl run -i --tty --image nginx:alpine test --rm /bin/sh
                  diff --git a/docs/cce/umn/cce_10_0012.html b/docs/cce/umn/cce_10_0012.html
                  index 59f8bd58..164f3039 100644
                  --- a/docs/cce/umn/cce_10_0012.html
                  +++ b/docs/cce/umn/cce_10_0012.html
                  @@ -3,24 +3,22 @@
                   

                  Creating a Node Pool

                  Scenario

                  This section describes how to create a node pool and perform operations on the node pool. For details about how a node pool works, see Node Pool Overview.

Constraints

• The Autoscaler add-on needs to be installed for node auto scaling. For details about the add-on installation and parameter configuration, see CCE Cluster Autoscaler.

                    Procedure

                    1. Log in to the CCE console.
                    2. Click the cluster name to access the cluster console. Choose Nodes in the navigation pane. In the right pane, click the Node Pools tab.
                    3. In the upper right corner of the page, click Create Node Pool.

                      Basic Settings

      Table 1 Workload/Job management

      Operation

      Edit YAML

You can modify and download YAML files of Deployments, StatefulSets, DaemonSets, CronJobs, and pods on the CCE console. YAML files of jobs can only be viewed, copied, and downloaded.
      NOTE:

      If an existing CronJob is modified, the new configuration takes effect for the new pods, and the existing pods continue to run without any change.


      Configurations

You can configure the flavor and OS of a cloud server, on which your containerized applications run.
      Table 1 Basic settings

Parameter

Description

Node Pool Name

Name of a node pool. By default, the name is in the format of Cluster name-nodepool-Random number. If you do not want to use the default name format, you can customize the name.

Expected Initial Nodes

Number of nodes to be created in this node pool. A maximum of 50 nodes can be created at a time.


      Storage Settings

Configure storage resources on a node for the containers running on it. Select a disk type and configure its size based on service requirements.
      Table 2 Node configuration parameters

      Parameter

      Description

AZ

AZ where the node is located. Nodes in a cluster can be created in different AZs for higher reliability. The value cannot be changed after the node is created.

Select Random to deploy your node in a random AZ based on the selected node flavor.

An AZ is a physical region where resources use independent power supply and networks. AZs are physically isolated but interconnected through an internal network. To enhance workload availability, create nodes in different AZs.

Node Type

Select a node type based on service requirements. Then, you can select a proper flavor from the node flavor list.

      CCE standard clusters support the following node types:
      • ECS (VM): A virtualized ECS is used as a cluster node.
      CCE Turbo clusters support the following node types:
      • ECS (VM): A virtualized ECS is used as a cluster node. A CCE Turbo cluster supports only the cloud servers that allow multiple ENIs. Select a server type displayed on the CCE console.

Specifications

Select a node flavor based on service requirements. The available node flavors vary depending on regions or AZs. For details, see the CCE console.
      NOTE:
      • If a node pool is configured with multiple node flavors, only the flavors (which can be located in different AZs) of the same node type are supported. For example, a node pool consisting of general computing-plus nodes supports only general computing-plus node flavors, but not the flavors of general computing nodes.
      • A maximum of 10 node flavors can be added to a node pool (the flavors in different AZs are counted separately). When adding a node flavor, you can choose multiple AZs, but you need to specify them.
      • Nodes in a newly created node pool are created using the default flavor. If the resources for the default flavor are insufficient, node creation will fail.
      • After a node pool is created, the flavors of existing nodes cannot be deleted.

      Container Engine


      The container engines supported by CCE include Docker and containerd, which may vary depending on cluster types, cluster versions, and OSs. Select a container engine based on the information displayed on the CCE console. For details, see Mapping between Node OSs and Container Engines.

      OS

      Select an OS type. Different types of nodes support different OSs.
      • Public image: Select a public image for the node.
      • Private image: Select a private image for the node.
      NOTE:

      Service container runtimes share the kernel and underlying calls of nodes. To ensure compatibility, select a Linux distribution version that is the same as or close to that of the final service container image for the node OS.

      Login Mode

• Key Pair

  Select the key pair used to log in to the node. You can select a shared key.

        A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create Key Pair.


      Network Settings

Configure networking resources to allow node and containerized application access.
      Table 3 Storage configuration parameters

      Parameter

Description

System Disk

System disk used by the node OS. The value ranges from 40 GiB to 1024 GiB. The default value is 50 GiB.

Encryption: System disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption setting. Only the nodes of the Elastic Cloud Server (VM) type in certain regions support system disk encryption. For details, see the console.
• Not encrypted is selected by default.
• If you select Enabled (key) for System Disk Encryption, choose an existing key. If no key is available, click View Key List and create a key. After the key is created, click the refresh icon next to the text box.
• If you select Enabled (KMS key ID) for System Disk Encryption, enter a KMS key (which can be shared by others) in the current region.

      Data Disk

At least one data disk is required for the container runtime and kubelet. The data disk cannot be deleted or unmounted. Otherwise, the node will be unavailable.

      • First data disk: used for container runtime and kubelet components. The value ranges from 20 GiB to 32768 GiB. The default value is 100 GiB.
      • Other data disks: You can set the data disk size to a value ranging from 10 GiB to 32768 GiB. The default value is 100 GiB.
      NOTE:
      • If the node flavor is disk-intensive or ultra-high I/O, one data disk can be a local disk.
      • Local disks may break down and do not ensure data reliability. Store your service data in EVS disks, which are more reliable than local disks.

      Advanced Settings

Expand the area and configure the following parameters:
        • Data Disk Space Allocation: allocates space for container engines, images, and ephemeral storage for them to run properly. For details about how to allocate data disk space, see Data Disk Space Allocation.
        • Data Disk Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption setting. BMS nodes do not support data disk encryption that is available only in certain regions. For details, see the console.
          • Not encrypted is selected by default.
          • If you select Enabled (key) for Data Disk Encryption, choose an existing key. If no key is available, click View Key List and create a key. After the key is created, click the refresh icon next to the text box.
          • If you select Enabled (KMS key ID) for Data Disk Encryption, enter a KMS key (which can be shared by others) in the current region.

        Adding data disks

A maximum of 16 data disks can be attached to an ECS and 10 to a BMS. By default, a raw disk is created without any processing. You can also click Expand and select any of the following options:
        • Default: By default, a raw disk is created without any processing.
        • Mount Disk: The data disk is attached to a specified directory.
        • Use as PV: applicable when there is a high performance requirement on PVs. The node.kubernetes.io/local-storage-persistent label is added to the node with PV configured. The value is linear or striped.
        • Use as ephemeral volume: applicable when there is a high performance requirement on emptyDir.
        NOTE:
        • Local PVs are supported only when the cluster version is v1.21.2-r0 or later and the Everest add-on version is 2.1.23 or later. Version 2.1.23 or later is recommended.
        • Local EVs are supported only when the cluster version is v1.21.2-r0 or later and the Everest add-on version is 1.2.29 or later.
        Local PVs and local EVs can be written in the following modes:
        • Linear: A linear logical volume integrates one or more physical volumes. Data is written to the next physical volume when the previous one is used up.
        • Striped: A striped logical volume stripes data into blocks of the same size and stores them in multiple physical volumes in sequence. This allows data to be concurrently read and written. A storage pool consisting of striped volumes cannot be scaled-out. This option can be selected only when multiple volumes exist.
      Table 4 Configuration parameters

Parameter

Description

Virtual Private Cloud

The VPC that the cluster belongs to is used by default and cannot be changed.

Node Subnet

The node subnet selected during cluster creation is used by default. You can choose another subnet instead.

• Multiple subnets: You can select multiple subnets in the same VPC for nodes. Newly added nodes will preferentially use the IP addresses from the top-ranking subnet.
• Single subnet: Only one subnet is configured for your node pool. If the IP addresses of a single subnet are insufficient, configure multiple subnets. Otherwise, a node pool scale-out may fail.

Node IP Address

Random allocation is supported.

Associate Security Group

Security group used by the nodes created in the node pool. A maximum of five security groups can be selected.

      When a cluster is created, a node security group named {Cluster name}-cce-node-{Random ID} is created and used by default.

      Traffic needs to pass through certain ports in the node security group to ensure node communications. Ensure that you have enabled these ports if you select another security group.

      NOTE:

      After a node pool is created, its associated security group cannot be modified.

      @@ -156,68 +149,89 @@

      Advanced Settings

Configure advanced node capabilities such as labels, taints, and startup commands.
      @@ -188,7 +188,7 @@ spec:
      Hello

      Using kubectl

        1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
        2. Create a file named nginx-configmap.yaml and edit it.

          vi nginx-configmap.yaml

          As shown in the following example, after the ConfigMap volume is mounted, a configuration file with the key as the file name and value as the file content is generated in the /etc/config directory of the container.

          apiVersion: apps/v1
           kind: Deployment
          @@ -216,7 +216,7 @@ spec:
                 configMap:
                   name: cce-configmap                 # Name of the referenced ConfigMap.

        3. Create a workload.

          kubectl apply -f nginx-configmap.yaml

4. After the workload runs properly, the SPECIAL_LEVEL and SPECIAL_TYPE files will be generated in the /etc/config directory. The contents of the files are Hello and CCE, respectively.

            1. Run the following command to view the created pod:
              kubectl get pod | grep nginx-configmap
              Expected output:
              nginx-configmap-***   1/1     Running   0              2m18s
            2. Run the following command to view the SPECIAL_LEVEL or SPECIAL_TYPE file in the pod:
              kubectl exec nginx-configmap-*** -- cat /etc/config/SPECIAL_LEVEL
              diff --git a/docs/cce/umn/cce_10_0016.html b/docs/cce/umn/cce_10_0016.html index bb7466e2..ef23fe95 100644 --- a/docs/cce/umn/cce_10_0016.html +++ b/docs/cce/umn/cce_10_0016.html @@ -22,12 +22,12 @@ data:
              • Added from secret: Select a secret and import all keys in the secret as environment variables.
              • Added from secret key: Import the value of a key in a secret as the value of an environment variable.
                • Variable Name: name of an environment variable in the workload. The name can be customized and is set to the key name selected in the secret by default.
                • Variable Value/Reference: Select a secret and the key to be imported. The corresponding value is imported as a workload environment variable.

                For example, after you import the value of username in secret mysecret as the value of workload environment variable username, an environment variable named username exists in the container.


            4. Configure other workload parameters and click Create Workload.

              After the workload runs properly, log in to the container and run the following statement to check whether the secret has been set as an environment variable of the workload:

              printenv username

              If the output is the same as the content in the secret, the secret has been set as an environment variable of the workload.

            Using kubectl

              1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
              2. Create a file named nginx-secret.yaml and edit it.

                vi nginx-secret.yaml

                Content of the YAML file:

                • Added from secret: To add all data in a secret to environment variables, use the envFrom parameter. The keys in the secret will become names of environment variables in a workload.
                  apiVersion: apps/v1
                   kind: Deployment
                  @@ -93,7 +93,7 @@ spec:
                   
                   

                  Configuring the Data Volume of a Workload

                  You can mount a secret as a volume to the specified container path. Contents in a secret are user-defined. Before that, create a secret. For details, see Creating a Secret.

                  Using the CCE console

                    1. Log in to the CCE console and click the cluster name to access the cluster console.
                    2. Choose Workloads in the navigation pane. In the right pane, click the Deployments tab. Click Create Workload in the upper right corner.

                      When creating a workload, click Data Storage in the Container Settings area. Click Add Volume and select Secret from the drop-down list.

                    3. Select parameters for mounting a secret volume, as shown in Table 1.
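For comparison, mounting a secret as a volume can also be declared directly in the workload manifest. The following Deployment is a minimal sketch; the secret name mysecret, the mount path /etc/secret, and the nginx:alpine image are assumed examples.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:alpine
        volumeMounts:
        - name: secret-volume
          mountPath: /etc/secret       # Each key in the secret becomes a file in this directory.
          readOnly: true
      volumes:
      - name: secret-volume
        secret:
          secretName: mysecret         # Name of the referenced secret.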

      Table 5 Advanced configuration parameters

      Parameter

      Description

      +

      Description

      Resource Tag

      +

      Resource Tag

      You can add resource tags to classify resources.

      -

      You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

      +

      You can add resource tags to classify resources.

      +

      You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

      CCE will automatically create the "CCE-Dynamic-Provisioning-Node=Node ID" tag.

      Kubernetes Label

      +

      Kubernetes Label

      A Kubernetes label is a key-value pair added to a Kubernetes object (such as a pod). After specifying a label, click Add. A maximum of 20 labels can be added.

      -

      Labels can be used to distinguish nodes. With workload affinity settings, container pods can be scheduled to a specified node. For more information, see Labels and Selectors.

      +

      A key-value pair added to a Kubernetes object (such as a pod). After specifying a label, click Add Label for more. A maximum of 20 labels can be added.

      +

      Labels can be used to distinguish nodes. With workload affinity settings, pods can be scheduled to a specified node. For more information, see Labels and Selectors.

      Taint

      +

      Taint

      This parameter is left blank by default. You can add taints to configure anti-affinity for the node. A maximum of 20 taints are allowed for each node. Each taint contains the following parameters:
      • Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
      • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
      • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
      +
      This parameter is left blank by default. You can add taints to configure anti-affinity for the node. A maximum of 20 taints are allowed for each node. Each taint contains the following parameters:
      • Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
      • Value: A value must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed.
      • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.

      For details, see Managing Node Taints.

      NOTE:

      For a cluster of v1.19 or earlier, the workload may have been scheduled to a node before the taint is added. To avoid such a situation, select a cluster of v1.19 or later.

      Max. Pods

      +

      Synchronization for Existing Nodes

      Maximum number of pods that can run on the node, including the default system pods.

      +

      After the options are selected, changes to resource tags and Kubernetes labels/taints in a node pool will be synchronized to existing nodes in the node pool.

      +

      New Node Scheduling

      +

      Default scheduling policy for the nodes newly added to a node pool. If you select Unschedulable, newly created nodes in the node pool will be labeled as unschedulable. In this way, you can perform some operations on the nodes before pods are scheduled to these nodes.

      +

      Scheduled Scheduling: After scheduled scheduling is enabled, new nodes will be automatically scheduled after the custom time expires.

      +
      • Disabled: By default, scheduled scheduling is not enabled for new nodes. To manually enable this function, go to the node list. For details, see Configuring a Node Scheduling Policy in One-Click Mode.
      • Custom: Specify how long a new node remains unschedulable before it is automatically scheduled. The value ranges from 0 to 99, in minutes.
      +
      NOTE:
      • If auto scaling of node pools is also required, ensure that the scheduled scheduling time is less than 15 minutes. If a node added by Autoscaler cannot be scheduled for more than 15 minutes, Autoscaler determines that the scale-out has failed and triggers another scale-out. Additionally, if the node cannot be scheduled for more than 20 minutes, the node will be scaled in by Autoscaler.
      • After this function is enabled, nodes will be tainted with node.cloudprovider.kubernetes.io/uninitialized during a node pool creation or update.
      +
      +

      Max. Pods

      +

      Maximum number of pods that can run on the node, including the default system pods.

      This limit prevents the node from being overloaded with pods.

      This number is also decided by other factors. For details, see Maximum Number of Pods That Can Be Created on a Node.

      ECS Group

      +

      ECS Group

      An ECS group logically groups ECSs. The ECSs in the same ECS group comply with the same policy associated with the ECS group.

      +

      An ECS group logically groups ECSs. The ECSs in the same ECS group comply with the same policy associated with the ECS group.

      Anti-affinity: ECSs in an ECS group are deployed on different physical hosts to improve service reliability.

      Select an existing ECS group, or click Add ECS Group to create one. After the ECS group is created, click the refresh icon.

      Pre-installation Command

      +

      Pre-installation Command

      Pre-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded.

      +

      Pre-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded. The pre-installation and post-installation scripts are counted together, and the total number of characters after transcoding cannot exceed 10240.

      The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed.

      Post-installation Command

      +

      Post-installation Command

      Post-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded.

      +

      Post-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded. The pre-installation and post-installation scripts are counted together, and the total number of characters after transcoding cannot exceed 10240.

      The script will be executed after Kubernetes software is installed, which does not affect the installation.

      NOTE:

      Do not run the reboot command in the post-installation script to restart the system immediately. To restart the system, run the shutdown -r 1 command to restart with a delay of one minute.

      Agency

      +

      Agency

      An agency is created by the account administrator on the IAM console. By creating an agency, you can share your cloud server resources with another account, or entrust a more professional person or team to manage your resources.

      +

      An agency is created by the account administrator on the IAM console. Using an agency, you can share your cloud server resources with another account, or entrust a more professional person or team to manage your resources.

      If no agency is available, click Create Agency on the right to create one.

      User-defined node name prefix and suffix

      +

      Custom name prefix and suffix of a node in a node pool. After the configuration, the nodes in the node pool will be named with the configured prefix and suffix. For example, if the prefix is prefix- and the suffix is -suffix, the nodes in the node pool will be named in the format of "prefix-Node pool name with five-digit random characters-suffix".

      +
      NOTICE:
      • A prefix and suffix can be customized only when a node pool is created, and they cannot be modified after the node pool is created.
      • A prefix can end with a special character, and a suffix can start with a special character.
      • A node name consists of a maximum of 56 characters in the format of "Prefix-Node pool name with five-digit random characters-Suffix".
      • A node name does not support the combination of a period (.) and special characters (such as .., .-, or -.).
      • This function is available only in clusters of v1.28.1, v1.27.3, v1.25.6, v1.23.11, v1.21.12, or later.
      +
      +
      diff --git a/docs/cce/umn/cce_10_0014.html b/docs/cce/umn/cce_10_0014.html index 4604a94b..4e004f0e 100644 --- a/docs/cce/umn/cce_10_0014.html +++ b/docs/cce/umn/cce_10_0014.html @@ -8,16 +8,22 @@
      diff --git a/docs/cce/umn/cce_10_0015.html b/docs/cce/umn/cce_10_0015.html index b2ade808..61939768 100644 --- a/docs/cce/umn/cce_10_0015.html +++ b/docs/cce/umn/cce_10_0015.html @@ -24,7 +24,7 @@ data:
      Hello

      Using kubectl

      -
      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Create a file named nginx-configmap.yaml and edit it.

        vi nginx-configmap.yaml

        +
        1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
        2. Create a file named nginx-configmap.yaml and edit it.

          vi nginx-configmap.yaml

          Content of the YAML file:

          • Added from ConfigMap: To add all data in a ConfigMap to environment variables, use the envFrom parameter. The keys in the ConfigMap will become names of environment variables in the workload.
            The example Deployment manifest is truncated at this point in the diff; the container writes $SPECIAL_LEVEL and $SPECIAL_TYPE to /usr/share/nginx/html/index.html. A minimal sketch follows.
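            The following is a minimal sketch of such a Deployment. It assumes the ConfigMap is named cce-configmap and contains the keys SPECIAL_LEVEL and SPECIAL_TYPE; the workload name, the image, and the trailing nginx command that keeps the container running are assumptions for illustration.

            apiVersion: apps/v1
            kind: Deployment
            metadata:
              name: nginx-configmap
            spec:
              replicas: 1
              selector:
                matchLabels:
                  app: nginx-configmap
              template:
                metadata:
                  labels:
                    app: nginx-configmap
                spec:
                  containers:
                  - name: container-1
                    image: nginx:latest
                    envFrom:                      # Import every key in the ConfigMap as an environment variable
                    - configMapRef:
                        name: cce-configmap       # Name of the referenced ConfigMap
                    command: ["/bin/sh", "-c"]
                    args:                         # Write the imported variables to the default Nginx page, then run Nginx
                    - echo $SPECIAL_LEVEL $SPECIAL_TYPE > /usr/share/nginx/html/index.html; nginx -g 'daemon off;'
                  imagePullSecrets:
                  - name: default-secret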
          -

        3. Set other workload parameters and click Create Workload.

          After the workload runs properly, log in to the container and run the following statement to check whether the ConfigMap has been set as an environment variable of the workload:

          +

        4. Configure other workload parameters and click Create Workload.

          After the workload runs properly, log in to the container and run the following statement to check whether the ConfigMap has been set as an environment variable of the workload:

          cat /usr/share/nginx/html/index.html

          The example output is as follows:

          Hello CCE

        Using kubectl

        -
        1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
        2. Create a file named nginx-configmap.yaml and edit it.

          vi nginx-configmap.yaml

          +
          1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
          2. Create a file named nginx-configmap.yaml and edit it.

            vi nginx-configmap.yaml

            As shown in the following example, the cce-configmap ConfigMap is imported to the workload. SPECIAL_LEVEL and SPECIAL_TYPE are the environment variable names in the workload, that is, the key names in the cce-configmap ConfigMap.
            The example Deployment manifest is truncated at this point in the diff; a minimal sketch showing how SPECIAL_LEVEL and SPECIAL_TYPE are imported from cce-configmap follows.
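            The following is a minimal sketch that imports the two keys individually, assuming this example uses valueFrom with configMapKeyRef; the workload name and image are placeholders.

            apiVersion: apps/v1
            kind: Deployment
            metadata:
              name: nginx-configmap
            spec:
              replicas: 1
              selector:
                matchLabels:
                  app: nginx-configmap
              template:
                metadata:
                  labels:
                    app: nginx-configmap
                spec:
                  containers:
                  - name: container-1
                    image: nginx:latest
                    env:
                    - name: SPECIAL_LEVEL           # Environment variable name in the workload
                      valueFrom:
                        configMapKeyRef:
                          name: cce-configmap       # Name of the referenced ConfigMap
                          key: SPECIAL_LEVEL        # Key in the ConfigMap
                    - name: SPECIAL_TYPE
                      valueFrom:
                        configMapKeyRef:
                          name: cce-configmap
                          key: SPECIAL_TYPE
                  imagePullSecrets:
                  - name: default-secret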
             

      Mount Path

      Enter a mount point. After the ConfigMap volume is mounted, a configuration file with the key as the file name and value as the file content is generated in the mount path of the container.

      -
      This parameter indicates the container path to which a data volume will be mounted. Do not mount the volume to a system directory such as / or /var/run. This may lead to container errors. Mount the volume to an empty directory. If the directory is not empty, ensure that there are no files that affect container startup. Otherwise, the files will be replaced, which leads to a container startup failure or workload creation failure.
      NOTICE:

      If the container is mounted to a high-risk directory, use an account with minimum permissions to start the container. Otherwise, high-risk files on the host may be damaged.

      +
      This parameter specifies a container path to which a data volume will be mounted. Do not mount the volume to a system directory such as / or /var/run. This may lead to container errors. Mount the volume to an empty directory. If the directory is not empty, ensure that there are no files that affect container startup. Otherwise, the files will be replaced, which leads to a container startup failure or workload creation failure.
      NOTICE:

      If the container is mounted to a high-risk directory, use an account with minimum permissions to start the container. Otherwise, high-risk files on the host may be damaged.

      @@ -110,7 +110,7 @@ spec:
      @@ -135,7 +135,7 @@ spec:

      The expected output is the same as the content in the secret.

      Using kubectl

      -
      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Create a file named nginx-secret.yaml and edit it.

        vi nginx-secret.yaml

        +
        1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
        2. Create a file named nginx-secret.yaml and edit it.

          vi nginx-secret.yaml

          In the following example, the username and password in the mysecret secret are saved in the /etc/foo directory as files.
          The example Deployment manifest is truncated at this point in the diff; a minimal sketch of mounting the mysecret secret to /etc/foo follows.
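          The following is a minimal sketch of such a Deployment. The workload name and image are placeholders, and the secret mysecret with the keys username and password is assumed to exist.

          apiVersion: apps/v1
          kind: Deployment
          metadata:
            name: nginx-secret
          spec:
            replicas: 1
            selector:
              matchLabels:
                app: nginx-secret
            template:
              metadata:
                labels:
                  app: nginx-secret
              spec:
                containers:
                - name: container-1
                  image: nginx:latest
                  volumeMounts:
                  - name: foo
                    mountPath: /etc/foo        # Each key in the secret becomes a file in this directory
                    readOnly: true
                volumes:
                - name: foo
                  secret:
                    secretName: mysecret       # Name of the referenced secret
                imagePullSecrets:
                - name: default-secret

          After the pod starts, /etc/foo/username and /etc/foo/password contain the corresponding secret values.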
          diff --git a/docs/cce/umn/cce_10_0018.html b/docs/cce/umn/cce_10_0018.html
          index 40d068a3..1c7b087d 100644
          --- a/docs/cce/umn/cce_10_0018.html
          +++ b/docs/cce/umn/cce_10_0018.html
          @@ -4,9 +4,9 @@
           

          CCE works with AOM to collect workload logs. When a node is created, ICAgent (a DaemonSet named icagent in the kube-system namespace of a cluster) of AOM is installed by default. ICAgent collects workload logs and reports them to AOM. You can view workload logs on the CCE or AOM console.

          Constraints

          ICAgent only collects text logs in .log, .trace, and .out formats.

          -

          Using ICAgent to Collect Logs

          1. When creating a workload, set logging for the container.
          2. Click to add a log policy.

            The following uses Nginx as an example. Log policies vary depending on workloads.
            Figure 1 Adding a log policy
            +

            Using ICAgent to Collect Logs

            1. When creating a workload, set logging for the container.
            2. Click to add a log policy.

              The following uses Nginx as an example. Log policies vary depending on workloads.
              Figure 1 Adding a log policy
              -

            3. Set Volume Type to hostPath or EmptyDir.

              +

            4. Set Volume Type to hostPath or emptyDir.

      Table 1 Mounting a secret volume

      Parameter

      Mount Path

      Enter a mount point. After the secret volume is mounted, a secret file with the key as the file name and value as the file content is generated in the mount path of the container.

      -
      This parameter indicates the container path to which a data volume will be mounted. Do not mount the volume to a system directory such as / or /var/run. This may cause container errors. Mount the volume to an empty directory. If the directory is not empty, ensure that there are no files that affect container startup. Otherwise, the files will be replaced, which leads to a container startup failure or workload creation failure.
      NOTICE:

      If the container is mounted to a high-risk directory, use an account with minimum permissions to start the container. Otherwise, high-risk files on the host may be damaged.

      +
      This parameter specifies a container path to which a data volume will be mounted. Do not mount the volume to a system directory such as / or /var/run. This may cause container errors. Mount the volume to an empty directory. If the directory is not empty, ensure that there are no files that affect container startup. Otherwise, the files will be replaced, which leads to a container startup failure or workload creation failure.
      NOTICE:

      If the container is mounted to a high-risk directory, use an account with minimum permissions to start the container. Otherwise, high-risk files on the host may be damaged.

      @@ -174,7 +174,7 @@ spec:
      @@ -258,7 +270,12 @@
      diff --git a/docs/cce/umn/cce_10_0031.html b/docs/cce/umn/cce_10_0031.html index 29fe7e0c..4eb6b617 100644 --- a/docs/cce/umn/cce_10_0031.html +++ b/docs/cce/umn/cce_10_0031.html @@ -4,15 +4,17 @@
      diff --git a/docs/cce/umn/cce_10_0035.html b/docs/cce/umn/cce_10_0035.html index 1742be9b..6bbd136d 100644 --- a/docs/cce/umn/cce_10_0035.html +++ b/docs/cce/umn/cce_10_0035.html @@ -8,6 +8,8 @@
      diff --git a/docs/cce/umn/cce_10_00356.html b/docs/cce/umn/cce_10_00356.html index b647dab1..77ff2edc 100644 --- a/docs/cce/umn/cce_10_00356.html +++ b/docs/cce/umn/cce_10_00356.html @@ -1,9 +1,9 @@

      Accessing a Container

      +

      Logging In to a Container

      Scenario

      If you encounter unexpected problems when using a container, you can log in to the container to debug it.

      -

      Logging In to a Container Using kubectl

      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Run the following command to view the created pod:

        kubectl get pod
        +

        Using kubectl

        1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
        2. Run the following command to view the created pod:

          kubectl get pod
          The example output is as follows:
          NAME                               READY   STATUS    RESTARTS       AGE
           nginx-59d89cb66f-mhljr             1/1     Running   0              11m
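          The diff is truncated here. In a typical workflow, the next step is to open a shell in the container, for example as follows (the pod name is taken from the example output above, and /bin/bash is assumed to exist in the container image):

          kubectl exec -it nginx-59d89cb66f-mhljr -- /bin/bash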
          diff --git a/docs/cce/umn/cce_10_0036.html b/docs/cce/umn/cce_10_0036.html index 0c1e60e5..19475938 100644 --- a/docs/cce/umn/cce_10_0036.html +++ b/docs/cce/umn/cce_10_0036.html @@ -1,11 +1,11 @@

          Stopping a Node

          -

          Scenario

          After a node in the cluster is stopped, services on the node are also stopped. Before stopping a node, ensure that discontinuity of the services on the node will not result in adverse impacts.

          +

          Scenario

          When a node in the cluster is stopped, all services on that node will also be stopped, and the node will no longer be available for scheduling. Check if your services will be affected before stopping a node.

          -

          Constraints

          • Deleting a node will lead to pod migration, which may affect services. Therefore, delete nodes during off-peak hours.
          • Unexpected risks may occur during the operation. Back up related data in advance.
          • While the node is being deleted, the backend will set the node to the unschedulable state.
          • Only worker nodes can be stopped.
          +

          Precautions

          • Deleting a node will lead to pod migration, which may affect services. Perform this operation during off-peak hours.
          • Unexpected risks may occur during the operation. Back up data beforehand.
          -

          Procedure

          1. Log in to the CCE console and click the cluster name to access the cluster console.
          2. In the navigation pane, choose Nodes. On the displayed page, click the Nodes tab.
          3. Locate the target node and click its name.
          4. In the upper right corner of the ECS details page, click Stop. In the displayed dialog box, click Yes.

            Figure 1 ECS details page
            +

            Procedure

            1. Log in to the CCE console and click the cluster name to access the cluster console.
            2. In the navigation pane, choose Nodes. On the displayed page, click the Nodes tab.
            3. Locate the target node and click its name.
            4. In the upper right corner of the ECS details page, click Stop. In the displayed dialog box, click Yes.

              Figure 1 ECS details page

          diff --git a/docs/cce/umn/cce_10_0044.html b/docs/cce/umn/cce_10_0044.html index 33cf02bd..b6c77e47 100644 --- a/docs/cce/umn/cce_10_0044.html +++ b/docs/cce/umn/cce_10_0044.html @@ -12,6 +12,10 @@
        diff --git a/docs/cce/umn/cce_10_0046.html b/docs/cce/umn/cce_10_0046.html index 13f96000..a2010651 100644 --- a/docs/cce/umn/cce_10_0046.html +++ b/docs/cce/umn/cce_10_0046.html @@ -8,15 +8,15 @@
        diff --git a/docs/cce/umn/cce_10_0047.html b/docs/cce/umn/cce_10_0047.html index 15ab1241..ceea984a 100644 --- a/docs/cce/umn/cce_10_0047.html +++ b/docs/cce/umn/cce_10_0047.html @@ -7,7 +7,7 @@
      -

      Using the CCE Console

      1. Log in to the CCE console.
      2. Click the cluster name to go to the cluster console, choose Workloads in the navigation pane, and click Create Workload in the upper right corner.
      3. Set basic information about the workload.

        Basic Info
        • Workload Type: Select Deployment. For details about workload types, see Overview.
        • Workload Name: Enter the name of the workload. Enter 1 to 63 characters starting with a lowercase letter and ending with a lowercase letter or digit. Only lowercase letters, digits, and hyphens (-) are allowed.
        • Namespace: Select the namespace of the workload. The default value is default. You can also click Create Namespace to create one. For details, see Creating a Namespace.
        • Pods: Enter the number of pods of the workload.
        • Container Runtime: A CCE standard cluster uses runC by default, whereas a CCE Turbo cluster supports both runC and Kata. For details about the differences, see Kata Runtime and Common Runtime.
        • Time Zone Synchronization: Specify whether to enable time zone synchronization. After time zone synchronization is enabled, the container and node use the same time zone. The time zone synchronization function depends on the local disk mounted to the container. Do not modify or delete the time zone. For details, see Configuring Time Zone Synchronization.
        +

        Using the CCE Console

        1. Log in to the CCE console.
        2. Click the cluster name to go to the cluster console, choose Workloads in the navigation pane, and click Create Workload in the upper right corner.
        3. Set basic information about the workload.

          Basic Info
          • Workload Type: Select Deployment. For details about workload types, see Overview.
          • Workload Name: Enter the name of the workload. Enter 1 to 63 characters starting with a lowercase letter and ending with a lowercase letter or digit. Only lowercase letters, digits, and hyphens (-) are allowed.
          • Namespace: Select the namespace of the workload. The default value is default. You can also click Create Namespace to create one. For details, see Creating a Namespace.
          • Pods: Enter the number of pods of the workload.
          • Container Runtime: A CCE standard cluster uses runC by default, whereas a CCE Turbo cluster supports both runC and Kata. For details about the differences, see Secure Runtime and Common Runtime.
          • Time Zone Synchronization: Specify whether to enable time zone synchronization. After time zone synchronization is enabled, the container and node use the same time zone. The time zone synchronization function depends on the local disk mounted to the container. Do not modify or delete the time zone. For details, see Configuring Time Zone Synchronization.
          Container Settings
          • Container Information
            Multiple containers can be configured in a pod. You can click Add Container on the right to configure multiple containers for the pod.
            • Basic Info: Configure basic information about the container.
      Table 1 Configuring log policies

      Parameter

      Description

      @@ -155,8 +155,8 @@ spec:

      Extended host path

      Extended host paths contain pod IDs or container names to distinguish different containers into which the host path is mounted.

      -

      A level-3 directory is added to the original volume directory/subdirectory. You can easily obtain the files output by a single Pod.

      -
      • None: No extended path is configured.
      • PodUID: ID of a pod.
      • PodName: name of a pod.
      • PodUID/ContainerName: ID of a pod or name of a container.
      • PodName/ContainerName: name of a pod or container.
      +

      A level-3 directory is added to the original volume directory/subdirectory. You can easily obtain the files output by a single Pod.

      +
      • None: No extended path is configured.
      • PodUID: ID of a pod.
      • PodName: name of a pod.
      • PodUID/ContainerName: ID of a pod and name of a container.
      • PodName/ContainerName: name of a pod and name of a container.

      policy.logs.rotate

      @@ -164,7 +164,7 @@ spec:

      Log dump

      Log dump refers to rotating log files on a local host.

      -
      • Enabled: AOM scans log files every minute. When a log file exceeds 50 MB, it is dumped immediately. A new .zip file is generated in the directory where the log file is located. For a log file, AOM stores only the latest 20 .zip files. When the number of .zip files exceeds 20, earlier .zip files will be deleted. After the dump is complete, the log file in AOM will be cleared.
      • Disabled: AOM does not dump log files.
      +
      • Enabled: AOM scans log files every minute. When a log file exceeds 50 MB, it is dumped immediately. A new .zip file is generated in the directory where the log file is located. For a log file, AOM stores only the latest 20 .zip files. When the number of .zip files exceeds 20, earlier .zip files will be deleted. After the dump is complete, the log file in AOM will be cleared.
      • Disabled: AOM does not dump log files.
      NOTE:
      • AOM rotates log files using copytruncate. Before enabling log dumping, ensure that log files are written in the append mode. Otherwise, file holes may occur.
      • Currently, mainstream log components such as Log4j and Logback support log file rotation. If you have already set rotation for log files, skip the configuration. Otherwise, conflicts may occur.
      • You are advised to configure log file rotation for your own services to flexibly control the size and number of rolled files.

      Collection path

      A collection path narrows down the scope of collection to specified logs.

      -
      • If no collection path is specified, log files in .log, .trace, and .out formats will be collected from the specified path.
      • /Path/**/ indicates that all log files in .log, .trace, and .out formats will be recursively collected from the specified path and all subdirectories at 5 levels deep.
      • * in log file names indicates a fuzzy match.
      +
      • If no collection path is specified, log files in .log, .trace, and .out formats will be collected from the specified path.
      • /Path/**/ indicates that all log files in .log, .trace, and .out formats will be recursively collected from the specified path and all subdirectories at 5 levels deep.
      • * in log file names indicates a fuzzy match.

      Example: The collection path /tmp/**/test*.log indicates that all .log files prefixed with test will be collected from /tmp and subdirectories at 5 levels deep.

      CAUTION:

      Ensure that the ICAgent version is v5.12.22 or later.

      diff --git a/docs/cce/umn/cce_10_0020.html b/docs/cce/umn/cce_10_0020.html index cfb96256..8fecd17e 100644 --- a/docs/cce/umn/cce_10_0020.html +++ b/docs/cce/umn/cce_10_0020.html @@ -6,7 +6,7 @@

      Master Nodes

      Select the number of master nodes. The master nodes are automatically hosted by CCE and deployed with Kubernetes cluster management components such as kube-apiserver, kube-controller-manager, and kube-scheduler.

      -
      • Multiple: Three master nodes will be created for high cluster availability.
      • Single: Only one master node will be created in your cluster.
      +
      • 3 Masters: Three master nodes will be created for high cluster availability.
      • Single: Only one master node will be created in your cluster.
      You can also select AZs for the master nodes. By default, AZs are allocated automatically for the master nodes.
      • Automatic: Master nodes are randomly distributed in different AZs for cluster DR. If the number of available AZs is less than the number of nodes to be created, CCE will create the nodes in the AZs with sufficient resources to preferentially ensure cluster creation. In this case, AZ-level DR may not be ensured.
      • Custom: Master nodes are deployed in specific AZs.
        If there is one master node in your cluster, you can select one AZ for the master node. If there are multiple master nodes in your cluster, you can select multiple AZs for the master nodes.
        • AZ: Master nodes are deployed in different AZs for cluster DR.
        • Host: Master nodes are deployed on different hosts in the same AZ for cluster DR.
        • Custom: Master nodes are deployed in the AZs you specified.
      @@ -70,6 +70,13 @@

      Select the subnet to which the master nodes belong. If no subnet is available, click Create Subnet to create one. The value cannot be changed after the cluster is created.

      Default Security Group

      +
      Select the security group automatically generated by CCE or use an existing one as the default security group of the node.
      NOTICE:

      The default security group must allow traffic from certain ports to ensure normal communication. Otherwise, the node cannot be created.

      +
      +
      +

      IPv6

      If enabled, cluster resources, including nodes and workloads, can be accessed through IPv6 CIDR blocks.

      @@ -155,7 +162,7 @@

      Overload Control

      After this function is enabled, concurrent requests will be dynamically controlled based on the resource demands received by master nodes to ensure the stable running of the master nodes and the cluster. For details, see Cluster Overload Control.

      +

      After this function is enabled, concurrent requests will be dynamically controlled based on the resource demands received by master nodes to ensure the stable running of the master nodes and the cluster. For details, see Enabling Overload Control for a Cluster.

      Disk Encryption for Master Nodes

      @@ -167,8 +174,8 @@

      Resource Tag

      You can add resource tags to classify resources.

      -

      You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

      +

      You can add resource tags to classify resources. A maximum of 20 resource tags can be added.

      +

      You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

      Description

      @@ -214,7 +221,12 @@

      CCE Node Problem Detector

      +

      Cloud Native Cluster Monitoring

      +

      (Optional) If selected, this add-on (Cloud Native Cluster Monitoring) will be automatically installed. Cloud Native Cluster Monitoring collects monitoring metrics for your cluster and reports the metrics to AOM. The agent mode does not support HPA based on custom Prometheus statements. If related functions are required, install this add-on manually after the cluster is created.

      +

      CCE Node Problem Detector

      (Optional) If selected, this add-on (CCE Node Problem Detector) will be automatically installed to detect faults and isolate nodes for prompt cluster troubleshooting.

      CCE Node Problem Detector

      +

      Cloud Native Cluster Monitoring

      +

      Select an AOM instance for Cloud Native Cluster Monitoring to report metrics. If no AOM instance is available, click Creating Instance to create one.

      +

      CCE Node Problem Detector

      This add-on is unconfigurable. After the cluster is created, choose Add-ons in the navigation pane of the cluster console and modify the configuration.

      Parameter

      @@ -81,16 +81,16 @@

      (Optional) Service Settings

      A Service provides external access for pods. With a static IP address, a Service forwards access traffic to pods and automatically balances load for these pods.

      You can also create a Service after creating a workload. For details about Services of different types, see Overview.

      -
      (Optional) Advanced Settings
      • Upgrade: Specify the upgrade mode and parameters of the workload. Rolling upgrade and Replace upgrade are available. For details, see Workload Upgrade Policies.
      • Scheduling: Configure affinity and anti-affinity policies for flexible workload scheduling. Load affinity and node affinity are provided.
        • Load Affinity: Common load affinity policies are offered for quick load affinity deployment.
          • Multi-AZ deployment is preferred: Workload pods are preferentially scheduled to nodes in different AZs through pod anti-affinity (podAntiAffinity). If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ but onto different nodes for high availability. If there are fewer nodes than pods, the extra pods will fail to run.
          • Forcible multi-AZ deployment: Workload pods are forcibly scheduled to nodes in different AZs through pod anti-affinity (podAntiAffinity). If there are fewer AZs than pods, the extra pods will fail to run.
          • Custom policies: Affinity and anti-affinity policies can be customized as needed. For details, see Scheduling Policies (Affinity/Anti-affinity).
          +
          (Optional) Advanced Settings
          • Upgrade: Specify the upgrade mode and parameters of the workload. Rolling upgrade and Replace upgrade are available. For details, see Configuring Workload Upgrade Policies.
          • Scheduling: Configure affinity and anti-affinity policies for flexible workload scheduling. Load affinity and node affinity are provided.
            • Load Affinity: Common load affinity policies are offered for quick load affinity deployment.
              • Multi-AZ deployment is preferred: Workload pods are preferentially scheduled to nodes in different AZs through pod anti-affinity (podAntiAffinity). If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ but onto different nodes for high availability. If there are fewer nodes than pods, the extra pods will fail to run.
              • Forcible multi-AZ deployment: Workload pods are forcibly scheduled to nodes in different AZs through pod anti-affinity (podAntiAffinity). If there are fewer AZs than pods, the extra pods will fail to run.
              • Custom policies: Affinity and anti-affinity policies can be customized as needed. For details, see Scheduling Policies (Affinity/Anti-affinity).
            • Node Affinity: Common load affinity policies are offered for quick load affinity deployment.
              • Node Affinity: Workload pods can be deployed on specified nodes through node affinity (nodeAffinity). If no node is specified, the pods will be randomly scheduled based on the default scheduling policy of the cluster.
              • Specified node pool scheduling: Workload pods can be deployed in a specified node pool through node affinity (nodeAffinity). If no node pool is specified, the pods will be randomly scheduled based on the default scheduling policy of the cluster.
              • Custom policies: Affinity and anti-affinity policies can be customized as needed. For details, see Scheduling Policies (Affinity/Anti-affinity).
            -
          • Toleration: Using both taints and tolerations allows (not forcibly) the pod to be scheduled to a node with the matching taints, and controls the pod eviction policies after the node where the pod is located is tainted. For details, see Taints and Tolerations.
          • Labels and Annotations: Add labels or annotations for pods using key-value pairs. After entering the key and value, click Confirm. For details about how to use and configure labels and annotations, see Labels and Annotations.
          • DNS: Configure a separate DNS policy for the workload. For details, see DNS Configuration.
          • Network Configuration +
          • Toleration: Using both taints and tolerations allows (not forcibly) the pod to be scheduled to a node with the matching taints, and controls the pod eviction policies after the node where the pod is located is tainted. For details, see Configuring Tolerance Policies.
          • Labels and Annotations: Add labels or annotations for pods using key-value pairs. After entering the key and value, click Confirm. For details about how to use and configure labels and annotations, see Configuring Labels and Annotations.
          • DNS: Configure a separate DNS policy for the workload. For details, see DNS Configuration.
          • Network Configuration

        • Click Create Workload in the lower right corner.

      Using kubectl

      The following procedure uses Nginx as an example to describe how to create a workload using kubectl.

      -
      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Create and edit the nginx-deployment.yaml file. nginx-deployment.yaml is an example file name, and you can rename it as required.

        vi nginx-deployment.yaml

        +
        1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
        2. Create and edit the nginx-deployment.yaml file. nginx-deployment.yaml is an example file name, and you can rename it as required.

          vi nginx-deployment.yaml

          The following is an example YAML file. For more information about Deployments, see Kubernetes documentation.

          The example Nginx Deployment manifest (which pulls the image using the default-secret image pull secret) is truncated at this point in the diff; a minimal sketch follows.
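          The following is a minimal sketch of such a manifest; the replica count and the resource requests and limits are placeholder values.

          apiVersion: apps/v1
          kind: Deployment
          metadata:
            name: nginx
          spec:
            replicas: 1
            selector:
              matchLabels:
                app: nginx
            template:
              metadata:
                labels:
                  app: nginx
              spec:
                containers:
                - image: nginx:latest
                  name: nginx
                  resources:                  # Placeholder resource requests and limits
                    requests:
                      cpu: 250m
                      memory: 512Mi
                    limits:
                      cpu: 250m
                      memory: 512Mi
                imagePullSecrets:
                - name: default-secret

          The manifest could then be applied with kubectl create -f nginx-deployment.yaml.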
          -

          For details about these parameters, see Table 1.

          +

          For details about the parameters, see Table 1.

          diff --git a/docs/cce/umn/cce_10_0048.html b/docs/cce/umn/cce_10_0048.html index 3d94fea4..b8bc2e92 100644 --- a/docs/cce/umn/cce_10_0048.html +++ b/docs/cce/umn/cce_10_0048.html @@ -4,13 +4,13 @@

          Scenario

          StatefulSets are a type of workload whose data or status is stored while the workload is running. For example, MySQL is a StatefulSet because it needs to store new data.

          A container can be migrated between different hosts, but data is not stored on the hosts. To store StatefulSet data persistently, attach HA storage volumes provided by CCE to the container.

          -

          Constraints

          • When you delete or scale a StatefulSet, the system does not delete the storage volumes associated with the StatefulSet to ensure data security.
          • When you delete a StatefulSet, reduce the number of replicas to 0 before deleting the StatefulSet so that pods in the StatefulSet can be stopped in order.
          • When you create a StatefulSet, a headless Service is required for pod access. For details, see Headless Services.
          • When a node is unavailable, pods become Unready. In this case, manually delete the pods of the StatefulSet so that the pods can be migrated to a normal node.
          +

          Notes and Constraints

          • When you delete or scale a StatefulSet, the system does not delete the storage volumes associated with the StatefulSet to ensure data security.
          • When you delete a StatefulSet, reduce the number of replicas to 0 before deleting the StatefulSet so that pods in the StatefulSet can be stopped in order.
          • When you create a StatefulSet, a headless Service is required for pod access. For details, see Headless Services.
          • When a node is unavailable, pods become Unready. In this case, manually delete the pods of the StatefulSet so that the pods can be migrated to a normal node.

          Prerequisites

          • Before creating a workload, you must have an available cluster. For details on how to create a cluster, see Creating a CCE Standard/Turbo Cluster.
          • To enable public access to a workload, ensure that an EIP or load balancer has been bound to at least one node in the cluster.

            If a pod has multiple containers, ensure that the ports used by the containers do not conflict with each other. Otherwise, creating the StatefulSet will fail.

          -

          Using the CCE Console

          1. Log in to the CCE console.
          2. Click the cluster name to go to the cluster console, choose Workloads in the navigation pane, and click Create Workload in the upper right corner.
          3. Set basic information about the workload.

            Basic Info
            • Workload Type: Select StatefulSet. For details about workload types, see Overview.
            • Workload Name: Enter the name of the workload. Enter 1 to 63 characters starting with a lowercase letter and ending with a lowercase letter or digit. Only lowercase letters, digits, and hyphens (-) are allowed.
            • Namespace: Select the namespace of the workload. The default value is default. You can also click Create Namespace to create one. For details, see Creating a Namespace.
            • Pods: Enter the number of pods of the workload.
            • Container Runtime: A CCE standard cluster uses runC by default, whereas a CCE Turbo cluster supports both runC and Kata. For details about the differences, see Kata Runtime and Common Runtime.
            • Time Zone Synchronization: Specify whether to enable time zone synchronization. After time zone synchronization is enabled, the container and node use the same time zone. The time zone synchronization function depends on the local disk mounted to the container. Do not modify or delete the time zone. For details, see Configuring Time Zone Synchronization.
            +

            Using the CCE Console

            1. Log in to the CCE console.
            2. Click the cluster name to go to the cluster console, choose Workloads in the navigation pane, and click Create Workload in the upper right corner.
            3. Set basic information about the workload.

              Basic Info
              • Workload Type: Select StatefulSet. For details about workload types, see Overview.
              • Workload Name: Enter the name of the workload. Enter 1 to 63 characters starting with a lowercase letter and ending with a lowercase letter or digit. Only lowercase letters, digits, and hyphens (-) are allowed.
              • Namespace: Select the namespace of the workload. The default value is default. You can also click Create Namespace to create one. For details, see Creating a Namespace.
              • Pods: Enter the number of pods of the workload.
              • Container Runtime: A CCE standard cluster uses runC by default, whereas a CCE Turbo cluster supports both runC and Kata. For details about the differences, see Secure Runtime and Common Runtime.
              • Time Zone Synchronization: Specify whether to enable time zone synchronization. After time zone synchronization is enabled, the container and node use the same time zone. The time zone synchronization function depends on the local disk mounted to the container. Do not modify or delete the time zone. For details, see Configuring Time Zone Synchronization.
              Container Settings
              • Container Information
                Multiple containers can be configured in a pod. You can click Add Container on the right to configure multiple containers for the pod.
                • Basic Info: Configure basic information about the container.
          Table 1 Deployment YAML parameters

          Parameter

          Parameter

          @@ -74,7 +74,7 @@
          -
        3. (Optional) Lifecycle: Configure operations to be performed in a specific phase of the container lifecycle, such as Startup Command, Post-Start, and Pre-Stop. For details, see Configuring Container Lifecycle Parameters.
        4. (Optional) Health Check: Set the liveness probe, ready probe, and startup probe as required. For details, see Configuring Container Health Check.
        5. (Optional) Environment Variables: Configure variables for the container running environment using key-value pairs. These variables transfer external information to containers running in pods and can be flexibly modified after application deployment. For details, see Configuring Environment Variables.
        6. (Optional) Data Storage: Mount local storage or cloud storage to the container. The application scenarios and mounting modes vary with the storage type. For details, see Storage.
          • StatefulSets support dynamic attachment of EVS disks. For details, see Dynamically Mounting an EVS Disk to a StatefulSet and Dynamically Mounting a Local PV to a StatefulSet.

            Dynamic mounting is achieved by using the volumeClaimTemplates field and depends on the dynamic creation capability of StorageClass. A StatefulSet associates each pod with a PVC using the volumeClaimTemplates field, and the PVC is bound to the corresponding PV. Therefore, after the pod is rescheduled, the original data can still be mounted based on the PVC name.

            +
          • (Optional) Lifecycle: Configure operations to be performed in a specific phase of the container lifecycle, such as Startup Command, Post-Start, and Pre-Stop. For details, see Configuring Container Lifecycle Parameters.
          • (Optional) Health Check: Set the liveness probe, ready probe, and startup probe as required. For details, see Configuring Container Health Check.
          • (Optional) Environment Variables: Configure variables for the container running environment using key-value pairs. These variables transfer external information to containers running in pods and can be flexibly modified after application deployment. For details, see Configuring Environment Variables.
          • (Optional) Data Storage: Mount local storage or cloud storage to the container. The application scenarios and mounting modes vary with the storage type. For details, see Storage.
            • StatefulSets support dynamic attachment of EVS disks. For details, see Dynamically Mounting an EVS Disk to a StatefulSet or Dynamically Mounting a Local PV to a StatefulSet.

              Dynamic mounting is achieved by using the volumeClaimTemplates field and depends on the dynamic creation capability of StorageClass. A StatefulSet associates each pod with a PVC using the volumeClaimTemplates field, and the PVC is bound to the corresponding PV. Therefore, after the pod is rescheduled, the original data can still be mounted based on the PVC name.

            • After a workload is created, the storage that is dynamically mounted cannot be updated.
          • (Optional) Security Context: Assign container permissions to protect the system and other containers from being affected. Enter the user ID to assign container permissions and prevent systems and other containers from being affected.
          • (Optional) Logging: Report standard container output logs to AOM by default, without requiring manual settings. You can manually configure the log collection path. For details, see Collecting Container Logs Using ICAgent.

            To disable the standard output of the current workload, add the annotation kubernetes.AOM.log.stdout: [] in Labels and Annotations. For details about how to use this annotation, see Table 1.

            @@ -87,18 +87,18 @@

            (Optional) Service Settings

            A Service provides external access for pods. With a static IP address, a Service forwards access traffic to pods and automatically balances load for these pods.

            You can also create a Service after creating a workload. For details about Services of different types, see Overview.

            -
            (Optional) Advanced Settings
            • Upgrade: Specify the upgrade mode and parameters of the workload. Rolling upgrade and Replace upgrade are available. For details, see Workload Upgrade Policies.
            • Pod Management Policies

              For some distributed systems, the StatefulSet sequence is unnecessary and/or should not occur. These systems require only uniqueness and identifiers.

              +
              (Optional) Advanced Settings
              • Upgrade: Specify the upgrade mode and parameters of the workload. Rolling upgrade and Replace upgrade are available. For details, see Configuring Workload Upgrade Policies.
              • Pod Management Policies

                For some distributed systems, ordered StatefulSet deployment is unnecessary or undesirable. These systems require only uniqueness and stable identifiers.

                • OrderedReady: The StatefulSet will deploy, delete, or scale pods in order and one by one. (The StatefulSet continues only after the previous pod is ready or deleted.) This is the default policy.
                • Parallel: The StatefulSet will create pods in parallel to match the desired scale without waiting, and will delete all pods at once.
              • Scheduling: Configure affinity and anti-affinity policies for flexible workload scheduling. Load affinity and node affinity are provided.
                • Load Affinity: Common load affinity policies are offered for quick load affinity deployment.
                  • Multi-AZ deployment is preferred: Workload pods are preferentially scheduled to nodes in different AZs through pod anti-affinity (podAntiAffinity). If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ but onto different nodes for high availability. If there are fewer nodes than pods, the extra pods will fail to run.
                  • Forcible multi-AZ deployment: Workload pods are forcibly scheduled to nodes in different AZs through pod anti-affinity (podAntiAffinity). If there are fewer AZs than pods, the extra pods will fail to run.
                  • Custom policies: Affinity and anti-affinity policies can be customized as needed. For details, see Scheduling Policies (Affinity/Anti-affinity).
                • Node Affinity: Common load affinity policies are offered for quick load affinity deployment.
                  • Node Affinity: Workload pods can be deployed on specified nodes through node affinity (nodeAffinity). If no node is specified, the pods will be randomly scheduled based on the default scheduling policy of the cluster.
                  • Specified node pool scheduling: Workload pods can be deployed in a specified node pool through node affinity (nodeAffinity). If no node pool is specified, the pods will be randomly scheduled based on the default scheduling policy of the cluster.
                  • Custom policies: Affinity and anti-affinity policies can be customized as needed. For details, see Scheduling Policies (Affinity/Anti-affinity).
                -
              • Toleration: Using both taints and tolerations allows (not forcibly) the pod to be scheduled to a node with the matching taints, and controls the pod eviction policies after the node where the pod is located is tainted. For details, see Taints and Tolerations.
              • Labels and Annotations: Add labels or annotations for pods using key-value pairs. After entering the key and value, click Confirm. For details about how to use and configure labels and annotations, see Labels and Annotations.
              • DNS: Configure a separate DNS policy for the workload. For details, see DNS Configuration.
              • Network Configuration
                • Pod ingress/egress bandwidth limitation: You can set ingress/egress bandwidth limitation for pods. For details, see Configuring QoS for a Pod.
                • Whether to enable the static IP address: available only for clusters that support this function. After this function is enabled, you can set the interval for reclaiming expired pod IP addresses. For details, see Configuring a Static IP Address for a Pod.
                • IPv6 shared bandwidth: available only for clusters that support this function. After this function is enabled, you can configure a shared bandwidth for a pod with IPv6 dual-stack ENIs. For details, see Configuring Shared Bandwidth for a Pod with IPv6 Dual-Stack ENIs.
                +
              • Toleration: Using both taints and tolerations allows (not forcibly) the pod to be scheduled to a node with the matching taints, and controls the pod eviction policies after the node where the pod is located is tainted. For details, see Configuring Tolerance Policies.
              • Labels and Annotations: Add labels or annotations for pods using key-value pairs. After entering the key and value, click Confirm. For details about how to use and configure labels and annotations, see Configuring Labels and Annotations.
              • DNS: Configure a separate DNS policy for the workload. For details, see DNS Configuration.
              • Network Configuration
                • Pod ingress/egress bandwidth limitation: You can set ingress/egress bandwidth limitation for pods. For details, see Configuring QoS for a Pod.
                • Whether to enable the static IP address: available only for clusters that support this function. After this function is enabled, you can set the interval for reclaiming expired pod IP addresses. For details, see Configuring a Static IP Address for a Pod.
                • IPv6 shared bandwidth: available only for clusters that support this function. After this function is enabled, you can configure a shared bandwidth for a pod with IPv6 dual-stack ENIs. For details, see Configuring Shared Bandwidth for a Pod with IPv6 Dual-Stack ENIs.

            • Click Create Workload in the lower right corner.

      Using kubectl

      In this example, an Nginx workload is used, and an EVS volume is dynamically mounted to it using the volumeClaimTemplates field.

      -
      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Create and edit the nginx-statefulset.yaml file.

        nginx-statefulset.yaml is an example file name, and you can change it as required.

        +
        1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
        2. Create and edit the nginx-statefulset.yaml file.

          nginx-statefulset.yaml is an example file name, and you can change it as required.

          vi nginx-statefulset.yaml

          The following provides an example of the file contents. For more information on StatefulSet, see the Kubernetes documentation.

          apiVersion: apps/v1
          @@ -153,7 +153,7 @@ spec:
                   resources:
                     requests:
                       storage: 10Gi
          -        storageClassName: csi-disk # Storage class name. The value is csi-disk for the EVS volume.
          +        storageClassName: csi-disk # StorageClass name. The value is csi-disk for the EVS volume.
             updateStrategy:
               type: RollingUpdate
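          The following is a minimal sketch of the full manifest that the excerpt above is taken from. The workload name, image, replica count, and mount path are assumptions for illustration, while the volumeClaimTemplates settings (10Gi, csi-disk) follow the excerpt.

          apiVersion: apps/v1
          kind: StatefulSet
          metadata:
            name: nginx
          spec:
            serviceName: nginx-headless          # Headless Service defined in nginx-headless.yaml
            replicas: 2
            selector:
              matchLabels:
                app: nginx
            template:
              metadata:
                labels:
                  app: nginx
              spec:
                containers:
                - name: container-1
                  image: nginx:latest
                  volumeMounts:
                  - name: data
                    mountPath: /usr/share/nginx/html   # Where the dynamically created EVS volume is mounted
                imagePullSecrets:
                - name: default-secret
            volumeClaimTemplates:                # A PVC is created from this template for each pod
            - metadata:
                name: data
              spec:
                accessModes: ["ReadWriteOnce"]
                resources:
                  requests:
                    storage: 10Gi
                storageClassName: csi-disk       # StorageClass name. The value is csi-disk for the EVS volume.
            updateStrategy:
              type: RollingUpdate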

          vi nginx-headless.yaml
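          The contents of nginx-headless.yaml are not shown in this diff. A minimal sketch of a matching headless Service (the names and port are assumptions) could look like the following:

          apiVersion: v1
          kind: Service
          metadata:
            name: nginx-headless
            labels:
              app: nginx
          spec:
            clusterIP: None          # Headless Service: no cluster IP is allocated
            selector:
              app: nginx             # Must match the labels of the StatefulSet pods
            ports:
            - name: nginx
              port: 80
              targetPort: 80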

          diff --git a/docs/cce/umn/cce_10_0054.html b/docs/cce/umn/cce_10_0054.html index 97fe3a2b..34c770c5 100644 --- a/docs/cce/umn/cce_10_0054.html +++ b/docs/cce/umn/cce_10_0054.html @@ -1,6 +1,6 @@ -

          High-Risk Operations and Solutions

          +

          High-Risk Operations

          During service deployment or running, you may trigger high-risk operations at different levels, causing service faults or interruption. To help you better estimate and avoid operation risks, this section introduces the consequences and solutions of high-risk operations from multiple dimensions, such as clusters, nodes, networking, load balancing, logs, and EVS disks.

          Clusters and Nodes

          Table 1 High-risk operations and solutions

          Category

          @@ -63,7 +63,7 @@

          The master node may be unavailable.

          Restore the parameter settings to the recommended values. For details, see Cluster Configuration Management.

          +

          Restore the parameter settings to the recommended values. For details, see Modifying Cluster Configurations.

          Replacing the master or etcd certificate

          @@ -107,7 +107,7 @@

          Reset the node. For details, see Resetting a Node.

          Upgrading the kernel or components on which the container platform depends (such as Open vSwitch, IPvlan, Docker, and containerd)

          +

          Upgrading the kernel or components on which the container platform depends (such as Open vSwitch, IPVLAN, Docker, and containerd)

          The node may be unavailable or the network may be abnormal.

          NOTE:

          Node running depends on the system kernel version. Do not use the yum update command to update or reinstall the operating system kernel of a node unless necessary. (Reinstalling the operating system kernel using the original image or other images is a risky operation.)


          The node may become unavailable, and components may be insecure if security-related configurations are modified.


          Restore the parameter settings to the recommended values. For details, see Modifying Node Pool Configurations.

          Modifying OS configuration

          diff --git a/docs/cce/umn/cce_10_0059.html b/docs/cce/umn/cce_10_0059.html index 02f70447..8c519eb5 100644 --- a/docs/cce/umn/cce_10_0059.html +++ b/docs/cce/umn/cce_10_0059.html @@ -1,11 +1,11 @@ -


          Configuring Network Policies to Restrict Pod Access

          Network policies are designed by Kubernetes to restrict pod access. They are equivalent to a firewall at the application layer and enhance network security. The capabilities supported by network policies depend on the capabilities of the network add-ons of the cluster.

          By default, if a namespace does not have any policy, pods in the namespace accept traffic from any source and send traffic to any destination.

          Network policies are classified into the following types:

          • namespaceSelector: selects particular namespaces for which all pods should be allowed as ingress sources or egress destinations.
          • podSelector: selects particular pods in the same namespace as the network policy which should be allowed as ingress sources or egress destinations.
          • ipBlock: selects particular IP blocks to allow as ingress sources or egress destinations. (Only egress rules support IP blocks.)


              Notes and Constraints

              • Only clusters that use the tunnel network model support network policies. Network policies are classified into the following types:
                • Ingress: All versions support this type.
                • Egress: Only the following OSs and cluster versions support egress rules.
                  Supported OSs and cluster versions:
                  • Kernel 4.18.0-147.5.1.6.h998.eulerosv2r9.x86_64 (EulerOS 2.9)
                  • HCE OS 2.0, clusters of v1.25 or later: kernel 5.10.0-60.18.0.50.r865_35.hce2.x86_64
                      - protocol: TCP
                        port: 6379

                  The following figure shows how podSelector works.

                  Figure 1 podSelector
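                  A complete policy of this kind might look like the following sketch (the namespace and the labels role=db and access=true are illustrative assumptions; the port matches the TCP 6379 rule shown above):

                  apiVersion: networking.k8s.io/v1
                  kind: NetworkPolicy
                  metadata:
                    name: access-demo              # illustrative name
                    namespace: default
                  spec:
                    podSelector:                   # the policy applies to pods with this label
                      matchLabels:
                        role: db
                    ingress:
                    - from:
                      - podSelector:               # only pods with this label in the same namespace may connect
                          matchLabels:
                            access: "true"
                      ports:
                      - protocol: TCP
                        port: 6379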
                • Using namespaceSelector to specify the access scope
                  apiVersion: networking.k8s.io/v1
                  kind: NetworkPolicy
                  ...
                      - protocol: TCP
                        port: 6379

                  The following figure shows how namespaceSelector works.

                  Figure 2 namespaceSelector
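                  A complete sketch of such a policy, under the assumption that the source namespaces carry a label such as project=myproject (illustrative), could be:

                  apiVersion: networking.k8s.io/v1
                  kind: NetworkPolicy
                  metadata:
                    name: allow-from-namespace     # illustrative name
                    namespace: default
                  spec:
                    podSelector:
                      matchLabels:
                        role: db                   # illustrative label of the protected pods
                    ingress:
                    - from:
                      - namespaceSelector:         # allow traffic from all pods in namespaces with this label
                          matchLabels:
                            project: myproject
                      ports:
                      - protocol: TCP
                        port: 6379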

              Using Egress Rules

              Egress supports not only podSelector and namespaceSelector, but also ipBlock.


              Only clusters of version 1.23 or later support Egress rules. Only nodes running EulerOS 2.9 or HCE OS 2.0 are supported.

              apiVersion: networking.k8s.io/v1
              kind: NetworkPolicy
              ...
                      except:
                      - 172.16.0.40/32        # This CIDR block cannot be accessed. This value must fall within the range specified by cidr.

              The following figure shows how ipBlock works.

              Figure 3 ipBlock
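              For reference, a full egress policy using ipBlock might be sketched as follows (the pod label and the 172.16.0.0/16 CIDR are illustrative assumptions; the except entry matches the fragment above):

              apiVersion: networking.k8s.io/v1
              kind: NetworkPolicy
              metadata:
                name: egress-ipblock           # illustrative name
                namespace: default
              spec:
                podSelector:
                  matchLabels:
                    role: web
                policyTypes:
                - Egress
                egress:
                - to:
                  - ipBlock:
                      cidr: 172.16.0.0/16      # destination CIDR block that may be accessed
                      except:
                      - 172.16.0.40/32         # this address cannot be accessed; it must fall within cidr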

              You can define ingress and egress in the same rule.

              apiVersion: networking.k8s.io/v1
              kind: NetworkPolicy
              ...
                      matchLabels:
                        role: web

              The following figure shows how to use ingress and egress together.

              Figure 4 Using both ingress and egress
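              A combined policy could be sketched as follows (labels and namespace are illustrative; role=web matches the fragment above):

              apiVersion: networking.k8s.io/v1
              kind: NetworkPolicy
              metadata:
                name: ingress-egress-demo      # illustrative name
                namespace: default
              spec:
                podSelector:
                  matchLabels:
                    role: db
                policyTypes:
                - Ingress
                - Egress
                ingress:
                - from:
                  - podSelector:
                      matchLabels:
                        role: frontend         # illustrative source pods
                  ports:
                  - protocol: TCP
                    port: 6379
                egress:
                - to:
                  - podSelector:
                      matchLabels:
                        role: web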
              Creating a Network Policy on the Console

              1. Log in to the CCE console and click the cluster name to access the cluster console.
              2. Choose Policies in the navigation pane, click the Network Policies tab, and click Create Network Policy in the upper right corner.
                 • Policy Name: Specify a network policy name.
                 • Namespace: Select a namespace in which the network policy is applied.
                 • Selector: Enter a label, select the pod to be associated, and click Add. You can also click Reference Workload Label to use the label of an existing workload.
                 • Inbound Rule: Click to add an inbound rule. For details about parameter settings, see Table 1 Adding an inbound rule.
                 • Outbound Rule: Click to add an outbound rule. For details about parameter settings, see Table 2 Adding an outbound rule.
              3. Click OK.

              diff --git a/docs/cce/umn/cce_10_0063.html b/docs/cce/umn/cce_10_0063.html index 8131e9e1..84e73c9b 100644 --- a/docs/cce/umn/cce_10_0063.html +++ b/docs/cce/umn/cce_10_0063.html @@ -4,13 +4,16 @@

                      Scenario

                      After a node scaling policy is created, you can delete, edit, disable, enable, or clone the policy.

                      Viewing a Node Scaling Policy

                      You can view the associated node pool, rules, and scaling history of a node scaling policy and rectify faults according to the error information displayed.

                      1. Log in to the CCE console and click the cluster name to access the cluster console.
                      2. In the navigation pane, choose Nodes. On the page displayed, click the Node Pools tab and then the name of the node pool for which an auto scaling policy has been created to view the node pool details.
                      3. On the node pool details page, click the Auto Scaling tab to view the auto scaling configuration and scaling records.

                        You can obtain created auto scaling policies on the Policies page.

                        1. Log in to the CCE console and click the cluster name to access the cluster console.
                        2. In the navigation pane, choose Policies. On the page displayed, click the Node Scaling Policies tab.
                        3. Check the configuration of the auto scaling policies. Choose More > Scaling History for the target policy to check the scaling records of the policy.

                      Deleting a Node Scaling Policy

                      1. Log in to the CCE console and click the cluster name to access the cluster console.
                      2. In the navigation pane, choose Policies. On the page displayed, click the Node Scaling Policies tab, locate the row containing the target policy and choose More > Delete in the Operation column.
                      3. In the Delete Node Scaling Policy dialog box displayed, confirm whether to delete the policy.
                      4. Click Yes to delete the policy.

                      Editing a Node Scaling Policy

                      1. Log in to the CCE console and click the cluster name to access the cluster console.
                      2. In the navigation pane, choose Policies. On the page displayed, click the Node Scaling Policies tab, locate the row containing the target policy and click Edit in the Operation column.
                      3. On the Edit Node Scaling Policy page displayed, configure policy parameters listed in Table 2.
                      4. After the configuration is complete, click OK.

                      Cloning a Node Scaling Policy

                      1. Log in to the CCE console and click the cluster name to access the cluster console.
                      2. In the navigation pane, choose Policies. On the page displayed, click the Node Scaling Policies tab, locate the row containing the target policy and choose More > Clone in the Operation column.
                      3. On the Create Node Scaling Policy page displayed, certain parameters have been cloned. Add or modify other policy parameters based on service requirements.
                      4. Click OK.

                      Enabling or Disabling a Node Scaling Policy

                      1. Log in to the CCE console and click the cluster name to access the cluster console.
                      2. In the navigation pane, choose Policies. On the page displayed, click the Node Scaling Policies tab, locate the row containing the target policy, and click Disable in the Operation column. If the policy is in the disabled state, click Enable in the Operation column.
                      3. In the dialog box displayed, confirm whether to disable or enable the node policy.
                      diff --git a/docs/cce/umn/cce_10_0064.html b/docs/cce/umn/cce_10_0064.html index 75985e10..b6aa1fe3 100644 --- a/docs/cce/umn/cce_10_0064.html +++ b/docs/cce/umn/cce_10_0064.html @@ -6,23 +6,15 @@
                      diff --git a/docs/cce/umn/cce_10_0066.html b/docs/cce/umn/cce_10_0066.html index 1f33446a..aefb01fc 100644 --- a/docs/cce/umn/cce_10_0066.html +++ b/docs/cce/umn/cce_10_0066.html @@ -4,11 +4,11 @@

                      Introduction

                      Everest is a cloud native container storage system, which enables clusters of Kubernetes v1.15.6 or later to access cloud storage services through the CSI.

                      Everest is a system resource add-on. It is installed by default when a cluster of Kubernetes v1.15 or later is created.


                      Notes and Constraints

                      • If your cluster is upgraded from v1.13 to v1.15, storage-driver will be replaced by Everest (v1.1.6 or later) for container storage. The takeover does not affect the original storage functions.
                      • In version 1.2.0 of the Everest add-on, key authentication is optimized when OBS is used. After the Everest add-on is upgraded from a version earlier than 1.2.0, restart all workloads that use OBS in the cluster. Otherwise, workloads may not be able to use OBS.
                      • By default, this add-on is installed in clusters of v1.15 and later. For clusters of v1.13 and earlier, the storage-driver add-on is installed by default.

                      Installing the Add-on

                      This add-on has been installed by default. If it is uninstalled due to some reasons, you can reinstall it by performing the following steps:

                      1. Log in to the CCE console and click the cluster name to access the cluster console. Click Add-ons in the navigation pane, locate CCE Container Storage (Everest) on the right, and click Install.
                      2. On the Install Add-on page, configure the specifications.


                      Table 1 Add-on configuration

                      Parameter

                      Description

                      Using both taints and tolerations allows (but does not force) the add-on Deployment to be scheduled to a node with the matching taints, and controls the Deployment eviction policies after the node where the Deployment is located is tainted.

                      The add-on adds the default tolerance policy for the node.kubernetes.io/not-ready and node.kubernetes.io/unreachable taints, respectively. The tolerance time window is 60s.


                      For details, see Configuring Tolerance Policies.
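                      Expressed as pod specification fields, the default tolerations described above correspond roughly to the following sketch (for reference only; the add-on manages these settings itself):

                      tolerations:
                      - key: node.kubernetes.io/not-ready
                        operator: Exists
                        effect: NoExecute
                        tolerationSeconds: 60          # 60-second tolerance window described above
                      - key: node.kubernetes.io/unreachable
                        operator: Exists
                        effect: NoExecute
                        tolerationSeconds: 60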


                      Collecting Prometheus Metrics

                      everest-csi-controller exposes Prometheus metrics over port 3225. You can create an on-premises Prometheus collector to identify and obtain everest-csi-controller metrics from http://{{everest-csi-controller pod IP address}}:3225/metrics.


                      Prometheus metrics can be exposed only when the Everest add-on version is 2.4.4 or later.
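                      A minimal Prometheus scrape job for these metrics could look like the following sketch (the kube-system namespace and the app=everest-csi-controller pod label are assumptions; verify the actual namespace and labels of the controller pods in your cluster):

                      scrape_configs:
                      - job_name: everest-csi-controller
                        kubernetes_sd_configs:
                        - role: pod                              # discover pods through the Kubernetes API
                          namespaces:
                            names:
                            - kube-system                        # assumed namespace of the add-on
                        relabel_configs:
                        - source_labels: [__meta_kubernetes_pod_label_app]
                          regex: everest-csi-controller          # assumed pod label; adjust as needed
                          action: keep
                        - source_labels: [__meta_kubernetes_pod_ip]
                          regex: (.*)
                          target_label: __address__
                          replacement: '${1}:3225'               # metrics are exposed over port 3225
                          action: replace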

                      Table 6 Key metrics
                      • everest_action_result_total (Counter): Invoking of different functions. Labels: action indicates the function (see Table 7); result indicates whether the invoking is successful or fails. Example: everest_action_result_total{action="create_snapshot:disk.csi.everest.io",result="success"} 2
                      • everest_function_duration_seconds_bucket (Histogram): Number of times that different functions are executed within different durations. Label: function indicates the function (see Table 7). Example: everest_function_duration_seconds_bucket{function="create_snapshot:disk.csi.everest.io",le="10"} 2
                      • everest_function_duration_seconds_sum (Histogram): Total invoking time of different functions. Label: function indicates the function (see Table 7). Example: everest_function_duration_seconds_sum{function="create:disk.csi.everest.io"} 24.381399053
                      • everest_function_duration_seconds_count (Histogram): Number of invoking times of different functions. Label: function indicates the function (see Table 7). Example: everest_function_duration_seconds_count{function="attach:disk.csi.everest.io"} 4

                      action and function specify different CSI drivers and their functions, and are in the format of {Function}:{CSI driver}. For example, create:disk.csi.everest.io specifies that the function is to create a volume and the volume type is EVS disk.

                      Table 7 Functions
                      • create: Creates a volume.
                      • delete: Deletes a volume.
                      • attach: Mounts a volume.
                      • detach: Detaches a volume.
                      • expand: Expands the capacity of a volume.
                      • create_snapshot: Creates a volume snapshot.
                      • delete_snapshot: Deletes a volume snapshot.

                      Change History

                      Table 8 Release history (add-on version, supported cluster versions, and new features)
                      • 2.4.28 (clusters v1.23, v1.25, v1.27, v1.28, v1.29): Fixed some issues.
                      • 2.3.23 (clusters v1.21, v1.23, v1.25, v1.27, v1.28): Subdirectories can be created in an SFS Turbo file system.
                      • 2.3.21 (clusters v1.21, v1.23, v1.25, v1.27, v1.28): Fixed some issues.
                      • 2.3.14 (clusters v1.21, v1.23, v1.25, v1.27, v1.28): CCE clusters 1.28 are supported.
                      • 2.1.51 (clusters v1.19, v1.21, v1.23, v1.25, v1.27): Supported HCE OS 2.0.
                      • 2.1.30 (clusters v1.19, v1.21, v1.23, v1.25): Supported anti-affinity scheduling of add-on pods on nodes in different AZs; adapted the obsfs package to Ubuntu 22.04.
                      • 2.1.13 (clusters v1.19, v1.21, v1.23, v1.25): Optimized the performance of creating subpath PVCs in batches for SFS Turbo volumes.
                      • 1.3.28 (clusters v1.19, v1.21, v1.23): Enabled graceful exit; supported OBS process monitoring.
                      • 1.2.78 (clusters v1.15, v1.17, v1.19, v1.21): Supported anti-affinity scheduling of add-on pods on nodes in different AZs.
                      • 1.2.70 (clusters v1.15, v1.17, v1.19, v1.21): Optimized the performance of creating subpath PVCs in batches for SFS Turbo volumes.
                      • 1.2.44 (clusters v1.15, v1.17, v1.19, v1.21): By default, the enable_noobj_cache parameter is no longer used for mounting OBS buckets.
                      • 1.2.30 (clusters v1.15, v1.17, v1.19, v1.21): Supported emptyDir.
                      • 1.2.13 (clusters v1.15, v1.17, v1.19): Supported EulerOS 2.10.
                      • 1.1.11 (clusters v1.15, v1.17): Supported security hardening and third-party OBS storage, switched to the EVS query API with better performance, used snapshots to create disks in clone mode by default, optimized disk status detection and log output for attach and detach operations, and improved the reliability of determining authentication expiration.
                      diff --git a/docs/cce/umn/cce_10_0068.html b/docs/cce/umn/cce_10_0068.html index f2613a90..7f0a987f 100644 --- a/docs/cce/umn/cce_10_0068.html +++ b/docs/cce/umn/cce_10_0068.html @@ -4,6 +4,8 @@

                      Node Pool Architecture

                      Generally, all nodes in a node pool have the following same attributes:

                      • Node OS
                      • Node login mode
                      • Node container runtime
                      • Startup parameters of Kubernetes components on a node
                      • Custom startup script of a node
                      • Kubernetes labels and taints

                      CCE provides the following extended attributes for node pools:

                      • Node pool OS
                      • Maximum number of pods on each node in a node pool

                      Description of DefaultPool

                      DefaultPool is not a real node pool. It only classifies nodes that are not in the custom node pools. These nodes are directly created on the console or by calling APIs. DefaultPool does not support any user-created node pool functions, including scaling and parameter configuration. DefaultPool cannot be edited, deleted, expanded, or auto scaled, and nodes in it cannot be migrated.


                      Application Scenarios

                      When a large-scale cluster is required, you are advised to use node pools to manage nodes.

                      The following table describes multiple scenarios of large-scale cluster management and the functions of node pools in each scenario.

                      diff --git a/docs/cce/umn/cce_10_0083.html b/docs/cce/umn/cce_10_0083.html index b1a85918..f2e403b4 100644 --- a/docs/cce/umn/cce_10_0083.html +++ b/docs/cce/umn/cce_10_0083.html @@ -1,88 +1,27 @@

                      Managing Workload Scaling Policies


                      Scenario

                      After a workload scaling policy is created, you can update and delete the policy, as well as edit the YAML file.


                        Procedure

                        You can view the rules, status, and events of a workload scaling policy and handle exceptions based on the error information displayed.

                        1. Log in to the CCE console and click the cluster name to access the cluster console.
                        2. In the navigation pane, choose Policies. On the displayed page, click the HPA/CronHPA Policies tab page based on the scaling policy type.
                        3. Check the status, rules, and associated workloads of a scaling policy.

                          You can also check a created scaling policy on the workload details page.

                          1. Log in to the CCE console and click the cluster name to access the cluster console.
                          2. In the navigation pane, choose Workloads. Click the workload name to view its details.
                          3. On the workload details page, switch to the Auto Scaling tab page to obtain the scaling policies. You can also obtain the scaling policies you configured on the Policies page.
                      Table 1 Using node pools for different management scenarios

                      • Deleting a node pool: Deleting a node pool will delete nodes in the pool. Pods on these nodes will be automatically migrated to available nodes in other node pools. If pods in the node pool have a specific node selector and none of the other nodes in the cluster satisfies the node selector, the pods will become unschedulable. Do not store important data on nodes in a node pool because the nodes may be deleted after scale-in. Data on the deleted nodes cannot be restored.
                      • Enabling auto scaling for a node pool: After auto scaling is enabled, you are not advised to manually adjust the node pool size.
                      • Disabling auto scaling for a node pool: After auto scaling is disabled, the number of nodes in a node pool will not automatically change with the cluster loads.
                      • Modifying node pool configurations: You can change the node pool name and number of nodes, add or delete Kubernetes labels, resource tags, and taints, and adjust node pool configurations such as the disk, OS, and container engine of the node pool. The deleted or added Kubernetes labels and taints (as well as their quantity) will apply to all nodes in the node pool, which may cause pod re-scheduling. Therefore, exercise caution when performing this operation.
                      • Configuring core components with fine granularity: You can configure core components with fine granularity. This function is supported only in clusters of v1.15 and later. It is not displayed for versions earlier than v1.15. The default node pool does not support this type of configuration.
                      Manage scaling policies.
                      • HPA policies support the following operations:
                        • View Events: Check HPA policy events. If an error occurred, locate and rectify the fault based on the error message displayed on the page.
                        • Edit YAML: In the dialog box displayed, edit, copy, or download the YAML file.
                        • Edit: On the Edit HPA Policy page, configure policy parameters listed in Table 1.
                        • Clone: Duplicate an existing auto scaling policy and modify the parameter settings as required.
                        • Delete: In the dialog box displayed, click Yes.
                      • CronHPA policies support the following operations:
                        • View YAML: In the dialog box displayed, copy or download the YAML file, but you are not allowed to modify it.
                        • Delete: In the dialog box displayed, click Yes.

                      Table 1 Event types and names
                      • Normal event:
                        • SuccessfulRescale: The scaling is performed successfully.
                      • Abnormal events:
                        • InvalidTargetRange: Invalid target range.
                        • InvalidSelector: Invalid selector.
                        • FailedGetObjectMetric: Objects fail to be obtained.
                        • FailedGetPodsMetric: Pods fail to be obtained.
                        • FailedGetResourceMetric: Resources fail to be obtained.
                        • FailedGetExternalMetric: External metrics fail to be obtained.
                        • InvalidMetricSourceType: Invalid metric source type.
                        • FailedConvertHPA: HPA conversion failed.
                        • FailedGetScale: The scale fails to be obtained.
                        • FailedComputeMetricsReplicas: Failed to calculate metric-defined replicas.
                        • FailedGetScaleWindow: Failed to obtain ScaleWindow.
                        • FailedRescale: Failed to scale the service.


                      diff --git a/docs/cce/umn/cce_10_0091.html b/docs/cce/umn/cce_10_0091.html index c3b9acf7..b7309f72 100644 --- a/docs/cce/umn/cce_10_0091.html +++ b/docs/cce/umn/cce_10_0091.html @@ -10,10 +10,10 @@ - + diff --git a/docs/cce/umn/cce_10_0094.html b/docs/cce/umn/cce_10_0094.html index ed803b10..f79eed71 100644 --- a/docs/cce/umn/cce_10_0094.html +++ b/docs/cce/umn/cce_10_0094.html @@ -3,18 +3,18 @@

                      Overview

                      Why We Need Ingresses

                      A Service is generally used to forward access requests based on TCP and UDP and provide layer-4 load balancing for clusters. However, in actual scenarios, if there is a large number of HTTP/HTTPS access requests on the application layer, the Service cannot meet the forwarding requirements. Therefore, the Kubernetes cluster provides an HTTP-based access mode, ingress.

                      An ingress is an independent resource in the Kubernetes cluster and defines rules for forwarding external access traffic. As shown in Figure 1, you can customize forwarding rules based on domain names and URLs to implement fine-grained distribution of access traffic.

                      Figure 1 Ingress diagram

                      The following describes the ingress-related definitions:

                      • Ingress object: a set of access rules that forward requests to specified Services based on domain names or URLs. It can be added, deleted, modified, and queried by calling APIs.
                      • Ingress Controller: an executor for request forwarding. It monitors the changes of resource objects such as ingresses, Services, endpoints, secrets (mainly TLS certificates and keys), nodes, and ConfigMaps in real time, parses rules defined by ingresses, and forwards requests to the target backend Services.
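                      Putting these rules into a manifest, a minimal Ingress sketch with domain- and URL-based forwarding could look as follows (the host, path, backend Service name and port, and the cce ingress class are illustrative assumptions; CCE-specific ELB settings are configured through annotations described in the ingress documentation):

                      apiVersion: networking.k8s.io/v1
                      kind: Ingress
                      metadata:
                        name: example-ingress          # illustrative name
                      spec:
                        ingressClassName: cce          # assumed class of the LoadBalancer Ingress Controller
                        rules:
                        - host: www.example.com        # domain name-based forwarding
                          http:
                            paths:
                            - path: /api               # URL-based forwarding
                              pathType: Prefix
                              backend:
                                service:
                                  name: api-svc        # backend Service that receives the traffic
                                  port:
                                    number: 8080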

                      Working Rules of LoadBalancer Ingress Controller

                      LoadBalancer Ingress Controller developed by CCE implements layer-7 network access for the internet and intranet (in the same VPC) based on ELB and distributes access traffic to the corresponding Services using different URLs.

                      LoadBalancer Ingress Controller is deployed on the master node and bound to the load balancer in the VPC where the cluster resides. Different domain names, ports, and forwarding policies can be configured for the same load balancer (with the same IP address). Figure 2 shows the working rules of LoadBalancer Ingress Controller.

                      1. A user creates an ingress object and configures a traffic access rule in the ingress, including the load balancer, URL, SSL, and backend service port.
                      2. When Ingress Controller detects that the ingress object changes, it reconfigures the listener and backend server route on the ELB side according to the traffic access rule.
                      3. When a user accesses a workload, the traffic is forwarded to the corresponding backend service port based on the forwarding policy configured on ELB, and then forwarded to each associated workload through the Service.
                      Figure 2 Working rules of shared LoadBalancer ingresses in CCE standard and Turbo clusters

                      When you use a dedicated load balancer in a CCE Turbo cluster, pod IP addresses are allocated from the VPC and the load balancer can directly access the pods. When creating an ingress for external cluster access, you can use ELB to access a ClusterIP Service and use pods as the backend server of the ELB listener. In this way, external traffic can directly access the pods in the cluster without being forwarded by node ports.

                      Figure 3 Working rules of passthrough networking for dedicated LoadBalancer ingresses in CCE Turbo clusters

                      Services Supported by Ingresses

                      Table 1 lists the Services supported by LoadBalancer ingresses.
                      Table 1 Services supported by LoadBalancer ingresses

                      Cluster Type

                      ELB Type

                      diff --git a/docs/cce/umn/cce_10_0105.html b/docs/cce/umn/cce_10_0105.html index 940e458a..48a49b3a 100644 --- a/docs/cce/umn/cce_10_0105.html +++ b/docs/cce/umn/cce_10_0105.html @@ -132,7 +132,7 @@

                      CLI


                      Set commands to be executed in the container for pre-stop processing. The command format is Command Args[1] Args[2].... Command is a system command or a user-defined executable program. If no path is specified, an executable program in the default path will be selected. If multiple commands need to be executed, write the commands into a script for execution.

                      Example command:

                      exec: 
                         command: 
                       
                       


                      YAML Example

                      This section uses Nginx as an example to describe how to set the container lifecycle.

                      In the following configuration file, the postStart command is defined to run install.sh using /bin/bash, and preStop is defined to run uninstall.sh.

                      apiVersion: apps/v1
                      kind: Deployment
                      ...
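                      Based on the description above, the relevant part of such a Deployment could be sketched as follows (the image and metadata are illustrative; only the lifecycle section reflects the install.sh and uninstall.sh commands mentioned above):

                      apiVersion: apps/v1
                      kind: Deployment
                      metadata:
                        name: nginx
                      spec:
                        replicas: 1
                        selector:
                          matchLabels:
                            app: nginx
                        template:
                          metadata:
                            labels:
                              app: nginx
                          spec:
                            containers:
                            - name: nginx
                              image: nginx:latest
                              lifecycle:
                                postStart:             # runs right after the container is created
                                  exec:
                                    command: ["/bin/bash", "install.sh"]
                                preStop:               # runs before the container is terminated
                                  exec:
                                    command: ["/bin/bash", "uninstall.sh"]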
                       
                      diff --git a/docs/cce/umn/cce_10_0107.html b/docs/cce/umn/cce_10_0107.html index d114977f..ed5382a7 100644 --- a/docs/cce/umn/cce_10_0107.html +++ b/docs/cce/umn/cce_10_0107.html @@ -6,26 +6,26 @@

                      Permissions

                      When you access a cluster using kubectl, CCE uses kubeconfig generated on the cluster for authentication. This file contains user information, based on which CCE determines which Kubernetes resources can be accessed by kubectl. The permissions recorded in a kubeconfig file vary from user to user.

                      For details about user permissions, see Cluster Permissions (IAM-based) and Namespace Permissions (Kubernetes RBAC-based).


                      Using kubectl

                      To connect to a Kubernetes cluster from a PC, you can use kubectl, a Kubernetes command line tool. You can log in to the CCE console and click the name of the target cluster to access the cluster console. On the Overview page, view the access address and kubectl connection procedure.

                      CCE allows you to access a cluster through a private network or a public network.
                      • Intranet access: The client that accesses the cluster must be in the same VPC as the cluster.
                      • Public access: The client that accesses the cluster must be able to access public networks and the cluster has been bound with a public network IP.

                        To bind an EIP to the cluster, go to the Overview page and click Bind next to EIP in the Connection Information area. In a cluster with an EIP bound, kube-apiserver will be exposed to the Internet and may be attacked. To solve this problem, you can configure Advanced Anti-DDoS for the EIP of the node on which kube-apiserver runs.


                      Download kubectl and the configuration file. Copy the file to your client, and configure kubectl. After the configuration is complete, you can access your Kubernetes clusters. The process is as follows:

                      1. Download kubectl.

                        Prepare a computer that can access the public network and install kubectl in CLI mode. You can run the kubectl version command to check whether kubectl has been installed. If kubectl has been installed, skip this step.

                        This section uses the Linux environment as an example to describe how to install and configure kubectl. For details, see Installing kubectl.

                        1. Log in to your client and download kubectl.
                          cd /home
                           curl -LO https://dl.k8s.io/release/{v1.25.0}/bin/linux/amd64/kubectl

                          {v1.25.0} specifies the version. Replace it as required.

                        2. Install kubectl.
                          chmod +x kubectl
                           mv -f kubectl /usr/local/bin
                      2. Obtain the kubectl configuration file.

                        In the Connection Info pane on the Overview page, click Configure next to kubectl to check the kubectl connection. On the displayed page, choose Intranet access or Public network access and download the configuration file.

                        • The kubectl configuration file kubeconfig is used for cluster authentication. If the file is leaked, your clusters may be attacked.
                        • The Kubernetes permissions assigned by the configuration file downloaded by IAM users are the same as those assigned to the IAM users on the CCE console.
                        • If the KUBECONFIG environment variable is configured in the Linux OS, kubectl preferentially loads the KUBECONFIG environment variable instead of $home/.kube/config.

                        3. Configure kubectl.

                          Configure kubectl (A Linux OS is used).
                          1. Log in to your client and copy the configuration file (for example, kubeconfig.yaml) downloaded in 2 to the /home directory on your client.
                          2. Configure the kubectl authentication file.
                            cd /home
                             mkdir -p $HOME/.kube
                            mv -f kubeconfig.yaml $HOME/.kube/config
                    • Switch the kubectl access mode based on service scenarios.
                      • Run this command to enable intra-VPC access:
                        kubectl config use-context internal
                      • Run this command to enable public access (EIP required):
                        kubectl config use-context external
                      • Run this command to enable public access and two-way authentication (EIP required):
                        kubectl config use-context externalTLSVerify

                      • Two-Way Authentication for Domain Names

                        CCE supports two-way authentication for domain names.

                          • After an EIP is bound to an API Server, two-way domain name authentication is disabled by default if kubectl is used to access the cluster. You can run kubectl config use-context externalTLSVerify to enable the two-way domain name authentication.
                          • When an EIP is bound to or unbound from a cluster, or a custom domain name is configured or updated, the latest cluster access address (including the EIP bound to the cluster and all custom domain names configured for the cluster) will be added to the cluster server certificate.
                          • Asynchronous cluster synchronization takes about 5 to 10 minutes. You can view the synchronization result in Synchronize Certificate in Operation Records.
                          • For a cluster that has been bound to an EIP, if the authentication fails (x509: certificate is valid) when two-way authentication is used, bind the EIP again and download kubeconfig.yaml again.
                          • If the two-way domain name authentication is not supported, kubeconfig.yaml contains the "insecure-skip-tls-verify": true field, as shown in Figure 1. To use two-way authentication, download the kubeconfig.yaml file again and enable two-way authentication for the domain names.
                            Figure 1 Two-way authentication disabled for domain names

                        FAQs

                        • Error from server Forbidden

                          When you use kubectl to create or query Kubernetes resources, the following output is returned:

                          diff --git a/docs/cce/umn/cce_10_0112.html b/docs/cce/umn/cce_10_0112.html index cff0b184..9d34d3b0 100644 --- a/docs/cce/umn/cce_10_0112.html +++ b/docs/cce/umn/cce_10_0112.html @@ -12,17 +12,17 @@
                        • CLI

CLI is an efficient tool for health check. When using the CLI, you must specify an executable command in a container. The cluster periodically runs the command in the container. If the command exits with code 0, the health check is successful. Otherwise, the health check fails.

The CLI mode can be used to replace HTTP request-based and TCP port-based health checks.

• For a TCP port, you can write a script that connects to a container port. If the connection is successful, the script returns 0. Otherwise, the script returns -1.
• For an HTTP request, you can use a script to run the wget command to check the container.

                            wget http://127.0.0.1:80/health-check

Check the return code of the response. If the return code is within 200–399, the script returns 0. Otherwise, the script returns -1.

                            • Put the program to be executed in the container image so that the program can be executed.
• If the command to be executed is a shell script, do not directly specify the script as the command, but add a script parser. For example, if the script is /data/scripts/health_check.sh, you must specify sh /data/scripts/health_check.sh for command execution.
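A minimal sketch of a CLI (exec) liveness probe that wraps the wget check above in a shell command; the image, path, and timing values are illustrative:

apiVersion: v1
kind: Pod
metadata:
  name: exec-probe-demo
spec:
  containers:
  - name: container-1
    image: nginx:alpine                # Illustrative image
    livenessProbe:
      exec:
        command:                       # Run the check through a shell parser, as noted above
        - /bin/sh
        - -c
        - wget -q -O /dev/null http://127.0.0.1:80/health-check   # Exits 0 on success, non-zero otherwise
      initialDelaySeconds: 10
      periodSeconds: 5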
• gRPC check
  gRPC checks can configure startup, liveness, and readiness probes for your gRPC application without exposing any HTTP endpoint or requiring an executable. Kubernetes can connect to your workload via gRPC and obtain its status.
  • The gRPC check is supported only in CCE clusters of v1.25 or later.
  • To use gRPC for check, your application must support the gRPC health checking protocol.
  • Similar to HTTP and TCP probes, if the port is incorrect or the application does not support the health checking protocol, the check fails.
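A minimal sketch of a gRPC readiness probe, assuming a cluster of v1.25 or later and an application that implements the gRPC health checking protocol; the image name and port are illustrative:

apiVersion: v1
kind: Pod
metadata:
  name: grpc-probe-demo
spec:
  containers:
  - name: container-1
    image: <your-grpc-app-image>       # Must implement the gRPC health checking protocol
    ports:
    - containerPort: 9090
    readinessProbe:
      grpc:
        port: 9090                     # The check fails if the port is wrong or the protocol is unsupported
      initialDelaySeconds: 5
      periodSeconds: 10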

                        Common Parameters

Table 1 Common parameter description (Parameter, Description)

YAML example of a startup probe using an HTTP GET check:

startupProbe:             # Startup probe
  httpGet:                # Checking an HTTP request is used as an example.
    path: /healthz        # The HTTP check path is /healthz.
    port: 80              # The check port number is 80.
  failureThreshold: 30
  periodSeconds: 10

                        Environment variables can be set in the following modes:

                        • Custom: Enter the environment variable name and parameter value.
• Added from ConfigMap: Import all keys in a ConfigMap as environment variables.
• Added from ConfigMap key: Import the value of a key in a ConfigMap as the value of an environment variable. As shown in Figure 1, if you import configmap_value of configmap_key in configmap-example as the value of environment variable key1, an environment variable named key1 whose value is configmap_value is available in the container.
                        • Added from secret: Import all keys in a secret as environment variables.
                        • Added from secret key: Import the value of a key in a secret as the value of an environment variable. As shown in Figure 1, if you import secret_value of secret_key in secret-example as the value of environment variable key2, an environment variable named key2 whose value is secret_value is available in the container.
                        • Variable value/reference: Use the field defined by a pod as the value of the environment variable. As shown in Figure 1, if the pod name is imported as the value of environment variable key3, an environment variable named key3 whose value is the pod name is available in the container.
                        • Resource Reference: The value of Request or Limit defined by the container is used as the value of the environment variable. As shown in Figure 1, if you import the CPU limit of container-1 as the value of environment variable key4, an environment variable named key4 whose value is the CPU limit of container-1 is available in the container.
Adding Environment Variables

1. Log in to the CCE console.
2. Click the cluster name to go to the cluster console, choose Workloads in the navigation pane, and click Create Workload in the upper right corner.
3. When creating a workload, modify the container information in Container Settings and click the Environment Variables tab.
4. Configure environment variables.

  Figure 1 Configuring environment variables

                          YAML Example

                          apiVersion: apps/v1
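# A minimal sketch of the rest of such a Deployment; the original example is truncated here,
# so the workload name and image are illustrative. The ConfigMap/secret names and keys
# (configmap-example, configmap_key, secret-example, secret_key) and the variable names
# key1 to key4 follow the description and figure above.
kind: Deployment
metadata:
  name: env-example
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: env-example
  template:
    metadata:
      labels:
        app: env-example
    spec:
      containers:
      - name: container-1
        image: nginx:alpine                      # Illustrative image
        env:
        - name: key                              # Custom variable
          value: value
        - name: key1                             # Added from a ConfigMap key
          valueFrom:
            configMapKeyRef:
              name: configmap-example
              key: configmap_key
        - name: key2                             # Added from a secret key
          valueFrom:
            secretKeyRef:
              name: secret-example
              key: secret_key
        - name: key3                             # Variable reference: the pod name
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.name
        - name: key4                             # Resource reference: the CPU limit of container-1
          valueFrom:
            resourceFieldRef:
              containerName: container-1
              resource: limits.cpu
        envFrom:                                 # Import all keys of a ConfigMap or secret as variables
        - configMapRef:
            name: configmap-example
        - secretRef:
            name: secret-example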

                          Introduction

                          CCE Container Storage (FlexVolume), also called storage-driver, functions as a standard Kubernetes FlexVolume plugin to allow containers to use EVS, SFS, OBS, and SFS Turbo storage resources. By installing and upgrading storage-driver, you can quickly install and update cloud storage capabilities.

                          FlexVolume is a system resource add-on. It is installed by default when a cluster of Kubernetes v1.13 or earlier is created.

Notes and Constraints

• For clusters created in CCE, Kubernetes v1.15.11 is a transitional version in which the FlexVolume add-on is compatible with the CSI add-on (Everest). Clusters of v1.17 and later versions do not support FlexVolume anymore. Use the Everest add-on.
• The FlexVolume add-on will be maintained by Kubernetes developers, but new functionality will only be added to Everest. Do not create CCE storage that uses the FlexVolume add-on (storage-driver) anymore. Otherwise, storage may malfunction.
• This add-on can be installed only in clusters of v1.13 or earlier. By default, the Everest add-on is installed when clusters of v1.15 or later are created.

  In a cluster of v1.13 or earlier, when an upgrade or bug fix is available for storage functionalities, you only need to install or upgrade the storage-driver add-on. Upgrading the cluster or creating a cluster is not required.

                            For details, see DNS.

Notes and Constraints

                            To run CoreDNS properly or upgrade CoreDNS in a cluster, ensure the number of available nodes in the cluster is greater than or equal to the number of CoreDNS instances and all CoreDNS instances are running. Otherwise, the add-on will malfunction or the upgrade will fail.

                            Installing the Add-on

This add-on is installed by default. If it has been uninstalled for some reason, you can reinstall it by performing the following steps:

                            1. Log in to the CCE console and click the cluster name to access the cluster console. Choose Add-ons in the navigation pane, locate CoreDNS on the right, and click Install.
                            2. On the Install Add-on page, configure the specifications.

Table 1 Common parameters (Parameter, Description)

                      • Click Install.

                        On the Releases tab page, you can view the installation status of the release.

Upgrading a Chart-based Workload

1. Log in to the CCE console and click the cluster name to access the cluster console. Choose App Templates in the navigation pane. In the right pane, click the Releases tab.
2. Click Upgrade in the row where the desired workload resides and set the parameters for the workload.
3. Select a chart version for Chart Version.
4. Follow the prompts to modify the chart parameters. Confirm the modification and click Upgrade.
5. If the execution status is Upgraded, the workload has been upgraded.
Rolling Back a Chart-based Workload

1. Log in to the CCE console and click the cluster name to access the cluster console. Choose App Templates in the navigation pane. In the right pane, click the Releases tab.
2. Click More > Roll Back for the workload to be rolled back, select the workload version, and click Roll back to this version.

  In the workload list, if the status is Rollback successful, the workload is rolled back successfully.

Uninstalling a Chart-based Workload

1. Log in to the CCE console and click the cluster name to access the cluster console. Choose App Templates in the navigation pane. In the right pane, click the Releases tab.
2. Click More > Uninstall next to the release to be uninstalled, and click Yes. Exercise caution when performing this operation because releases cannot be restored after being uninstalled.

                          Prerequisites

                          Resources have been created. For details, see Creating a Node. If clusters and nodes are available, you need not create them again.

Using the CCE Console

1. Log in to the CCE console.
2. Click the cluster name to go to the cluster console, choose Workloads in the navigation pane, and click Create Workload in the upper right corner.
3. Set basic information about the workload.

  Basic Info
  • Workload Type: Select Job. For details about workload types, see Overview.
  • Workload Name: Enter the name of the workload. Enter 1 to 63 characters starting with a lowercase letter and ending with a lowercase letter or digit. Only lowercase letters, digits, and hyphens (-) are allowed.
  • Namespace: Select the namespace of the workload. The default value is default. You can also click Create Namespace to create one. For details, see Creating a Namespace.
  • Pods: Enter the number of pods of the workload.
  • Container Runtime: A CCE standard cluster uses runC by default, whereas a CCE Turbo cluster supports both runC and Kata. For details about the differences, see Secure Runtime and Common Runtime.
                              Container Settings
                              • Container Information
                                Multiple containers can be configured in a pod. You can click Add Container on the right to configure multiple containers for the pod.
                                • Basic Info: Configure basic information about the container.
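For reference, a minimal sketch of the kind of Job manifest that these console settings correspond to. The name, namespace, image, command, and counts are illustrative, and the mapping of the Pods field to completions/parallelism is an assumption:

apiVersion: batch/v1
kind: Job
metadata:
  name: job-example
  namespace: default
spec:
  completions: 1                 # Number of pods that must complete successfully
  parallelism: 1                 # Number of pods running at the same time
  backoffLimit: 6
  template:
    spec:
      containers:
      - name: container-1
        image: busybox:latest    # Illustrative image
        command: ["/bin/sh", "-c", "echo processing item && sleep 10"]
      restartPolicy: Never       # A Job requires Never or OnFailure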
Table 1 CoreDNS parameters (Parameter, Description)
                        1. The query is first sent to the DNS caching layer in CoreDNS.
                        2. From the caching layer, the suffix of the request is examined and then the request is forwarded to the corresponding DNS:
                          • Names with the cluster suffix, for example, .cluster.local: The request is sent to CoreDNS.
                          • Names with the stub domain suffix, for example, .acme.local: The request is sent to the configured custom DNS resolver that listens, for example, on 1.2.3.4.
                          • Names that do not match the suffix (for example, widget.com): The request is forwarded to the upstream DNS.
Figure 1 Routing
Table 1 Add-on configuration

• Pods: Number of pods for the add-on.
  High availability is not possible with a single pod. If an error occurs on the node where the add-on instance runs, the add-on will fail.
• Containers

Advanced Config

• parameterSyncStrategy: indicates whether to perform a configuration consistency check when the add-on is upgraded.
  • ensureConsistent: indicates that the configuration consistency check is enabled. If the configuration recorded in the cluster is inconsistent with the actual configuration, the add-on cannot be upgraded.
  • force: indicates that the configuration consistency check is ignored during an upgrade. In this case, you must ensure that the current effective configuration is the same as the original configuration. After the add-on is upgraded, restore the value of parameterSyncStrategy to ensureConsistent to enable the configuration consistency check again.
  • inherit: indicates that custom settings are automatically inherited during an upgrade. After the add-on is upgraded, the value of parameterSyncStrategy is automatically restored to ensureConsistent to enable the configuration consistency check again.
• servers: nameservers, which are available in CoreDNS v1.23.1 and later versions. You can customize nameservers. For details, see dns-custom-nameservers.
  plugins indicates the configuration of each component in CoreDNS. Retain the default settings typically to prevent CoreDNS from being unavailable due to configuration errors. Each plugin component contains name, parameters (optional), and configBlock (optional). The format of the generated Corefile is as follows:
  $name  $parameters {
   $configBlock
  }

  Table 4 describes common plugins. For details, see Plugins.
• upstream_nameservers: specifies the IP address of the upstream DNS server.

Example of advanced configurations:

{
    "annotations": {},
    "parameterSyncStrategy": "ensureConsistent",
    "servers": [
        {
            "plugins": [
                ...
                },
                {
                    "name": "ready",
                    "parameters": "{$POD_IP}:8081"
                },
                {
                    "configBlock": "pods insecure\nfallthrough in-addr.arpa ip6.arpa",
                ...
            ]
        }
    ],
    "upstream_nameservers": ["8.8.8.8", "8.8.4.4"]
}

Using both taints and tolerations allows (but does not force) the add-on Deployment to be scheduled to a node with the matching taints, and controls the Deployment eviction policies after the node where the Deployment is located is tainted.

                        The add-on adds the default tolerance policy for the node.kubernetes.io/not-ready and node.kubernetes.io/unreachable taints, respectively. The tolerance time window is 60s.


                        For details, see Configuring Tolerance Policies.
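A minimal sketch of what the default tolerations described above look like in the add-on's pod template; the 60-second tolerationSeconds value matches the tolerance window mentioned:

tolerations:
- key: node.kubernetes.io/not-ready
  operator: Exists
  effect: NoExecute
  tolerationSeconds: 60          # Evict the add-on pod 60s after the taint appears
- key: node.kubernetes.io/unreachable
  operator: Exists
  effect: NoExecute
  tolerationSeconds: 60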

Change History

Table 7 Release history

Add-on Version | Supported Cluster Version | New Feature | Community Version
1.29.4 | v1.21, v1.23, v1.25, v1.27, v1.28, v1.29 | CCE clusters 1.29 are supported. | 1.10.1
1.28.7 | v1.21, v1.23, v1.25, v1.27, v1.28 | Supported hot module replacement. Rolling upgrade is not required. | 1.10.1
1.28.5 | v1.21, v1.23, v1.25, v1.27, v1.28 | Fixed some issues. | 1.10.1
1.28.4 | v1.21, v1.23, v1.25, v1.27, v1.28 | CCE clusters 1.28 are supported. | 1.10.1
1.27.4 | v1.19, v1.21, v1.23, v1.25, v1.27 | None | 1.10.1
1.25.11 | v1.19, v1.21, v1.23, v1.25 | Supported anti-affinity scheduling of add-on pods on nodes in different AZs. Upgraded to community version 1.10.1. | 1.10.1
1.25.1 | v1.19, v1.21, v1.23, v1.25 | CCE clusters 1.25 are supported. | 1.8.4
1.23.3 | v1.15, v1.17, v1.19, v1.21, v1.23 | Regular upgrade of add-on dependencies | 1.8.4
1.23.1 | v1.15, v1.17, v1.19, v1.21, v1.23 | CCE clusters 1.23 are supported. | 1.8.4
1.17.15 | v1.15, v1.17, v1.19, v1.21 | CCE clusters 1.21 are supported. | 1.8.4
1.17.9 | v1.15, v1.17, v1.19 | Regular upgrade of add-on dependencies | 1.8.4
1.17.4 | v1.17, v1.19 | CCE clusters 1.19 are supported. | 1.6.5
Configuring a Workload

                        Introduction

                        CCE node problem detector (NPD) is an add-on that monitors abnormal events of cluster nodes and connects to a third-party monitoring platform. It is a daemon running on each node. It collects node issues from different daemons and reports them to the API server. The NPD add-on can run as a DaemonSet or a daemon.

                        For more information, see node-problem-detector.

Notes and Constraints

• When using this add-on, do not format or partition node disks.
• Each NPD process occupies 30m CPU and 100 MiB of memory.
• If the NPD version is 1.18.45 or later, the EulerOS version of the host machine must be 2.5 or later.

                        Permissions

                        To monitor kernel logs, the NPD add-on needs to read the host /dev/kmsg. Therefore, the privileged mode must be enabled. For details, see privileged.

                        In addition, CCE mitigates risks according to the least privilege principle. Only the following privileges are available for NPD running:

                        • cap_dac_read_search: permission to access /run/log/journal.
                        • cap_sys_admin: permission to access /dev/kmsg.

                        Installing the Add-on

                        1. Log in to the CCE console and click the cluster name to access the cluster console. Choose Add-ons in the navigation pane, locate CCE Node Problem Detector on the right, and click Install.
                        2. On the Install Add-on page, configure the specifications.

Table 1 NPD configuration (Parameter, Description)
Table 1 Add-on configuration

• Pods: If you select Custom, you can adjust the number of pods as required.
  High availability is not possible with a single pod. If an error occurs on the node where the add-on instance runs, the add-on will fail.
• Containers

npc.maxTaintedNode

The maximum number of nodes that NPC can add taints to when an individual fault occurs on multiple nodes, to minimize the impact.

The value can be in int or percentage format.

Using both taints and tolerations allows (but does not force) the add-on Deployment to be scheduled to a node with the matching taints, and controls the Deployment eviction policies after the node where the Deployment is located is tainted.

                          The add-on adds the default tolerance policy for the node.kubernetes.io/not-ready and node.kubernetes.io/unreachable taints, respectively. The tolerance time window is 60s.


                          For details, see Configuring Tolerance Policies.

• Warning event
  Typical scenario: Disk I/O suspension causes process suspension.
  Listening object: /dev/kmsg
  Matching rule: "task \\S+:\\w+ blocked for more than \\w+ seconds\\."

• Warning event
  Listening object: /dev/kmsg
  Matching rule: Remounting filesystem read-only

• Default threshold: 10 restarts within 10 minutes

  If kubelet restarts 10 times within 10 minutes, it indicates that the system restarts frequently and a fault alarm is generated.

• Listening object: logs in the /run/log/journal directory

  NOTE:
  The Ubuntu and HCE 2.0 OSs do not support the preceding check items due to incompatible log formats.

Frequent restarts of Docker

                          Viewing NPD Events

                          Events reported by the NPD add-on can be queried on the Nodes page.

1. Log in to the CCE console.
2. Click the cluster name to access the cluster console. Choose Nodes in the navigation pane.
3. Locate the row that contains the target node, and click View Events.

                          Collecting Prometheus Metrics

                          The NPD daemon pod exposes Prometheus metric data on port 19901. By default, the NPD pod is added with the annotation metrics.alpha.kubernetes.io/custom-endpoints: '[{"api":"prometheus","path":"/metrics","port":"19901","names":""}]'. You can build a Prometheus collector to identify and obtain NPD metrics from http://{{NpdPodIP}}:{{NpdPodPort}}/metrics.

                          If the NPD add-on version is earlier than 1.16.5, the exposed port of Prometheus metrics is 20257.
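A minimal sketch of a Prometheus scrape job that collects these metrics. The kube-system namespace and the app=node-problem-detector pod label are assumptions; adjust them to match your NPD pods:

scrape_configs:
- job_name: node-problem-detector
  kubernetes_sd_configs:
  - role: pod
    namespaces:
      names: [kube-system]                 # Assumed namespace of the NPD pods
  relabel_configs:
  - source_labels: [__meta_kubernetes_pod_label_app]
    regex: node-problem-detector           # Assumed pod label; adjust to match your NPD pods
    action: keep
  - source_labels: [__meta_kubernetes_pod_ip]
    target_label: __address__
    replacement: $1:19901                  # Use 20257 for NPD versions earlier than 1.16.5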

Example of the exposed metrics:

problem_gauge{reason="CRIIsDown",type="CRIProblem"} 0
problem_gauge{reason="CRIIsUp",type="CRIProblem"} 0
..

Change History
Table 12 Release history

Add-on Version | Supported Cluster Version | New Feature | Community Version
1.19.1 | v1.21, v1.23, v1.25, v1.27, v1.28, v1.29 | Fixed some issues. | 0.8.10
1.19.0 | v1.21, v1.23, v1.25, v1.27, v1.28 | Fixed some issues. | 0.8.10
1.18.48 | v1.21, v1.23, v1.25, v1.27, v1.28 | Fixed some issues. | 0.8.10
1.18.46 | v1.21, v1.23, v1.25, v1.27, v1.28 | CCE clusters 1.28 are supported. | 0.8.10
1.18.22 | v1.19, v1.21, v1.23, v1.25, v1.27 | None | 0.8.10
1.17.4 | v1.17, v1.19, v1.21, v1.23, v1.25 | Optimizes DiskHung check item. | 0.8.10
Change History
Table 2 Release history

Add-on Version | Supported Cluster Version | New Feature
2.6.4 | v1.28, v1.29 | Updated the isolation logic of GPU cards.
2.6.1 | v1.28, v1.29 | Upgraded the base images of the add-on.
2.5.6 | v1.28 | Fixed an issue that occurred during the installation of the driver.
2.5.4 | v1.28 | Clusters 1.28 are supported.
2.0.69 | v1.21, v1.23, v1.25, v1.27 | Upgraded the base images of the add-on.
2.0.48 | v1.21, v1.23, v1.25, v1.27 | Fixed an issue that occurred during the installation of the driver.
2.0.46 | v1.21, v1.23, v1.25, v1.27 | Supported Nvidia driver 535. Non-root users can use xGPUs. Optimized startup logic.
1.2.28 | v1.19, v1.21, v1.23, v1.25 | Adapted to Ubuntu 22.04. Optimized the automatic mounting of the GPU driver directory.
1.2.20 | v1.19, v1.21, v1.23, v1.25 | Set the add-on alias to gpu.
1.2.15 | v1.15, v1.17, v1.19, v1.21, v1.23 | CCE clusters 1.23 are supported.
1.2.9 | v1.15, v1.17, v1.19, v1.21 | CCE clusters 1.21 are supported.
1.2.2 | v1.15, v1.17, v1.19 | Supported the new EulerOS kernel.

                        NodePort

                        Scenario

                        A Service is exposed on each node's IP address at a static port (NodePort). When you create a NodePort Service, Kubernetes automatically allocates an internal IP address (ClusterIP) of the cluster. When clients outside the cluster access <NodeIP>:<NodePort>, the traffic will be forwarded to the target pod through the ClusterIP of the NodePort Service.

Figure 1 NodePort access
Notes and Constraints

• By default, a NodePort Service is accessed within a VPC. To use an EIP to access a NodePort Service through public networks, bind an EIP to the node in the cluster in advance.
• After a Service is created, if the affinity setting is switched from the cluster level to the node level, the connection tracing table will not be cleared. Do not modify the Service affinity setting after the Service is created. To modify it, create a Service again.
• In a CCE Turbo cluster, node-level affinity is supported only when the Service backend is connected to a HostNetwork pod.
• In VPC network mode, when container A is published through a NodePort service and the service affinity is set to the node level (that is, externalTrafficPolicy is set to local), container B deployed on the same node cannot access container A through the node IP address and NodePort service.
• When a NodePort service is created in a cluster of v1.21.7 or later, the port on the node is not displayed using netstat by default. If the cluster forwarding mode is iptables, run the iptables -t nat -L command to view the port. If the cluster forwarding mode is IPVS, run the ipvsadm -Ln command to view the port.
Creating a NodePort Service

1. Log in to the CCE console and click the cluster name to access the cluster console.
2. In the navigation pane, choose Services & Ingresses. In the upper right corner, click Create Service.
3. Configure intra-cluster access parameters.

  • Service Name: Specify a Service name, which can be the same as the workload name.
  • Service Type: Select NodePort.
  • Namespace: namespace that the workload belongs to.
  • Service Affinity: For details, see externalTrafficPolicy (Service Affinity).
    • Cluster level: The IP addresses and access ports of all nodes in a cluster can access the workload associated with the Service. Service access will cause performance loss due to route redirection, and the source IP address of the client cannot be obtained.
    • Node level: Only the IP address and access port of the node where the workload is located can access the workload associated with the Service. Service access will not cause performance loss due to route redirection, and the source IP address of the client can be obtained.
  • Selector: Add a label and click Confirm. The Service will use this label to select pods. You can also click Reference Workload Label to use the label of an existing workload. In the dialog box that is displayed, select a workload and click OK.
  • IPv6: This function is disabled by default. After this function is enabled, the cluster IP address of the Service changes to an IPv6 address. This parameter is available only in clusters of v1.15 or later with IPv6 enabled (set during cluster creation).
  • Ports
    • Protocol: protocol used by the Service.
    • Service Port: port used by the Service. The port number ranges from 1 to 65535.
    • Container Port: listener port of the workload. For example, Nginx uses port 80 by default.
    • Node Port: You are advised to select Auto. You can also specify a port. The default port ranges from 30000 to 32767.

                            4. Click OK.

                              Using kubectl

                              You can configure Service access using kubectl. This section uses an Nginx workload as an example to describe how to configure a NodePort Service using kubectl.

                              1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
                              2. Create and edit the nginx-deployment.yaml and nginx-nodeport-svc.yaml files.

                                The file names are user-defined. nginx-deployment.yaml and nginx-nodeport-svc.yaml are merely example file names.

                                vi nginx-deployment.yaml

                                apiVersion: apps/v1
                                 kind: Deployment
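For reference, a minimal sketch of what the companion nginx-nodeport-svc.yaml could contain, assuming the standard Kubernetes NodePort Service fields and that the Deployment's pods carry the label app: nginx (both are assumptions for illustration, not taken from this guide):

# nginx-nodeport-svc.yaml (illustrative sketch; adjust the selector and ports to your workload)
apiVersion: v1
kind: Service
metadata:
  name: nginx-nodeport
spec:
  type: NodePort               # expose the Service on a port of every cluster node
  selector:
    app: nginx                 # must match the pod labels defined in nginx-deployment.yaml
  ports:
  - protocol: TCP
    port: 8080                 # Service port used inside the cluster
    targetPort: 80             # container port the workload listens on (Nginx default)
    nodePort: 30080            # optional; omit it to let Kubernetes pick a port from 30000 to 32767

After applying both files with kubectl apply -f, the workload should be reachable at <node IP>:<node port>.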
                                diff --git a/docs/cce/umn/cce_10_0146.html b/docs/cce/umn/cce_10_0146.html
                                index 73385ff3..30069f28 100644
                                --- a/docs/cce/umn/cce_10_0146.html
                                +++ b/docs/cce/umn/cce_10_0146.html
                                @@ -2,7 +2,7 @@
                                 
                                 

                                Deploying an Application from a Chart

                                On the CCE console, you can upload a Helm chart package, deploy it, and manage the deployed pods.


                                Notes and Constraints

                                • The number of charts that can be uploaded by a single user is limited. The value displayed on the console of each region is the allowed quantity.
• A chart with multiple versions consumes the same portion of the chart quota.
                                • Users with chart operation permissions can perform multiple operations on clusters. Therefore, exercise caution when assigning users the chart lifecycle management permissions, including uploading charts and creating, deleting, and updating chart releases.

                                Chart Specifications

                                The Redis workload is used as an example to illustrate the chart specifications.

                                • Naming Requirement

                                  A chart package is named in the format of {name}-{version}.tgz, where {version} indicates the version number in the format of Major version number.Minor version number.Revision number, for example, redis-0.4.2.tgz.
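As an illustration only (not taken from this guide), the version in the package name typically matches the version field declared in the chart's Chart.yaml:

# Chart.yaml of a chart packaged as redis-0.4.2.tgz (illustrative sketch)
apiVersion: v2      # Helm 3 chart API version (assumption; Helm 2 charts use v1)
name: redis         # {name} part of the package file name
version: 0.4.2      # {version} part: Major version number.Minor version number.Revision number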


                        Describes configuration parameters required by templates.

                        NOTICE:

                        Make sure that the image address set in the values.yaml file is the same as the image address in the container image repository. Otherwise, an exception occurs when you create a workload, and the system displays a message indicating that the image fails to be pulled.


To obtain the image address, perform the following operations: Log in to the CCE console. In the navigation pane, choose Image Repository to access the SWR console. Choose My Images > Private Images and click the name of the uploaded image. On the Image Tags tab page, obtain the image address from the pull command. You can click the copy icon in the Image Pull Command column to copy the command.
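For example, the image-related fields in values.yaml might look like the sketch below; the key names and the repository address are illustrative assumptions, because each chart defines its own values layout:

# values.yaml excerpt (illustrative sketch; keys depend on how the chart templates reference them)
image:
  repository: swr.example-region.mycloud.com/mygroup/redis   # hypothetical address copied from the image pull command
  tag: v1.0.0
  pullPolicy: IfNotPresent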

                      • Image Access Credential: Select the credential used for accessing the image repository. The default value is default-secret. You can use default-secret to access images in SWR. For details about default-secret, see default-secret.
                      • (Optional) GPU: All is selected by default. The workload instance will be scheduled to the node of the specified GPU type.
• (Optional) Advanced Settings
  • Labels and Annotations: Add labels or annotations for pods using key-value pairs. After entering the key and value, click Confirm. For details about how to use and configure labels and annotations, see Configuring Labels and Annotations.
                        • Job Settings
                          • Parallel Pods: Maximum number of pods that can run in parallel during job execution. The value cannot be greater than the total number of pods in the job.
                          • Timeout (s): Once a job reaches this time, the job status becomes failed and all pods in this job will be deleted. If you leave this parameter blank, the job will never time out.
                          • Completion Mode
• Non-indexed: A job is considered complete when all of its pods have been successfully executed. Each pod completion is equivalent to the others.
                            • Indexed: Each pod gets an associated completion index from 0 to the number of pods minus 1. The job is considered complete when every pod allocated with an index is successfully executed. For an indexed job, pods are named in the format of $(job-name)-$(index).
                          • Suspend Job: By default, a job is executed immediately after being created. The job's execution will be suspended if you enable this option, and resumed after you disable it.
• Network Configuration

diff --git a/docs/cce/umn/cce_10_0151.html b/docs/cce/umn/cce_10_0151.html
index 366a17a4..e94596bd 100644
--- a/docs/cce/umn/cce_10_0151.html
+++ b/docs/cce/umn/cce_10_0151.html
@@ -9,7 +9,7 @@

                        Prerequisites

                        Resources have been created. For details, see Creating a Node.


                          Using the CCE Console

                          1. Log in to the CCE console.
2. Click the cluster name to go to the cluster console, choose Workloads in the navigation pane, and click Create Workload in the upper right corner.
                          3. Set basic information about the workload.

                            Basic Info
                            • Workload Type: Select Cron Job. For details about workload types, see Overview.
                            • Workload Name: Enter the name of the workload. Enter 1 to 63 characters starting with a lowercase letter and ending with a lowercase letter or digit. Only lowercase letters, digits, and hyphens (-) are allowed.
                            • Namespace: Select the namespace of the workload. The default value is default. You can also click Create Namespace to create one. For details, see Creating a Namespace.
                            • Container Runtime: A CCE standard cluster uses runC by default, whereas a CCE Turbo cluster supports both runC and Kata. For details about the differences, see Secure Runtime and Common Runtime.
                            Container Settings
                            • Container Information
                              Multiple containers can be configured in a pod. You can click Add Container on the right to configure multiple containers for the pod.
                              • Basic Info: Configure basic information about the container.

                                For a small-capacity node, adjust the maximum number of instances based on the site requirements. Alternatively, when creating a node on the CCE console, you can adjust the maximum number of instances for the node based on the node specifications.


                                Rules for Reserving Node Memory v2

                                For clusters of v1.21.4-r0, v1.23.3-r0, or later, the node memory reservation model is optimized to v2 and can be dynamically adjusted using the node pool parameters kube-reserved-mem and system-reserved-mem. For details, see Modifying Node Pool Configurations.

                                The total reserved node memory of the v2 model is equal to the sum of that reserved for the OS and that reserved for CCE to manage pods.

                                Reserved memory includes basic and floating parts. For the OS, the floating memory depends on the node specifications. For CCE, the floating memory depends on the number of pods on a node.

                              • Job Records: You can set the number of jobs that are successfully executed or fail to be executed. Setting a limit to 0 corresponds to keeping none of the jobs after they finish.
• (Optional) Advanced Settings
  • Labels and Annotations: Add labels or annotations for pods using key-value pairs. After entering the key and value, click Confirm. For details about how to use and configure labels and annotations, see Configuring Labels and Annotations.
diff --git a/docs/cce/umn/cce_10_0152.html b/docs/cce/umn/cce_10_0152.html
index 98f69cc1..49a85b7b 100644
--- a/docs/cce/umn/cce_10_0152.html
+++ b/docs/cce/umn/cce_10_0152.html
@@ -6,7 +6,7 @@

                                Benefits of ConfigMaps:

                                • Manage configurations of different environments and services.
                                • Deploy workloads in different environments. Multiple versions are supported for configuration files so that you can update and roll back workloads easily.
                                • Quickly import configurations in the form of files to containers.

                                Notes and Constraints

                                • The size of a ConfigMap resource file cannot exceed 1 MB.
                                • ConfigMaps cannot be used in static pods.

                                Procedure

                                1. Log in to the CCE console and click the cluster name to access the cluster console.
                                2. Choose ConfigMaps and Secrets in the navigation pane and click Create ConfigMap in the upper right corner.
                                3. Configure parameters.


                                  Scale-In Cool-Down Period

                                  Scale-in cooling intervals can be configured in the node pool settings and the Autoscaler add-on settings.

                                  Scale-in cooling interval configured in a node pool

This interval indicates the period during which nodes added to the current node pool after a scale-out cannot be deleted. This setting takes effect in the entire node pool.

Scale-in cooling interval configured in the Autoscaler add-on

The interval after a scale-out indicates the period during which the entire cluster cannot be scaled in after the Autoscaler add-on triggers a scale-out (due to the unschedulable pods, metrics, and scaling policies). This setting takes effect in the entire cluster.

The interval after a node is deleted indicates the period during which the cluster cannot be scaled in after the Autoscaler add-on triggers a scale-in. This setting takes effect in the entire cluster.

The interval after a failed scale-in indicates the period during which the cluster cannot be scaled in after a scale-in triggered by the Autoscaler add-on fails. This setting takes effect in the entire cluster.

                                  Change History

Table 1 Parameters for creating a ConfigMap

Parameter: Data

Data of a ConfigMap, in the key-value pair format. Click the add icon to add data. The value can be in string, JSON, or YAML format.

Parameter: Label

                                4. Click OK.

                                  The new ConfigMap is displayed in the ConfigMap list.


                                    Creating a ConfigMap Using kubectl

                                    1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
                                    2. Create a file named cce-configmap.yaml and edit it.

                                      vi cce-configmap.yaml

                                      apiVersion: v1
                                       kind: ConfigMap
                                       metadata:
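For reference, a minimal sketch of a complete cce-configmap.yaml; the keys and values below are placeholders, not values prescribed by this guide:

# cce-configmap.yaml (illustrative sketch; keys and values are placeholders)
apiVersion: v1
kind: ConfigMap
metadata:
  name: cce-configmap
data:
  SPECIAL_LEVEL: Hello         # example key-value pair
  SPECIAL_TYPE: Kubernetes     # example key-value pair

Create the ConfigMap with kubectl create -f cce-configmap.yaml and check it with kubectl get configmap cce-configmap.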
                                      diff --git a/docs/cce/umn/cce_10_0153.html b/docs/cce/umn/cce_10_0153.html
                                      index e6a5e61d..60e817b1 100644
                                      --- a/docs/cce/umn/cce_10_0153.html
                                      +++ b/docs/cce/umn/cce_10_0153.html
                                      @@ -3,7 +3,7 @@
                                       

                                      Creating a Secret

                                      Scenario

                                      A secret is a type of resource that holds sensitive data, such as authentication and key information. Its content is user-defined. After creating secrets, you can use them as files or environment variables in a containerized workload.


                                      Notes and Constraints

                                      Secrets cannot be used in static pods.

                                      Procedure

                                      1. Log in to the CCE console and click the cluster name to access the cluster console.
                                      2. Choose ConfigMaps and Secrets in the navigation pane, click the Secrets tab, and click Create Secret in the upper right corner.
                                      3. Configure parameters.

kind: Secret
metadata:
  name: mysecret               # Secret name
  namespace: default           # Namespace. The default value is default.
data:
  .dockerconfigjson: eyJh*****   # Content encoded using Base64.
type: kubernetes.io/dockerconfigjson

apiVersion: v1
metadata:
  name: mysecret               # Secret name
  namespace: default           # Namespace. The default value is default.
data:
  tls.crt: LS0tLS1CRU*****FURS0tLS0t   # Certificate content, which must be encoded using Base64.
  tls.key: LS0tLS1CRU*****VZLS0tLS0=   # Private key content, which must be encoded using Base64.

apiVersion: v1
metadata:
  name: mysecret               # Secret name
  namespace: default           # Namespace. The default value is default.
data:
  tls.crt: LS0tLS1CRU*****FURS0tLS0t   # Certificate content, which must be encoded using Base64.
  tls.key: LS0tLS1CRU*****VZLS0tLS0=   # Private key content, which must be encoded using Base64.


                                          Creating a Secret Using kubectl

                                          1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
                                          2. Create and edit the Base64-encoded cce-secret.yaml file.

                                            # echo -n "content to be encoded" | base64
                                             ******

                                            vi cce-secret.yaml

                                            The following YAML file uses the Opaque type as an example. For details about other types, see Secret Resource File Configuration Example.
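A minimal sketch of an Opaque cce-secret.yaml is shown below; the key name and the Base64 value are placeholders, not values from this guide:

# cce-secret.yaml (illustrative sketch; key and value are placeholders)
apiVersion: v1
kind: Secret
metadata:
  name: mysecret
type: Opaque
data:
  key1: "******"               # replace with the Base64-encoded value produced by the echo command above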

diff --git a/docs/cce/umn/cce_10_0154.html b/docs/cce/umn/cce_10_0154.html
index 57a6a2bb..e811513c 100644
--- a/docs/cce/umn/cce_10_0154.html
+++ b/docs/cce/umn/cce_10_0154.html
@@ -19,7 +19,7 @@

                                          Notes and Constraints

                                          • Ensure that there are sufficient resources for installing the add-on.
                                          • The default node pool does not support auto scaling. For details, see Description of DefaultPool.
                                          • Node scale-in will cause PVC/PV data loss for the local PVs associated with the node. These PVCs and PVs cannot be restored or used again. In a node scale-in, a pod that uses the local PV will be evicted from the node. A new pod will be created, but it remains in a pending state because the label of the PVC bound to it conflicts with the node label.
                                          • When Autoscaler is used, some taints or annotations may affect auto scaling. Therefore, do not use the following taints or annotations in clusters:
                                            • ignore-taint.cluster-autoscaler.kubernetes.io: The taint works on nodes. Kubernetes-native Autoscaler supports protection against abnormal scale outs and periodically evaluates the proportion of available nodes in the cluster. When the proportion of non-ready nodes exceeds 45%, protection will be triggered. In this case, all nodes with the ignore-taint.cluster-autoscaler.kubernetes.io taint in the cluster are filtered out from the Autoscaler template and recorded as non-ready nodes, which affect cluster scaling.
                                            • cluster-autoscaler.kubernetes.io/enable-ds-eviction: The annotation works on pods, which determines whether DaemonSet pods can be evicted by Autoscaler. For details, see Well-Known Labels, Annotations and Taints.

                                          Installing the Add-on

                                          1. Log in to the CCE console and click the cluster name to access the cluster console. Choose Add-ons in the navigation pane, locate CCE Cluster Autoscaler on the right, and click Install.
                                          2. On the Install Add-on page, configure the specifications.

                                        Table 1 Parameters for creating a secret

Parameter: Secret Data

                                        Workload secret data can be used in containers.

• If Secret Type is Opaque, click the add icon. In the dialog box displayed, enter a key-value pair and select Auto Base64 Encoding.
                                          • If Secret Type is kubernetes.io/dockerconfigjson, enter the account and password for logging in to the private image repository.
                                          • If Secret Type is kubernetes.io/tls or IngressTLS, upload the certificate file and private key file.
                                            NOTE:
                                            • A certificate is a self-signed or CA-signed credential used for identity authentication.
                                            • A certificate request is a request for a signature with a private key.

Maximum sum of CPU cores of all nodes in a cluster, within which cluster scale-out is performed.

Total Memory (GiB)

Maximum sum of memory of all nodes in a cluster, within which cluster scale-out is performed.

When the total number of nodes, CPUs, or memory is counted, unavailable nodes and resources on them in the default node pool are not included.

                                      4. Configure scheduling policies for the add-on.

                                        • Scheduling policies do not take effect on add-on instances of the DaemonSet type.
                                        • When configuring multi-AZ deployment or node affinity, ensure that there are nodes meeting the scheduling policy and that resources are sufficient in the cluster. Otherwise, the add-on cannot run.

Using both taints and tolerations allows (but does not force) the add-on Deployment to be scheduled to a node with the matching taints, and controls the Deployment eviction policies after the node where the Deployment runs is tainted.

                                  The add-on adds the default tolerance policy for the node.kubernetes.io/not-ready and node.kubernetes.io/unreachable taints, respectively. The tolerance time window is 60s.


                                  For details, see Configuring Tolerance Policies.

Table 6 Updates of the add-on adapted to clusters 1.29

Add-on Version | Supported Cluster Version | New Feature | Community Version
1.29.17 | v1.29 | Optimized events. | 1.29.1
1.29.13 | v1.29 | Clusters 1.29 are supported. | 1.29.1

Table 7 Updates of the add-on adapted to clusters 1.28

Add-on Version | Supported Cluster Version | New Feature | Community Version
1.28.55 | v1.28 | Optimized events. | 1.28.1
1.28.22 | v1.28 | Fixed some issues. | 1.28.1
1.28.20 | v1.28 | Fixed some issues. | 1.28.1
1.28.17 | v1.28 | Fixed the issue that scale-in cannot be performed when there are custom pod controllers in a cluster. | 1.28.1

Table 8 Updates of the add-on adapted to clusters 1.27

Add-on Version | Supported Cluster Version | New Feature | Community Version
1.27.55 | v1.27 | Fixed some issues. | 1.27.1
1.27.53 | v1.27 | Fixed some issues. | 1.27.1
1.27.51 | v1.27 | Fixed some issues. | 1.27.1
1.27.14 | v1.27 | Fixed the scale-in failure of nodes of different specifications in the same node pool and unexpected PreferNoSchedule taint issues. | 1.27.1

Table 9 Updates of the add-on adapted to clusters 1.25

Add-on Version | Supported Cluster Version | New Feature | Community Version
1.25.88 | v1.25 | Fixed some issues. | 1.25.0
1.25.86 | v1.25 | Fixed some issues. | 1.25.0
1.25.84 | v1.25 | Fixed some issues. | 1.25.0
1.25.46 | v1.25 | Fixed the scale-in failure of nodes of different specifications in the same node pool and unexpected PreferNoSchedule taint issues. | 1.25.0
1.25.21 | v1.25 | Fixed the issue that the autoscaler's least-waste is disabled by default. Fixed the issue that the node pool cannot be switched to another pool for scaling out after a scale-out failure and the add-on has to restart. The default taint tolerance duration is changed to 60s. Fixed the issue that scale-out is still triggered after the scale-out rule is disabled. | 1.25.0
1.25.7 | v1.25 | CCE clusters 1.25 are supported. Modified the memory request and limit of a customized flavor. Enabled to report an event indicating that scaling cannot be performed in a node pool with auto scaling disabled. | 1.25.0

Table 10 Updates of the add-on adapted to clusters 1.23

Add-on Version | Supported Cluster Version | New Feature | Community Version
1.23.95 | v1.23 | Fixed some issues. | 1.23.0
1.23.93 | v1.23 | Fixed some issues. | 1.23.0
1.23.91 | v1.23 | Fixed some issues. | 1.23.0
1.23.54 | v1.23 | Fixed the scale-in failure of nodes of different specifications in the same node pool and unexpected PreferNoSchedule taint issues. | 1.23.0
1.23.31 | v1.23 | Fixed the issue that the autoscaler's least-waste is disabled by default. Fixed the issue that the node pool cannot be switched to another pool for scaling out after a scale-out failure and the add-on has to restart. The default taint tolerance duration is changed to 60s. Fixed the issue that scale-out is still triggered after the scale-out rule is disabled. | 1.23.0
1.23.17 | v1.23 | Supported node scaling policies without a step. Fixed a bug so that deleted node pools are automatically removed. Supported scheduling by priority. Supported the emptyDir scheduling policy. Fixed a bug so that scale-in can be triggered on the nodes whose capacity is lower than the scale-in threshold when the node scaling policy is disabled. Modified the memory request and limit of a customized flavor. Enabled to report an event indicating that scaling cannot be performed in a node pool with auto scaling disabled. | 1.23.0
1.23.10 | v1.23 | Optimized logging. Supported scale-in waiting so that operations such as data dump can be performed before a node is deleted. | 1.23.0

Table 11 Updates of the add-on adapted to clusters 1.21

Add-on Version | Supported Cluster Version | New Feature | Community Version
1.21.89 | v1.21 | Fixed some issues. | 1.21.0
1.21.87 | v1.21 | Fixed some issues. | 1.21.0
1.21.86 | v1.21 | Fixed the issue that the node pool auto scaling cannot meet expectations after AZ topology constraints are configured for nodes. | 1.21.0
1.21.51 | v1.21 | Fixed the scale-in failure of nodes of different specifications in the same node pool and unexpected PreferNoSchedule taint issues. | 1.21.0
1.21.29 | v1.21 | Supported anti-affinity scheduling of add-on pods on nodes in different AZs. Added the tolerance time during which the pods with temporary storage volumes cannot be scheduled. Fixed the issue that the number of node pools cannot be restored when scaling group resources are insufficient. Fixed the issue that the node pool cannot be switched to another pool for scaling out after a scale-out failure and the add-on has to restart. The default taint tolerance duration is changed to 60s. Fixed the issue that scale-out is still triggered after the scale-out rule is disabled. | 1.21.0
1.21.16 | v1.21 | Supported node scaling policies without a step. Fixed a bug so that deleted node pools are automatically removed. Supported scheduling by priority. Supported the emptyDir scheduling policy. Fixed a bug so that scale-in can be triggered on the nodes whose capacity is lower than the scale-in threshold when the node scaling policy is disabled. Modified the memory request and limit of a customized flavor. Enabled to report an event indicating that scaling cannot be performed in a node pool with auto scaling disabled. | 1.21.0
1.21.9 | v1.21 | Optimized logging. Supported scale-in waiting so that operations such as data dump can be performed before a node is deleted. | 1.21.0
1.21.1 | v1.21 | Fixed the issue that the node pool modification in the existing periodic auto scaling rule does not take effect. | 1.21.0

Table 12 Updates of the add-on adapted to clusters 1.19

Add-on Version | Supported Cluster Version | New Feature | Community Version
1.19.76 | v1.19 | Optimized the method of identifying GPUs and NPUs. Used the remaining node quota of a cluster for the extra nodes that are beyond the cluster scale. | 1.19.0
1.19.56 | v1.19 | Fixed the scale-in failure of nodes of different specifications in the same node pool and unexpected PreferNoSchedule taint issues. | 1.19.0
1.19.35 | v1.19 | Supported anti-affinity scheduling of add-on pods on nodes in different AZs. Added the tolerance time during which the pods with temporary storage volumes cannot be scheduled. Fixed the issue that the number of node pools cannot be restored when scaling group resources are insufficient. Fixed the issue that the node pool cannot be switched to another pool for scaling out after a scale-out failure and the add-on has to restart. The default taint tolerance duration is changed to 60s. Fixed the issue that scale-out is still triggered after the scale-out rule is disabled. | 1.19.0
1.19.22 | v1.19 | Supported node scaling policies without a step. Fixed a bug so that deleted node pools are automatically removed. Supported scheduling by priority. Supported the emptyDir scheduling policy. Fixed a bug so that scale-in can be triggered on the nodes whose capacity is lower than the scale-in threshold when the node scaling policy is disabled. Modified the memory request and limit of a customized flavor. Enabled to report an event indicating that scaling cannot be performed in a node pool with auto scaling disabled. | 1.19.0
1.19.14 | v1.19 | Optimized logging. Supported scale-in waiting so that operations such as data dump can be performed before a node is deleted. | 1.19.0
1.19.11 | v1.19 | Fixed the issue that authentication fails due to incorrect signature in the add-on request retries. | 1.19.0
1.19.8 | v1.19 | Fixed the issue that the node pool modification in the existing periodic auto scaling rule does not take effect. | 1.19.0
1.19.7 | v1.19 | Regular upgrade of add-on dependencies. | 1.19.0

Table 13 Updates of the add-on adapted to clusters 1.17

Add-on Version | Supported Cluster Version | New Feature | Community Version
1.17.27 | v1.17 | Optimized logging. Fixed a bug so that deleted node pools are automatically removed. Supported scheduling by priority. Fixed the issue that taints on newly added nodes are overwritten. Fixed a bug so that scale-in can be triggered on the nodes whose capacity is lower than the scale-in threshold when the node scaling policy is disabled. Modified the memory request and limit of a customized flavor. Enabled to report an event indicating that scaling cannot be performed in a node pool with auto scaling disabled. | 1.17.0
1.17.22 | v1.17 | Optimized logging. | 1.17.0
1.17.19 | v1.17 | Fixed the issue that authentication fails due to incorrect signature in the add-on request retries. | 1.17.0
1.17.16 | v1.17 | Fixed the issue that the node pool modification in the existing periodic auto scaling rule does not take effect. | 1.17.0
1.17.15 | v1.17 | Unified resource specification configuration unit. | 1.17.0
1.17.2 | v1.17 | Clusters 1.17 are supported. | 1.17.0
                                diff --git a/docs/cce/umn/cce_10_0163.html b/docs/cce/umn/cce_10_0163.html index 1a1d92ef..843ab2b5 100644 --- a/docs/cce/umn/cce_10_0163.html +++ b/docs/cce/umn/cce_10_0163.html @@ -10,7 +10,7 @@

                                When creating a workload, you are advised to set the upper and lower limits of CPU and memory resources. If the upper and lower resource limits are not set for a workload, a resource leak of this workload will make resources unavailable for other workloads deployed on the same node. In addition, workloads that do not have upper and lower resource limits cannot be accurately monitored.

                                -

                                Configuration Description

                                In real-world scenarios, the recommended ratio of Request to Limit is about 1:1.5. For some sensitive services, the recommended ratio is 1:1. If the Request is too small and the Limit is too large, node resources are oversubscribed. During service peaks, the memory or CPU of a node may be used up. As a result, the node is unavailable.

                                +

                                Configuration

                                In real-world scenarios, the recommended ratio of Request to Limit is about 1:1.5. For some sensitive services, the recommended ratio is 1:1. If the Request is too small and the Limit is too large, node resources are oversubscribed. During service peaks, the memory or CPU of a node may be used up. As a result, the node is unavailable.

                                • CPU quota: The unit of CPU resources is core, which can be expressed by quantity or an integer suffixed with the unit (m). For example, 0.1 core in the quantity expression is equivalent to 100m in the expression. However, Kubernetes does not allow CPU resources whose precision is less than 1m.
                                  @@ -55,7 +55,7 @@
                                  Table 1 CPU quotas

                                  Parameter

                                  Recommended configuration

                                  -

                                  Actual available memory of a node ≥ Sum of memory limits of all containers on the current node ≥ Sum of memory requests of all containers on the current node. You can view the actual available memory of a node on the CCE console (Resource Management > Nodes > Allocatable).

                                  +

                                  Actual available memory of a node ≥ Sum of memory limits of all containers on the current node ≥ Sum of memory requests of all containers on the current node. You can view the actual available memory of a node on the CCE console (Resource Management > Nodes > Allocatable).

                                The allocatable resources are calculated based on the resource request value (Request), which indicates the upper limit of resources that can be requested by pods on this node, but does not indicate the actual available resources of the node (for details, see Example of CPU and Memory Quota Usage). The calculation formula is as follows:

                                • Allocatable CPU = Total CPU – Requested CPU of all pods – Reserved CPU for other resources
                                • Allocatable memory = Total memory – Requested memory of all pods – Reserved memory for other resources
                                @@ -105,7 +105,7 @@

                                In this case, the remaining 1 core 5 GiB can be used by the next new pod.

                                If pod 1 is under heavy load during peak hours, it will use more CPUs and memory within the limit. Therefore, the actual allocatable resources are fewer than 1 core 5 GiB.
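For reference, the following is a minimal pod sketch that applies the recommended Request-to-Limit ratio of about 1:1.5. The pod name, image, and resource values are illustrative only and are not CCE defaults.

apiVersion: v1
kind: Pod
metadata:
  name: quota-ratio-example      # illustrative name
spec:
  containers:
  - name: app
    image: nginx:alpine          # placeholder image
    resources:
      requests:
        cpu: 500m                # 0.5 core requested
        memory: 1024Mi
      limits:
        cpu: 750m                # limit is about 1.5 x the CPU request
        memory: 1536Mi           # limit is 1.5 x the memory request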

                                -

                                Quotas of Other Resources

                                Typically, nodes support local ephemeral storage, which is provided by locally mounted writable devices or RAM. Ephemeral storage does not ensure long-term data availability. Pods can use local ephemeral storage to buffer data and store logs, or mount emptyDir storage volumes to containers. For details, see Local ephemeral storage.

                                +

                                Quotas of Other Resources

Typically, nodes support local ephemeral storage, which is provided by locally mounted writable devices or RAM. Local ephemeral volumes (EVs) do not ensure long-term data availability. Pods can use local EVs to buffer data and store logs, or mount emptyDir volumes to containers. For details, see Local ephemeral storage.

Kubernetes allows you to specify the requested value and limit value of ephemeral storage in container configurations to manage the local ephemeral storage. The following attributes can be configured for each container in a pod (see the example after this list):

                                • spec.containers[].resources.limits.ephemeral-storage
                                • spec.containers[].resources.requests.ephemeral-storage
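A minimal container sketch that sets both attributes could look like this; the pod name, image, and sizes are illustrative, not recommended values.

apiVersion: v1
kind: Pod
metadata:
  name: ephemeral-storage-example   # illustrative name
spec:
  containers:
  - name: app
    image: nginx:alpine             # placeholder image
    resources:
      requests:
        ephemeral-storage: 2Gi      # requested local ephemeral storage
      limits:
        ephemeral-storage: 4Gi      # upper limit of local ephemeral storage
    volumeMounts:
    - name: cache
      mountPath: /cache
  volumes:
  - name: cache
    emptyDir: {}                    # emptyDir also consumes the node's local ephemeral storage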
                                @@ -144,7 +144,7 @@ spec:
                                diff --git a/docs/cce/umn/cce_10_0164.html b/docs/cce/umn/cce_10_0164.html index 80831242..07de0fd6 100644 --- a/docs/cce/umn/cce_10_0164.html +++ b/docs/cce/umn/cce_10_0164.html @@ -14,8 +14,6 @@ - diff --git a/docs/cce/umn/cce_10_0175.html b/docs/cce/umn/cce_10_0175.html index 36228a12..fbf9480f 100644 --- a/docs/cce/umn/cce_10_0175.html +++ b/docs/cce/umn/cce_10_0175.html @@ -1,14 +1,14 @@ -

                                Connecting to a Cluster Using an X.509 Certificate

                                -

                                Scenario

                                This section describes how to obtain the cluster certificate from the console and use it access Kubernetes clusters.

                                +

                                Accessing a Cluster Using an X.509 Certificate

                                +

                                Scenario

                                This section describes how to obtain the cluster certificate from the console and use it to access Kubernetes clusters.

                                -

                                Procedure

                                1. Log in to the CCE console and click the cluster name to access the cluster console.
                                2. On the Overview page, locate the Connection Info area, and click Download next to X.509 certificate.
                                3. In the Obtain Certificate dialog box displayed, select the certificate expiration time and download the X.509 certificate of the cluster as prompted.

                                  Figure 1 Downloading a certificate
                                  +

                                  Procedure

                                  1. Log in to the CCE console and click the cluster name to access the cluster console.
                                  2. On the Overview page, locate the Connection Info area, and click Download next to X.509 certificate.
                                  3. In the Obtain Certificate dialog box displayed, select the certificate expiration time and download the X.509 certificate of the cluster as prompted.

                                    Figure 1 Downloading a certificate
                                    • The downloaded certificate contains three files: client.key, client.crt, and ca.crt. Keep these files secure.
                                    • Certificates are not required for mutual access between containers in a cluster.
                                    -

                                  4. Call native Kubernetes APIs using the cluster certificate.

                                    For example, run the curl command to call an API to view the pod information. In the following information, 192.168.0.18:5443 indicates the IP address of the API server in the cluster.

                                    +

                                  5. Call native Kubernetes APIs using the cluster certificate.

                                    For example, run the curl command to call an API to obtain the pod information. In the following information, 192.168.0.18:5443 indicates the private IP address or EIP and port number of the API server in the cluster.

                                    curl --cacert ./ca.crt --cert ./client.crt --key ./client.key  https://192.168.0.18:5443/api/v1/namespaces/default/pods/
                                    -

                                    For more cluster APIs, see Kubernetes APIs.

                                    +

                                    For more cluster APIs, see Kubernetes API.

                                diff --git a/docs/cce/umn/cce_10_0178.html b/docs/cce/umn/cce_10_0178.html index aead90ab..9643e14f 100644 --- a/docs/cce/umn/cce_10_0178.html +++ b/docs/cce/umn/cce_10_0178.html @@ -5,7 +5,7 @@

                                To ensure node stability, a certain number of CCE node resources will be reserved for Kubernetes components (such as kubelet, kube-proxy, and docker) based on the node specifications.

                                CCE calculates the resources that can be allocated to user nodes as follows:

                                Allocatable resources = Total amount - Reserved amount - Eviction threshold

                                -

                                The memory eviction threshold is fixed at 100 MB.

                                +

                                The memory eviction threshold is fixed at 100 MiB.

                                Total amount indicates the available memory of the ECS, excluding the memory used by system components. Therefore, the total amount is slightly less than the memory of the node flavor.

                                When the memory consumed by all pods on a node increases, the following behaviors may occur:

                                @@ -21,24 +21,24 @@

TM ≤ 8 GiB | 0 MiB

8 GiB < TM ≤ 16 GiB | [(TM – 8 GiB) x 1024 x 10%] MiB

16 GiB < TM ≤ 128 GiB | [8 GiB x 1024 x 10% + (TM – 16 GiB) x 1024 x 6%] MiB

TM > 128 GiB | (8 GiB x 1024 x 10% + 112 GiB x 1024 x 6% + (TM – 128 GiB) x 1024 x 2%) MiB

TM ≤ 2 GiB | None | TM x 25%

TM > 2 GiB | 0 < Max. pods on a node ≤ 16 | 700 MiB

TM > 2 GiB | 16 < Max. pods on a node ≤ 32 | [700 + (Max. pods on a node – 16) x 18.75] MiB

TM > 2 GiB | 32 < Max. pods on a node ≤ 64 | [1024 + (Max. pods on a node – 32) x 6.25] MiB

TM > 2 GiB | 64 < Max. pods on a node ≤ 128 | [1230 + (Max. pods on a node – 64) x 7.80] MiB

TM > 2 GiB | Max. pods on a node > 128 | [1740 + (Max. pods on a node – 128) x 11.20] MiB

Basic/Floating | Reservation | Used by

Basic | Fixed at 400 MiB | OS service components such as sshd and systemd-journald.

Floating (depending on the node memory) | 25 MiB/GiB | Kernel

Basic | Fixed at 500 MiB | Container engine components, such as kubelet and kube-proxy, when the node is unloaded

Floating (depending on the number of pods on the node) | Docker: 20 MiB/pod; containerd: 5 MiB/pod | Container engine components when the number of pods increases

NOTE:

When the v2 model reserves memory for a node by default, the default maximum number of pods is estimated based on the memory. For details, see Table 1.
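As a rough worked example of how the formulas above are applied (illustrative only): for a node with TM = 32 GiB, the first group of formulas gives 8 GiB x 1024 x 10% + (32 GiB – 16 GiB) x 1024 x 6% = 819.2 MiB + 983.04 MiB ≈ 1802 MiB, and, assuming the node allows at most 40 pods, the pod-count formulas give [1024 + (40 – 32) x 6.25] MiB = 1074 MiB. The exact reservation on a given node may differ.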

                                diff --git a/docs/cce/umn/cce_10_0183.html b/docs/cce/umn/cce_10_0183.html index 14ebd694..a421e86d 100644 --- a/docs/cce/umn/cce_10_0183.html +++ b/docs/cce/umn/cce_10_0183.html @@ -6,9 +6,9 @@ -

                                CCE Cluster Permissions and IAM RBAC

                                CCE is compatible with IAM system roles for permissions management. You are advised to use fine-grained policies provided by IAM to simplify permissions management.

                                +

                                CCE Cluster Permissions and IAM RBAC

                                CCE is compatible with IAM system roles for permissions management. Use fine-grained policies provided by IAM to simplify permissions management.

                                CCE supports the following roles:

                                • Basic IAM roles:
                                  • te_admin (Tenant Administrator): Users with this role can call all APIs of all services except IAM.
                                  • readonly (Tenant Guest): Users with this role can call APIs with the read-only permissions of all services except IAM.
                                • Custom CCE administrator role: CCE Administrator

                                If a user has the Tenant Administrator or CCE Administrator system role, the user has the cluster-admin permissions in Kubernetes RBAC and the permissions cannot be removed after the cluster is created.

                                -
                                If the user is the cluster creator, the cluster-admin permissions in Kubernetes RBAC are granted to the user by default. The permissions can be manually removed after the cluster is created.
                                • Method 1: Choose Permissions Management > Namespace-Level Permissions > Delete at the same role as cluster-creator on the CCE console.
                                • Method 2: Delete ClusterRoleBinding: cluster-creator through the API or kubectl.
                                +
                                If the user is the cluster creator, the cluster-admin permissions in Kubernetes RBAC are granted to the user by default. The permissions can be manually removed after the cluster is created.
• Method 1: Choose Permissions Management > Namespace-Level Permissions > Delete in the same row as cluster-creator on the CCE console.
                                • Method 2: Delete ClusterRoleBinding: cluster-creator through the API or kubectl.
                                -

                                When RBAC and IAM policies co-exist, the backend authentication logic for open APIs or console operations on CCE is as follows:

                                -

                                +

                                When RBAC and IAM policies co-exist, the backend authentication logic for open APIs or console operations on CCE is as follows.

                                +

                                diff --git a/docs/cce/umn/cce_10_0189.html b/docs/cce/umn/cce_10_0189.html index 4534e03e..2fa073f7 100644 --- a/docs/cce/umn/cce_10_0189.html +++ b/docs/cce/umn/cce_10_0189.html @@ -4,7 +4,7 @@

                                Namespace Permissions (Kubernetes RBAC-based)

                                You can regulate users' or user groups' access to Kubernetes resources in a single namespace based on their Kubernetes RBAC roles. The RBAC API declares four kinds of Kubernetes objects: Role, ClusterRole, RoleBinding, and ClusterRoleBinding, which are described as follows:

                                • Role: defines a set of rules for accessing Kubernetes resources in a namespace.
                                • RoleBinding: defines the relationship between users and roles.
                                • ClusterRole: defines a set of rules for accessing Kubernetes resources in a cluster (including all namespaces).
                                • ClusterRoleBinding: defines the relationship between users and cluster roles.

                                Role and ClusterRole specify actions that can be performed on specific resources. RoleBinding and ClusterRoleBinding bind roles to specific users, user groups, or ServiceAccounts. Illustration:

                                -
                                Figure 1 Role binding
                                +
                                Figure 1 Role binding
                                On the CCE console, you can assign permissions to a user or user group to access resources in one or multiple namespaces. By default, the CCE console provides the following ClusterRoles:
                                • view (read-only): read-only permission on most resources in all or selected namespaces.
                                • edit (development): read and write permissions on most resources in all or selected namespaces. If this ClusterRole is configured for all namespaces, its capability is the same as the O&M permission.
                                • admin (O&M): read and write permissions on most resources in all namespaces, and read-only permission on nodes, storage volumes, namespaces, and quota management.
                                • cluster-admin (administrator): read and write permissions on all resources in all namespaces.
                                • drainage-editor: drain a node.
                                • drainage-viewer: view the nodal drainage status but cannot drain a node.

                                In addition to the preceding typical ClusterRoles, you can define Role and RoleBinding to grant the permissions to add, delete, modify, and obtain global resources (such as nodes, PVs, and CustomResourceDefinitions) and different resources (such as pods, Deployments, and Services) in namespaces for refined permission control.
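For example, the following RoleBinding sketch grants the built-in view ClusterRole in a single namespace to a user group. The binding name, namespace, and group name are placeholders for illustration only.

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: view-binding-example       # placeholder name
  namespace: default               # namespace the permission applies to
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: view                       # one of the ClusterRoles described above
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: example-user-group         # placeholder user group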

                                @@ -145,7 +145,7 @@ csi-nas everest-csi-provisioner Delete Immediate csi-obs everest-csi-provisioner Delete Immediate false 75d

                                Example: Assigning Namespace O&M Permissions (admin)

                                The admin role has the read and write permissions on most namespace resources. You can grant the admin permission on all namespaces to a user or user group.

                                -

                                In the following example kubectl output, a RoleBinding has been created and binds the admin role to the user group cce-role-group.

                                +

                                In the following example kubectl output, a RoleBinding has been created and the admin role is bound to the user group cce-role-group.

                                # kubectl get rolebinding
                                 NAME                                                      ROLE                AGE
                                 clusterrole_admin_group0c96fad22880f32a3f84c009862af6f7   ClusterRole/admin   18s
                                diff --git a/docs/cce/umn/cce_10_0191.html b/docs/cce/umn/cce_10_0191.html
                                index 9b6c0584..ae30fb9b 100644
                                --- a/docs/cce/umn/cce_10_0191.html
                                +++ b/docs/cce/umn/cce_10_0191.html
                                @@ -2,11 +2,11 @@
                                 
                                 

                                Overview

                                CCE provides a console for managing Helm charts, helping you easily deploy applications using the charts and manage applications on the console.

                                -

                                Helm

                                Helm is a package manager for Kubernetes and manages charts. A Helm chart is a series of YAML files used to encapsulate native Kubernetes applications. When deploying an application, you can customize some metadata of the application for easy application distribution. Application releasers can use Helm to package applications, manage application dependencies and application versions, and release applications to the software repository. After using Helm, users do not need to compile complex application deployment files. They can easily search for, install, upgrade, roll back, and uninstall applications on Kubernetes.

                                +

                                Helm

Helm is a package manager for Kubernetes and manages charts. A Helm chart is a series of YAML files used to encapsulate native Kubernetes applications. When deploying an application, you can customize some metadata of the application for easy application distribution. Application releasers can use Helm to package applications, manage application dependencies and application versions, and release applications to the software repository. After using Helm, users do not need to compile complex application deployment files. They can easily search for, install, upgrade, roll back, and uninstall applications on Kubernetes.
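To make the "series of YAML files" concrete: a minimal chart is just a directory with a Chart.yaml, a values.yaml, and a templates/ folder. The sketch below uses a made-up chart name and versions.

# my-app/Chart.yaml (made-up chart name)
apiVersion: v2            # chart API version used by Helm 3
name: my-app
description: A minimal example chart
version: 0.1.0            # version of the chart itself
appVersion: "1.0.0"       # version of the application it packages
# my-app/templates/ holds the Kubernetes YAML manifests,
# and my-app/values.yaml supplies the parameters those templates reference.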

                                The relationship between Helm and Kubernetes is as follows:

                                • Helm <–> Kubernetes
                                • Apt <–> Ubuntu
                                • Yum <–> CentOS
                                • Pip <–> Python

                                The following figure shows the solution architecture:

                                -

                                +

                                Helm can help application orchestration for Kubernetes:

                                • Manages, edits, and updates a large number of Kubernetes configuration files.
                                • Deploys a complex Kubernetes application that contains a large number of configuration files.
                                • Shares and reuses Kubernetes configurations and applications.
                                • Supports multiple environments with parameter-based configuration templates.
                                • Manages the release of applications, including rolling back the application, finding differences (using the diff command), and viewing the release history.
                                • Controls phases in a deployment cycle.
                                • Tests and verifies the released version.
                                diff --git a/docs/cce/umn/cce_10_0193.html b/docs/cce/umn/cce_10_0193.html index 8dce80b2..4e30661a 100644 --- a/docs/cce/umn/cce_10_0193.html +++ b/docs/cce/umn/cce_10_0193.html @@ -11,7 +11,7 @@

                                Installing the Add-on

                                1. Log in to the CCE console and click the cluster name to access the cluster console. Choose Add-ons in the navigation pane, locate Volcano Scheduler on the right, and click Install.
                                2. On the Install Add-on page, configure the specifications.

                                  -

                                  Table 1 Volcano configuration

                                  Parameter

                                  +
                                  @@ -26,6 +26,7 @@
Table 1 Add-on configuration

Parameter | Description

… | Number of pods that will be created to match the selected add-on specifications. If you select Custom, you can adjust the number of pods as required. High availability is not possible with a single pod. If an error occurs on the node where the add-on instance runs, the add-on will fail.

Containers | …

                                  @@ -148,7 +149,8 @@
                                  -

                                3. Configure the add-on parameters.

                                  Configure parameters of the default Volcano scheduler. For details, see Table 4.
                                  colocation_enable: ''
                                  +

                                4. Configure the add-on parameters.

                                  • Application Scaling Priority Policy: After this function is enabled, application scale-in is performed based on the default priority policy and customized policies. If application scale-out is required, you need to set the default scheduler of the cluster to volcano.
                                  • Advanced Settings: You can configure the default scheduler parameters. For details, see Table 4.
                                  +
                                  Example:
                                  colocation_enable: ''
                                   default_scheduler_conf:
                                     actions: 'allocate, backfill, preempt'
                                     tiers:
                                  @@ -185,65 +187,65 @@ tolerations:
                                       operator: Exists
                                       tolerationSeconds: 60
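Combining the actions line above with the plugins listed in Table 3 and Table 4, a fuller Advanced Settings sketch might look as follows. This is only an illustration assembled from the parameters documented in this section, not the exact default configuration shipped with the add-on.

colocation_enable: ''
default_scheduler_conf:
  actions: 'allocate, backfill, preempt'
  tiers:
  - plugins:
    - name: priority
    - name: gang
      enablePreemptable: false
    - name: conformance
  - plugins:
    - name: drf
    - name: predicates
    - name: nodeorder
  - plugins:
    - name: binpack
      arguments:
        binpack.weight: 10
tolerations:
- effect: NoExecute
  key: node.kubernetes.io/not-ready
  operator: Exists
  tolerationSeconds: 60
- effect: NoExecute
  key: node.kubernetes.io/unreachable
  operator: Exists
  tolerationSeconds: 60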
Table 3 Advanced Volcano configuration parameters

Plugin | Function | Description | Demonstration

colocation_enable
Function: Whether to enable hybrid deployment.
Description: Value:
• true: hybrid enabled
• false: hybrid disabled
Demonstration: None

default_scheduler_conf
Function: Used to schedule pods. It consists of a series of actions and plugins and features high scalability. You can specify and implement actions and plugins based on your requirements.
Description: It consists of actions and tiers.
• actions: defines the types and sequence of actions to be executed by the scheduler.
• tiers: configures the plugin list.
Demonstration: None

actions
Function: Actions to be executed in each scheduling phase. The configured action sequence is the scheduler execution sequence. For details, see Actions.
Description: The scheduler traverses all jobs to be scheduled and performs actions such as enqueue, allocate, preempt, and backfill in the configured sequence to find the most appropriate node for each job. The following options are supported:
• enqueue: uses a series of filtering algorithms to filter out tasks to be scheduled and sends them to the queue to wait for scheduling. After this action, the task status changes from pending to inqueue.
• allocate: selects the most suitable node based on a series of pre-selection and selection algorithms.
• preempt: performs preemption scheduling for tasks with higher priorities in the same queue based on priority rules.
• backfill: schedules pending tasks as much as possible to maximize the utilization of node resources.
NOTE: When configuring actions, use either preempt or enqueue.
Demonstration:
actions: 'allocate, backfill, preempt'

plugins
Function: Implementation details of algorithms in actions based on different scenarios. For details, see Plugins.
Description: For details, see Table 4.
Demonstration: None

tolerations
Function: Tolerance of the add-on to node taints.
Description: By default, the add-on can run on nodes with the node.kubernetes.io/not-ready or node.kubernetes.io/unreachable taint and the taint effect value is NoExecute, but it'll be evicted in 60 seconds.
Demonstration:
tolerations:
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    ...
                                   

                                  Only Volcano of v1.7.1 and later support this function. On the new add-on page, options such as resource_exporter_enable are replaced by default_scheduler_conf.

                                  Log in to the CCE console and click the cluster name to access the cluster console. Choose Add-ons in the navigation pane. On the right of the page, locate Volcano Scheduler and click Install or Upgrade. In the Parameters area, configure the Volcano parameters.

                                  -
                                  • Using resource_exporter:
                                    {
                                    -    "ca_cert": "",
                                    +
                                    • Using resource_exporter:
                                      ...
                                           "default_scheduler_conf": {
                                               "actions": "allocate, backfill, preempt",
                                               "tiers": [
                                      @@ -684,9 +685,7 @@ tolerations:
                                                   }
                                               ]
                                           },
                                      -    "server_cert": "",
                                      -    "server_key": ""
                                      -}
                                      +...

                                    After this function is enabled, you can use the functions of both numa-aware and resource_exporter.

                                  @@ -721,8 +720,7 @@ data: - name: nodeCSIscheduling - name: networkresource -

                                5. Enter the customized content in the Parameters area on the console.

                                  {
                                  -    "ca_cert": "",
                                  +

                                6. Enter the customized content in the Parameters area on the console.

                                  ...
                                       "default_scheduler_conf": {
                                           "actions": "enqueue, allocate, backfill",
                                           "tiers": [
                                  @@ -792,9 +790,7 @@ data:
                                               }
                                           ]
                                       },
                                  -    "server_cert": "",
                                  -    "server_key": ""
                                  -}
                                  +...

                                  When this function is used, the original content in volcano-scheduler-configmap will be overwritten. Therefore, you must check whether volcano-scheduler-configmap has been modified during the upgrade. If yes, synchronize the modification to the upgrade page.

                                7. @@ -986,10 +982,111 @@ data:
                                  Table 4 Supported plugins

                                  Plugin

                                  Function

                                  +

                                  Function

                                  Description

                                  +

                                  Description

                                  Demonstration

                                  +

                                  Demonstration

                                  binpack

                                  +

                                  binpack

                                  Schedule pods to nodes with high resource usage (not allocating pods to light-loaded nodes) to reduce resource fragments.

                                  +

                                  Schedule pods to nodes with high resource usage (not allocating pods to light-loaded nodes) to reduce resource fragments.

                                  arguments:

                                  +

                                  arguments:

                                  • binpack.weight: weight of the binpack plugin.
                                  • binpack.cpu: ratio of CPUs to all resources. The parameter value defaults to 1.
                                  • binpack.memory: ratio of memory resources to all resources. The parameter value defaults to 1.
                                  • binpack.resources: other custom resource types requested by the pod, for example, nvidia.com/gpu. Multiple types can be configured and be separated by commas (,).
                                  • binpack.resources.<your_resource>: weight of your custom resource in all resources. Multiple types of resources can be added. <your_resource> indicates the resource type defined in binpack.resources, for example, binpack.resources.nvidia.com/gpu.
                                  - plugins:
                                  +
                                  - plugins:
                                     - name: binpack
                                       arguments:
                                         binpack.weight: 10
                                  @@ -286,60 +288,60 @@ tolerations:
                                         binpack.resources.example.com/foo: 3

                                  conformance

                                  +

                                  conformance

                                  Prevent key pods, such as the pods in the kube-system namespace from being preempted.

                                  +

                                  Prevent key pods, such as the pods in the kube-system namespace from being preempted.

                                  None

                                  +

                                  None

                                  - plugins:
                                  +
                                  - plugins:
                                     - name: 'priority'
                                     - name: 'gang'
                                       enablePreemptable: false
                                     - name: 'conformance'

                                  lifecycle

                                  +

                                  lifecycle

                                  By collecting statistics on service scaling rules, pods with similar lifecycles are preferentially scheduled to the same node. With the horizontal scaling capability of the Autoscaler, resources can be quickly scaled in and released, reducing costs and improving resource utilization.

                                  +

                                  By collecting statistics on service scaling rules, pods with similar lifecycles are preferentially scheduled to the same node. With the horizontal scaling capability of the Autoscaler, resources can be quickly scaled in and released, reducing costs and improving resource utilization.

                                  1. Collects statistics on the lifecycle of pods in the service load and schedules pods with similar lifecycles to the same node.

                                  2. For a cluster configured with an automatic scaling policy, adjust the scale-in annotation of the node to preferentially scale in the node with low usage.

                                  arguments:
                                  • lifecycle.WindowSize: The value is an integer greater than or equal to 1 and defaults to 10.

                                    Record the number of times that the number of replicas changes. If the load changes regularly and periodically, decrease the value. If the load changes irregularly and the number of replicas changes frequently, increase the value. If the value is too large, the learning period is prolonged and too many events are recorded.

                                    +
                                  arguments:
                                  • lifecycle.WindowSize: The value is an integer greater than or equal to 1 and defaults to 10.

                                    Record the number of times that the number of replicas changes. If the load changes regularly and periodically, decrease the value. If the load changes irregularly and the number of replicas changes frequently, increase the value. If the value is too large, the learning period is prolonged and too many events are recorded.

                                  • lifecycle.MaxGrade: The value is an integer greater than or equal to 3 and defaults to 3.

                                    It indicates levels of replicas. For example, if the value is set to 3, the replicas are classified into three levels. If the load changes regularly and periodically, decrease the value. If the load changes irregularly, increase the value. Setting an excessively small value may result in inaccurate lifecycle forecasts.

                                  • lifecycle.MaxScore: float64 floating point number. The value must be greater than or equal to 50.0. The default value is 200.0.

                                    Maximum score (equivalent to the weight) of the lifecycle plugin.

                                  • lifecycle.SaturatedTresh: float64 floating point number. If the value is less than 0.5, use 0.5. If the value is greater than 1, use 1. The default value is 0.8.

                                    Threshold for determining whether the node usage is too high. If the node usage exceeds the threshold, the scheduler preferentially schedules jobs to other nodes.

                                  - plugins:
                                  +
                                  - plugins:
                                     - name: priority
                                     - name: gang
                                       enablePreemptable: false
                                     - name: conformance
                                     - name: lifecycle
                                       arguments:
                                  -      lifecycle.MaxGrade: 10
                                  +      lifecycle.MaxGrade: 3
                                         lifecycle.MaxScore: 200.0
                                  -      lifecycle.SaturatedTresh: 1.0
                                  +      lifecycle.SaturatedTresh: 0.8
                                         lifecycle.WindowSize: 10
                                  NOTE:
                                  • For nodes that do not want to be scaled in, manually mark them as long-period nodes and add the annotation volcano.sh/long-lifecycle-node: true to them. For an unmarked node, the lifecycle plugin automatically marks the node based on the lifecycle of the load on the node.
                                  • The default value of MaxScore is 200.0, which is twice the weight of other plugins. When the lifecycle plugin does not have obvious effect or conflicts with other plugins, disable other plugins or increase the value of MaxScore.
                                  • After the scheduler is restarted, the lifecycle plugin needs to re-record the load change. The optimal scheduling effect can be achieved only after several periods of statistics are collected.

                                  Gang

                                  +

                                  Gang

                                  Consider a group of pods as a whole for resource allocation. This plugin checks whether the number of scheduled pods in a job meets the minimum requirements for running the job. If yes, all pods in the job will be scheduled. If no, the pods will not be scheduled.

                                  +

                                  Consider a group of pods as a whole for resource allocation. This plugin checks whether the number of scheduled pods in a job meets the minimum requirements for running the job. If yes, all pods in the job will be scheduled. If no, the pods will not be scheduled.

                                  NOTE:

                                  If a gang scheduling policy is used, if the remaining resources in the cluster are greater than or equal to half of the minimum number of resources for running a job but less than the minimum of resources for running the job, Autoscaler scale-outs will not be triggered.

                                  • enablePreemptable:
                                    • true: Preemption enabled
                                    • false: Preemption not enabled
                                    +
                                  • enablePreemptable:
                                    • true: Preemption enabled
                                    • false: Preemption not enabled
                                  • enableJobStarving:
                                    • true: Resources are preempted based on the minAvailable setting of jobs.
                                    • false: Resources are preempted based on job replicas.
                                    NOTE:
                                    • The default value of minAvailable for Kubernetes-native workloads (such as Deployments) is 1. It is a good practice to set enableJobStarving to false.
                                    • In AI and big data scenarios, you can specify the minAvailable value when creating a vcjob. It is a good practice to set enableJobStarving to true.
                                    • In Volcano versions earlier than v1.11.5, enableJobStarving is set to true by default. In Volcano versions later than v1.11.5, enableJobStarving is set to false by default.
                                  - plugins:
                                  +
                                  - plugins:
                                     - name: priority
                                     - name: gang
                                       enablePreemptable: false
                                  @@ -347,66 +349,66 @@ tolerations:
                                     - name: conformance

                                  priority

                                  +

                                  priority

                                  Schedule based on custom load priorities.

                                  +

                                  Schedule based on custom load priorities.

                                  None

                                  +

                                  None

                                  - plugins:
                                  +
                                  - plugins:
                                     - name: priority
                                     - name: gang
                                       enablePreemptable: false
                                     - name: conformance

                                  overcommit

                                  +

                                  overcommit

                                  Resources in a cluster are scheduled after being accumulated in a certain multiple to improve the workload enqueuing efficiency. If all workloads are Deployments, remove this plugin or set the raising factor to 2.0.

                                  +

                                  Resources in a cluster are scheduled after being accumulated in a certain multiple to improve the workload enqueuing efficiency. If all workloads are Deployments, remove this plugin or set the raising factor to 2.0.

                                  NOTE:

                                  This plugin is supported in Volcano 1.6.5 and later versions.

                                  arguments:

                                  +

                                  arguments:

                                  • overcommit-factor: inflation factor, which defaults to 1.2.
                                  - plugins:
                                  +
                                  - plugins:
                                     - name: overcommit
                                       arguments:
                                         overcommit-factor: 2.0

                                  drf

                                  +

                                  drf

                                  The Dominant Resource Fairness (DRF) scheduling algorithm, which schedules jobs based on their dominant resource share. Jobs with a smaller resource share will be scheduled with a higher priority.

                                  +

                                  The Dominant Resource Fairness (DRF) scheduling algorithm, which schedules jobs based on their dominant resource share. Jobs with a smaller resource share will be scheduled with a higher priority.

- plugins:
   - name: 'drf'
   - name: 'predicates'
   - name: 'nodeorder'

predicates

Determine whether a task is bound to a node by using a series of evaluation algorithms, such as node/pod affinity, taint tolerance, node repetition, volume limits, and volume zone matching.

None

- plugins:
   - name: 'drf'
   - name: 'predicates'
   - name: 'nodeorder'

nodeorder

A common algorithm for selecting nodes. Nodes are scored in simulated resource allocation to find the most suitable node for the current job.

Scoring parameters:
• nodeaffinity.weight: Pods are scheduled based on node affinity. This parameter defaults to 2.
• podaffinity.weight: Pods are scheduled based on pod affinity. This parameter defaults to 2.
• leastrequested.weight: Pods are scheduled to the node with the least requested resources. This parameter defaults to 1.
• balancedresource.weight: Pods are scheduled to the node with balanced resource allocation. This parameter defaults to 1.
• mostrequested.weight: Pods are scheduled to the node with the most requested resources. This parameter defaults to 0.
• tainttoleration.weight: Pods are scheduled to the node with a high taint tolerance. This parameter defaults to 3.
• imagelocality.weight: Pods are scheduled to the node where the required images exist. This parameter defaults to 1.
• podtopologyspread.weight: Pods are scheduled based on the pod topology. This parameter defaults to 2.

- plugins:
   - name: nodeorder
     arguments:
       leastrequested.weight: 1
       podtopologyspread.weight: 2

cce-gpu-topology-predicate

GPU-topology scheduling preselection algorithm

None

- plugins:
   - name: 'cce-gpu-topology-predicate'
   - name: 'cce-gpu-topology-priority'
   - name: 'cce-gpu'

cce-gpu-topology-priority

GPU-topology scheduling priority algorithm

None

- plugins:
   - name: 'cce-gpu-topology-predicate'
   - name: 'cce-gpu-topology-priority'
   - name: 'cce-gpu'

cce-gpu

GPU resource allocation that supports decimal GPU configurations by working with the gpu add-on.

None

- plugins:
   - name: 'cce-gpu-topology-predicate'
   - name: 'cce-gpu-topology-priority'
   - name: 'cce-gpu'

numa-aware

NUMA affinity scheduling.

arguments:
• weight: weight of the numa-aware plugin

- plugins:
   - name: 'nodelocalvolume'
   - name: 'nodeemptydirvolume'
   - name: 'nodeCSIscheduling'
   - name: 'networkresource'
   - name: 'numa-aware'
     arguments:
       weight: 10

networkresource

The ENI requirement node can be preselected and filtered. The parameters are transferred by CCE and do not need to be manually configured.

arguments:
• NetworkType: network type (eni or vpc-router)

- plugins:
   - name: 'nodelocalvolume'
   - name: 'nodeemptydirvolume'
   - name: 'nodeCSIscheduling'
   - name: 'networkresource'
     arguments:
       NetworkType: vpc-router

nodelocalvolume

Filter out nodes that do not meet local volume requirements.

None

- plugins:
   - name: 'nodelocalvolume'
   - name: 'nodeemptydirvolume'
   - name: 'nodeCSIscheduling'
   - name: 'networkresource'

nodeemptydirvolume

Filter out nodes that do not meet the emptyDir requirements.

None

- plugins:
   - name: 'nodelocalvolume'
   - name: 'nodeemptydirvolume'
   - name: 'nodeCSIscheduling'
   - name: 'networkresource'

nodeCSIscheduling

Filter out nodes with malfunctional Everest.

None

- plugins:
   - name: 'nodelocalvolume'
   - name: 'nodeemptydirvolume'
   - name: 'nodeCSIscheduling'
   - name: 'networkresource'
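The plugins above do not run in isolation: in the volcano-scheduler configuration they are grouped into tiers. The following is a minimal sketch of such a configuration. The priority, gang, conformance, and binpack plugins and the exact tier layout are assumptions not taken from the table above; keep only the plugins you actually need.

  actions: "allocate, backfill"
  tiers:
  - plugins:
    - name: 'priority'
    - name: 'gang'
    - name: 'conformance'
  - plugins:
    - name: 'drf'
    - name: 'predicates'
    - name: 'nodeorder'
    - name: 'binpack'
  - plugins:
    - name: 'cce-gpu-topology-predicate'
    - name: 'cce-gpu-topology-priority'
    - name: 'cce-gpu'
  - plugins:
    - name: 'nodelocalvolume'
    - name: 'nodeemptydirvolume'
    - name: 'nodeCSIscheduling'
    - name: 'networkresource'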
                                   

                                  Using both taints and tolerations allows (not forcibly) the add-on Deployment to be scheduled to a node with the matching taints, and controls the Deployment eviction policies after the node where the Deployment is located is tainted.

                                  The add-on adds the default tolerance policy for the node.kubernetes.io/not-ready and node.kubernetes.io/unreachable taints, respectively. The tolerance time window is 60s.

For details, see Configuring Tolerance Policies.
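For reference, the default tolerance policy described above corresponds to tolerations of roughly the following form in the add-on Deployment. This is a minimal sketch using standard Kubernetes fields; the actual add-on manifest may differ.

  tolerations:
  - key: node.kubernetes.io/not-ready
    operator: Exists
    effect: NoExecute
    tolerationSeconds: 60
  - key: node.kubernetes.io/unreachable
    operator: Exists
    effect: NoExecute
    tolerationSeconds: 60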


                                  Change History

                                  It is a good practice to upgrade Volcano to the latest version that is supported by the cluster.
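To check which Volcano scheduler image is currently running, you can query the volcano-scheduler Deployment with kubectl. The Deployment name and the kube-system namespace are assumptions based on the release notes below, not CCE-specific guarantees.

  kubectl -n kube-system get deployment volcano-scheduler -o jsonpath='{.spec.template.spec.containers[0].image}'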

                                  Table 9 Release history

Add-on Version

Supported Cluster Version

New Feature

1.13.3

v1.21, v1.23, v1.25, v1.27, v1.28, v1.29

• Supported scale-in of customized resources based on node priorities.
• Optimized the association between preemption and node scale-out.

1.12.1

v1.19.16, v1.21, v1.23, v1.25, v1.27, v1.28

Optimized application auto scaling performance.

1.11.21

v1.19.16, v1.21, v1.23, v1.25, v1.27, v1.28

• Supported Kubernetes 1.28.
• Supported load-aware scheduling.
• Updated image OS to HCE 2.0.
• Optimized CSI resource preemption.
• Optimized load-aware rescheduling.
• Optimized preemption in hybrid deployment scenarios.

1.11.6

v1.19.16, v1.21, v1.23, v1.25, v1.27

• Supported Kubernetes 1.27.
• Supported rescheduling.
• Supported affinity scheduling of nodes in the node pool.
• Optimized the scheduling performance.

1.9.1

v1.19.16, v1.21, v1.23, v1.25

• Fixed the issue that the counting pipeline pod of the networkresource add-on occupies supplementary network interfaces (sub-ENIs).
• Fixed the issue where the binpack add-on scores nodes with insufficient resources.
• Fixed the issue of processing resources in the pod with an unknown end status.
• Optimized event output.
• Supported HA deployment by default.

1.7.1

v1.19.16, v1.21, v1.23, v1.25

Adapted to clusters of v1.25.

1.4.5

v1.17, v1.19, v1.21

Changed the deployment mode of volcano-scheduler from StatefulSet to Deployment, and fixed the issue that pods cannot be automatically migrated when the node is abnormal.

1.3.7

v1.15, v1.17, v1.19, v1.21

• Supported hybrid deployment of online and offline jobs and resource oversubscription.
• Optimized the scheduling throughput for clusters.
• Fixed the issue where the scheduler panics in certain scenarios.
• Fixed the issue that the volumes.secret verification of the Volcano job in CCE clusters of v1.15 fails.
• Fixed the issue that jobs fail to be scheduled when volumes are mounted.
                                  diff --git a/docs/cce/umn/cce_10_0196.html b/docs/cce/umn/cce_10_0196.html index 083de630..97919528 100644 --- a/docs/cce/umn/cce_10_0196.html +++ b/docs/cce/umn/cce_10_0196.html @@ -1,6 +1,6 @@ -

                                  Binding a Subnet and Security Group to a Namespace or Workload

                                  +

                                  Binding a Subnet and Security Group to a Namespace or Workload Using a Container Network Configuration

                                  Scenario

                                  In a CCE Turbo cluster, you can configure subnets and security groups for containers by namespace or workload using NetworkAttachmentDefinition CRDs. If you want to configure a specified container subnet and security group for a specified namespace or workload, create a container network configuration and associate it with the target namespace or workload. In this way, service subnets can be planned or services can be securely isolated.
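If you want to see which container network configurations already exist in a cluster, you can list the underlying NetworkAttachmentDefinition objects with kubectl. This is a supplementary sketch based on the standard k8s.cni.cncf.io CRD used throughout this section; the console list described below remains the primary interface.

  kubectl get network-attachment-definitions.k8s.cni.cncf.io -n kube-system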

                                  The following table lists the resources that a container network configuration can be associated with.
                                  Table 1 Associated resources

                                  Category


                                  Available only in CCE Turbo clusters of 1.23.11-r0, 1.25.6-r0, 1.27.3-r0, 1.28.1-r0, or later.

                                  Constraints

                                  +

                                  Notes and Constraints

                                  The namespaces associated with different container network configurations must be unique.

                                  +

                                  A namespace can be associated with only one container network configuration.

                                  Only the custom container network configurations that are not associated with any namespace can be specified.

                                  -
                                  -
                                  -

                                  Constraints

                                  • Only the default container network configuration default-network supports container ENI prebinding. The speed of creating pods using a custom container network configuration is slower than that of creating pods using default-network. Therefore, this function is not suitable for ultra-fast pod scaling.
                                  • default-network cannot be deleted.
                                  • If a workload with a fixed IP address needs to be associated with a new container network configuration, the fixed IP address will be invalid when pods are rebuilt. In this case, delete the workload, release the fixed IP address, and create a workload again.
                                  • Before deleting a custom container network configuration, delete the pods (with the cni.yangtse.io/network-status annotation) created using the configuration in the target namespace. For details, see Deleting a Container Network Configuration.
                                  -
                                  -

                                  Using the CCE Console to Create a Container Network Configuration of the Namespace Type

                                  1. Log in to the CCE console.
                                  2. Click the cluster name to access the cluster console. Choose Settings in the navigation pane and click the Network tab.

                                    If default-network is available in the cluster, it takes effect on all pods where no custom container network configuration has been configured. The default container subnet in the network settings on the Overview page is the container subnet in default-network.

                                    +
                                    • The priorities (in descending order) of the container network configurations used by a pod are as follows: Container network configuration directly associated with the pod > Container network configuration associated with the pod namespace > Default container network configuration of the cluster (default-network)
                                    • If default-network is available in a cluster, it takes effect on all pods where no custom container network configuration has been configured. The default container subnet in the network settings on the Overview page is the container subnet in default-network. default-network cannot be deleted.
                                    -

                                  3. View the Container Network Security Policy Configuration (Namespace Level) and click Add. In the window that is displayed, configure parameters such as the pod subnet and security group.

                                    • Name: Enter a name that contains a maximum of 253 characters. Do not use default-network, default, mgnt0, or mgnt1.
                                    • Associated Resource Type: resource type associated with the custom container network configuration. For details, see Table 1. To create a container network configuration of the namespace type, select Namespace.
                                    • Namespace: Select the namespace to be associated. The namespaces associated with different container network configurations must be unique. If no namespace is available, click Create Namespace to create one.
                                    • Pod Subnet: Select a subnet. If no subnet is available, click Create Subnet to create one. After the subnet is created, click the refresh button. A maximum of 20 subnets can be selected.
                                    • Associate Security Group: The default value is the container ENI security group. You can also click Create Security Group to create one. After the security group is created, click the refresh button. A maximum of five security groups can be selected.
                                    +
                                  +
                                  +

                                  Notes and Constraints

                                  • Only the default container network configuration supports dynamic pre-binding of container NICs. When the quota of node NICs is used up, the pod that uses the custom container network configuration attempts to unbind the pre-bound NIC of the default container network configuration, leading to slower pod startup. Therefore, if you need to frequently use the custom container network configuration, disable dynamic pre-binding of global container NICs in the target cluster. If you require high-speed pod elasticity using the default container network configuration, properly plan dynamic pre-binding of container NICs in the target node pool based on scheduling.
                                  • If a workload with a fixed IP address needs to be associated with a new container network configuration, the fixed IP address will be invalid when pods are rebuilt. In this case, delete the workload, release the fixed IP address, and create a workload again.
                                  • Before deleting a custom container network configuration, delete the pods (with the cni.yangtse.io/network-status annotation) created using the configuration in the target namespace. For details, see Deleting a Container Network Configuration.
                                  +
                                  +

                                  Using the CCE Console to Create a Container Network Configuration of the Namespace Type

                                  1. Log in to the CCE console.
                                  2. Click the cluster name to access the cluster console. Choose Settings in the navigation pane. In the right pane, click the Network tab.
                                  3. View the Container Network Security Policy Configuration (Namespace Level) and click Add. In the window that is displayed, configure parameters such as the pod subnet and security group.

                                    • Name: Enter a name that contains a maximum of 253 characters. Do not use default-network, default, mgnt0, or mgnt1.
                                    • Associated Resource Type: resource type associated with the custom container network configuration. For details, see Table 1. To create a container network configuration of the namespace type, select Namespace.
                                    • Namespace: Select the namespace to be associated. The namespaces associated with different container network configurations must be unique. If no namespace is available, click Create Namespace to create one.
                                    • Pod Subnet: Select a subnet. If no subnet is available, click Create Subnet to create one. After the subnet is created, click the refresh button. A maximum of 20 subnets can be selected.
                                    • Associate Security Group: The default value is the container ENI security group. You can also click Create Security Group to create one. After the security group is created, click the refresh button. A maximum of five security groups can be selected.

                                  4. Click OK. After the creation, you will be redirected to the custom container network configuration list, where the new container network configuration is included.
                                  -

                                  Using the CCE Console to Create a Container Network Configuration of the Workload Type

                                  1. Log in to the CCE console.
                                  2. Click the cluster name to access the cluster console. Choose Settings in the navigation pane and click the Network tab.

                                    If default-network is available in the cluster, it takes effect on all pods where no custom container network configuration has been configured. The default container subnet in the network settings on the Overview page is the container subnet in default-network.

                                    -
                                    -

                                  3. View the Container Network Security Policy Configuration (Namespace Level) and click Add. In the window that is displayed, configure parameters such as the pod subnet and security group.

                                    • Name: Enter a name that contains a maximum of 253 characters. Do not use default-network, default, mgnt0, or mgnt1.
                                    • Associated Resource Type: resource type associated with the custom container network configuration. For details, see Table 1. To create a container network configuration of the workload type, select Workload.
                                    • Pod Subnet: Select a subnet. If no subnet is available, click Create Subnet to create one. After the subnet is created, click the refresh button. A maximum of 20 subnets can be selected.
                                    • Associate Security Group: The default value is the container ENI security group. You can also click Create Security Group to create one. After the security group is created, click the refresh button. A maximum of five security groups can be selected.
                                    +

                                    Using the CCE Console to Create a Container Network Configuration of the Workload Type

                                    1. Log in to the CCE console.
                                    2. Click the cluster name to access the cluster console. Choose Settings in the navigation pane. In the right pane, click the Network tab.
                                    3. View the Container Network Security Policy Configuration (Namespace Level) and click Add. In the window that is displayed, configure parameters such as the pod subnet and security group.

                                      • Name: Enter a name that contains a maximum of 253 characters. Do not use default-network, default, mgnt0, or mgnt1.
                                      • Associated Resource Type: resource type associated with the custom container network configuration. For details, see Table 1. To create a container network configuration of the workload type, select Workload.
                                      • Pod Subnet: Select a subnet. If no subnet is available, click Create Subnet to create one. After the subnet is created, click the refresh button. A maximum of 20 subnets can be selected.
                                      • Associate Security Group: The default value is the container ENI security group. You can also click Create Security Group to create one. After the security group is created, click the refresh button. A maximum of five security groups can be selected.

                                    4. Click OK. After the creation, you will be redirected to the custom container network configuration list, where the new container network configuration is included.
                                    5. When creating a workload, you can select a custom container network configuration.

                                      1. In the navigation pane, choose Workloads. In the right pane, click the Deployments tab.
                                      2. Click Create Workload in the upper right corner of the page. In the Advanced Settings area, choose Network Configuration and determine whether to enable a specified container network configuration.
                                      3. Select an existing container network configuration. If no configuration is available, click Add to create one.
                                      4. After the configuration, click Create Workload.

                                        Return to the Settings page. In the container network configuration list, the name of the resource associated with the created container network configuration is displayed.

                                    Using Kubectl to Create a Container Network Configuration of the Namespace Type

                                    This section describes how to use kubectl to create a container network configuration of the namespace type.

                                    -
                                    1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
                                    2. Modify the networkattachment-test.yaml file.

                                      vi networkattachment-test.yaml

                                      +
                                      1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
                                      2. Modify the networkattachment-test.yaml file.

                                        vi networkattachment-test.yaml

                                        apiVersion: k8s.cni.cncf.io/v1
                                         kind: NetworkAttachmentDefinition
                                         metadata:
                                        @@ -64,8 +62,8 @@ metadata:
                                           name: example
                                           namespace: kube-system
                                         spec:
                                        -  config: 
                                        -  '{
                                        +  config: |
                                        +   {
                                             "type":"eni-neutron",
                                             "args":{
                                               "securityGroups":"41891**",
                                        @@ -82,7 +80,7 @@ spec:
                                                 }
                                               }
                                             }
                                        -  }'
                                        + }
                                        @@ -264,26 +262,27 @@ spec:
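Putting the hunks above together, the new-format manifest looks roughly as follows. This is a consolidated sketch only: the IDs are placeholders, the yangtse.io/project-id annotation is taken from the workload-type example later in this section, and any fields elided by the hunks are omitted.

  apiVersion: k8s.cni.cncf.io/v1
  kind: NetworkAttachmentDefinition
  metadata:
    annotations:
      yangtse.io/project-id: <project-id>
    name: example
    namespace: kube-system
  spec:
    config: |
      {
        "type": "eni-neutron",
        "args": {
          "securityGroups": "<security-group-id>",
          "subnets": [
            {
              "subnetID": "<subnet-id>"
            }
          ]
        }
      }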

                                        Using Kubectl to Create a Container Network Configuration of the Workload Type

                                        This section describes how to use kubectl to create a container network configuration of the workload type.

                                        -
                                        1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
                                        2. Modify the networkattachment-test.yaml file.

                                          vi networkattachment-test.yaml

                                          +
                                          1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
                                          2. Modify the networkattachment-test.yaml file.

                                            vi networkattachment-test.yaml

                                            apiVersion: k8s.cni.cncf.io/v1
                                             kind: NetworkAttachmentDefinition
                                             metadata:
                                               annotations:
                                            -    yangtse.io/project-id: 05e38**
                                            +    yangtse.io/project-id: 80d5a**
                                               name: example
                                               namespace: kube-system
                                             spec:
                                            -  config: 
                                            -  '{
                                            +  config: |
                                            +   {
                                                 "type":"eni-neutron",
                                                 "args":{
                                            -      "securityGroups":"41891**",
                                            +      "securityGroups":"f4983**",
                                                   "subnets":[
                                                     {
                                            -          "subnetID":"27d95**"
                                            +          "subnetID":"5594b**"
                                                     }
                                                   ]
                                            -    }'
                                            + } + }
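After editing the file, it can be applied and checked with standard kubectl commands. These commands are an assumption based on the CRD shown above, not CCE-specific tooling.

  kubectl apply -f networkattachment-test.yaml
  kubectl get network-attachment-definitions.k8s.cni.cncf.io example -n kube-system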
                                        Table 2 Key parameters

                                        Parameter

                                        @@ -301,7 +300,7 @@ spec: - - - - - @@ -460,7 +459,7 @@ spec:

                                        Deleting a Container Network Configuration

                                        You can delete the new container network configuration or view its YAML file.

                                        Before deleting a container network configuration, delete all pods using the configuration. Otherwise, the deletion will fail.

                                        -
1. Run the following command to filter the pods that use the configuration in the cluster (example is used as an example):

  kubectl get pod -A -o=jsonpath="{.items[?(@.metadata.annotations.cni\.yangtse\.io/network-status=='[{\"name\":\"example\"}]')]['metadata.namespace', 'metadata.name']}"

                                            The command output contains the pod name and namespace associated with the configuration.

                                          2. Delete the owner of the pod. The owner may be a Deployment, StatefulSet, DaemonSet, or Job.
                                        diff --git a/docs/cce/umn/cce_10_0197.html b/docs/cce/umn/cce_10_0197.html index 7bef9f3f..612c3682 100644 --- a/docs/cce/umn/cce_10_0197.html +++ b/docs/cce/umn/cce_10_0197.html @@ -1,9 +1,9 @@ -

                                        Process and Method of Upgrading a Cluster

CCE strictly complies with the Kubernetes community conformance certification. It releases three Kubernetes versions each year and offers a maintenance period of at least 24 months after each version is released. CCE ensures the stable running of Kubernetes versions during the maintenance period.

                                        To ensure your service rights and benefits, upgrade your Kubernetes clusters before a maintenance period ends. You can check the Kubernetes version of your cluster on the cluster list page and check whether a new version is available. Proactive cluster upgrades help you:

                                        -
                                        • Reduce security and stability risks: During the iteration of Kubernetes versions, known security and stability vulnerabilities are continuously fixed. Long-term use of EOS clusters will result in security and stability risks to services.
                                        • Experience the latest functions: During the iteration of Kubernetes versions, new functions and optimizations are continuously released. For details about the features of the latest version, see Release Notes for CCE Cluster Versions.
                                        • Minimize compatibility risks: During the iteration of Kubernetes versions, APIs are continuously modified and functions are deprecated. If a cluster has not been upgraded for a long time, more O&M assurance investment will be required when the cluster is upgraded. Periodic upgrades can effectively mitigate compatibility risks caused by accumulated version differences. It is a good practice to upgrade a patch version every quarter and upgrade a major version to the latest version every year.
                                        • Obtain more effective technical support: CCE does not provide security patches or issue fixing for EOS Kubernetes cluster versions, and does not ensure technical support for the EOS versions.
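Besides the console, the cluster and node versions can be spot-checked with standard kubectl commands. This is a supplementary sketch; the console remains the authoritative source for the CCE patch version.

  kubectl version          # the server version is the cluster's Kubernetes version
  kubectl get nodes        # the VERSION column shows the kubelet version of each node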

                                        Cluster Upgrade Path

                                        CCE clusters evolve iteratively based on the community Kubernetes version. A CCE cluster version consists of the community Kubernetes version and the CCE patch version. Therefore, two cluster upgrade paths are provided.

                                        • Upgrading a Kubernetes version
                                        Table 6 Key parameters

                                        Parameter

                                        String

                                        API version. The value is fixed at k8s.cni.cncf.io/v1.

                                        +

                                        API version. The value is fixed at k8s.cni.cncf.io/v1.

                                        kind

                                        @@ -310,7 +309,7 @@ spec:

                                        String

                                        Type of the object to be created. The value is fixed at NetworkAttachmentDefinition.

                                        +

                                        Type of the object to be created. The value is fixed at NetworkAttachmentDefinition.

                                        yangtse.io/project-id

                                        @@ -337,7 +336,7 @@ spec:

                                        String

                                        Namespace of the configuration resource. The value is fixed to kube-system.

                                        +

                                        Namespace of the configuration resource. The value is fixed to kube-system.

                                        config

                                        @@ -369,7 +368,7 @@ spec:

                                        String

                                        The value is fixed at eni-neutron.

                                        +

                                        The value is fixed at eni-neutron.

                                        args

                                        @@ -402,9 +401,9 @@ spec:

                                        String

                                        Security group ID. If no security group is planned, select the same security group as that in default-network.

                                        +

                                        Security group ID. If no security group is planned, ensure that the security group is the same as that in default-network.

                                        How to obtain:

                                        -

                                        Log in to the VPC console. In the navigation pane, choose Access Control > Security Groups. Click the target security group name and copy the ID on the Summary tab page.

                                        +

                                        Log in to the VPC console. In the navigation pane, choose Access Control > Security Groups. Click the target security group name and copy the ID on the Summary tab page.

                                        subnets

                                        @@ -417,7 +416,7 @@ spec:
                                        [{"subnetID":"27d95**"},{"subnetID":"827bb**"},{"subnetID":"bdd6b**"}]

                                        Subnet ID not used by the cluster in the same VPC.

                                        How to obtain:

                                        -

                                        Log in to the VPC console. In the navigation pane, choose Virtual Private Cloud > Subnets. Click the target subnet name and copy the Subnet ID on the Summary tab page.

                                        +

                                        Log in to the VPC console. In the navigation pane, choose Virtual Private Cloud > Subnets. Click the target subnet name and copy the Subnet ID on the Summary tab page.


                                        Source Kubernetes Version

                                        @@ -37,12 +37,12 @@

                                        v1.23 or v1.25

                                        v1.23

                                        +

                                        v1.23

                                        v1.25 or v1.27

                                        v1.25

                                        +

                                        v1.25

                                        v1.27

                                        v1.28

                                        v1.28

                                        +

v1.29

                                        +
                                        -
• A version that has reached the end of maintenance cannot be directly upgraded to the latest version. You need to upgrade such a version multiple times, for example, from v1.15 to v1.19, then v1.23, and then to v1.27 or v1.28.
• A Kubernetes version can be upgraded only after the patch is upgraded to the latest version. CCE will automatically generate an optimal upgrade path on the console based on the current cluster version.
                                      3. Upgrading a patch version

                                        Patch version management is available for CCE clusters of v1.19 or later to provide new features and fix bugs and vulnerability for in-maintenance clusters without requiring a major version upgrade.

                                        After a new patch version is released, you can directly upgrade any patch version to the latest patch version. For details about the release history of patch versions, see Patch Version Release Notes.

                                    Cluster Upgrade Process

                                    The cluster upgrade process involves pre-upgrade check, backup, upgrade, and post-upgrade verification.

                                    -
Figure 1 Process of upgrading a cluster

                                    After determining the target version of the cluster, read the precautions carefully and prevent function incompatibility during the upgrade.

                                    1. Pre-upgrade check

                                      Before a cluster upgrade, CCE checks mandatory items such as the cluster status, add-ons, and nodes to ensure that the cluster meets the upgrade requirements. For more details, see Pre-upgrade Check. If any check item is abnormal, rectify the fault as prompted on the console.
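In addition to the automated check, you can do a quick manual spot check with kubectl before starting the upgrade. This is an optional supplement under common Kubernetes conventions, not a replacement for the console check.

  kubectl get nodes                  # all nodes should be Ready
  kubectl get pods -n kube-system    # add-on and system pods should be Running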

                                    2. Backup

                                      You can use disk snapshots to back up master node data, including CCE component images, component configurations, and etcd data. Back up data before an upgrade. If unexpected cases occur during an upgrade, you can use the backup to quickly restore the cluster.

                                      @@ -73,9 +78,9 @@

                                  Backup Mode

Backup Duration

Rollback Duration

                                  Description

                                  Master node disks, including component images, configurations, logs, and etcd data

One-click backup on a web page (manually triggered)

                                  20 minutes to 2 hours (based on the cloud backup tasks in the current region)

                                8. Configuration and upgrade

                                  Configure parameters before an upgrade. CCE has provided default settings, which can be modified as needed. After the configuration, upgrade add-ons, master nodes, and worker nodes in sequence.

                                  -
                                    • Add-on Upgrade Configuration: Add-ons that have been installed in your cluster are listed. During the cluster upgrade, CCE automatically upgrades the selected add-ons to be compatible with the target cluster version. You can click Set to re-define the add-on parameters.

                                      If an add-on is marked with on its right side, the add-on cannot be compatible with both the source and target versions of the cluster upgrade. In this case, CCE will upgrade the add-on after the cluster upgrade. The add-on may be unavailable during the cluster upgrade.

                                    • Node Upgrade Configuration
                                      • Max. Nodes for Batch Upgrade: You can configure the maximum number of nodes to be upgraded in a batch.

                                        Node pools will be upgraded in sequence. Nodes in node pools will be upgraded in batches. One node is upgraded in the first batch, two nodes in the second batch, and the number of nodes to be upgraded in each subsequent batch increases by a power of 2 until the maximum number of nodes to be upgraded in each batch is reached. The next cluster is upgraded after the previous one is upgraded. By default, 20 nodes are upgraded in a batch, and the number can be increased to the maximum of 60.

                                      • Node Priority: You can customize node upgrade priorities. If the priorities are not specified, CCE will perform the upgrade based on the priorities generated by the default policy.
• Add Upgrade Priority: You can customize the priorities for upgrading node pools. If the priorities are not specified, CCE will preferentially upgrade the node pool with the least number of nodes based on the default policy.
• Add Node Priority: You can customize the priorities for upgrading nodes in a node pool. If the priorities are not specified, CCE will preferentially upgrade the node with the lightest load (calculated based on the number of pods, resource request rate, and number of PVs) based on the default policy.
                                        diff --git a/docs/cce/umn/cce_10_0198.html b/docs/cce/umn/cce_10_0198.html index d330f269..fe55c02d 100644 --- a/docs/cce/umn/cce_10_0198.html +++ b/docs/cce/umn/cce_10_0198.html @@ -2,45 +2,45 @@

                                        Accepting Nodes for Management

                                        Scenario

                                        In CCE, you can create a node (Creating a Node) or add existing nodes (ECSs) to your cluster for management.

                                        -
                                        • When accepting an ECS, you can reset the ECS OS to a standard public image offered by CCE. If you choose to do so, you need to reset the password or key pair, and the previous password or key pair will become invalid.
                                        • LVM information, including volume groups (VGs), logical volumes (LVs), and physical volumes (PVs), will be deleted from the system disks and data disks attached to the selected ECSs during acceptance. Ensure that the information has been backed up.
                                        • During the acceptance of an ECS, do not perform any operation on the ECS through the ECS console.
                                        -


                                        Notes and Constraints

                                        • ECSs and BMSs can be managed.
                                        -


                                        Prerequisites

                                        The cloud servers to be managed must meet the following requirements:

                                        +
                                        • The node to be accepted must be in the Running state and not used by other clusters. In addition, the node to be accepted does not carry the CCE-Dynamic-Provisioning-Node tag.
                                        • The node to be accepted and the cluster must be in the same VPC. (If the cluster version is earlier than v1.13.10, the node to be accepted and the CCE cluster must be in the same subnet.)
                                        • Data disks must be attached to the nodes to be managed. A local disk (disk-intensive disk) or a data disk of at least 20 GiB can be attached to the node, and any data disks already attached cannot be smaller than 10 GiB.
• The node to be accepted must have at least 2 CPU cores, at least 4 GiB of memory, and only one NIC.
                                        • Only cloud servers with the same data disk configurations can be added in batches.
                                        • If IPv6 is enabled for a cluster, only nodes in a subnet with IPv6 enabled can be accepted and managed. If IPv6 is not enabled for the cluster, only nodes in a subnet without IPv6 enabled can be accepted.
                                        • Nodes in a CCE Turbo cluster must support sub-ENIs or be bound to at least 16 ENIs. For details about the node flavors, see the node flavors that can be selected on the console when you create a node.
• Data disks that have been partitioned will be ignored during node management. Ensure that at least one unpartitioned data disk that meets the specifications is attached to the node.

                                        Procedure

                                        1. Log in to the CCE console and go to the cluster where the node to be accepted resides.
                                        2. In the navigation pane, choose Nodes. On the displayed page, click the Nodes tab and then Accept Node in the upper right corner.
                                        3. Specify node parameters.

                                          Configurations

Table 1 Node configuration parameters

Parameter

Description

Specifications

Click Select Cloud Server and select the servers to be accepted.

You can select multiple cloud servers for batch management. However, only the cloud servers with the same specifications, AZ, and data disk configuration can be added in batches.

If a cloud server contains multiple data disks, select one of them for the container runtime and kubelet.

Container Engine

The container engines supported by CCE include Docker and containerd, which may vary depending on cluster types, cluster versions, and OSs. Select a container engine based on the information displayed on the CCE console. For details, see Mapping between Node OSs and Container Engines.

OS

Select an OS type. Different types of nodes support different OSs.
• Public image: Select a public image for the node.
• Private image: Select a private image for the node.
NOTE:

Service container runtimes share the kernel and underlying calls of nodes. To ensure compatibility, select a Linux distribution version that is the same as or close to that of the final service container image for the node OS.

Login Mode

• Key Pair

  Select the key pair used to log in to the node. You can select a shared key.

  A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create Key Pair.

Storage Settings

Configure storage resources on a node for the containers running on it.

Table 2 Configuration parameters

Parameter

@@ -82,20 +82,20 @@
diff --git a/docs/cce/umn/cce_10_0201.html b/docs/cce/umn/cce_10_0201.html index 308966f6..02be89b5 100644 --- a/docs/cce/umn/cce_10_0201.html +++ b/docs/cce/umn/cce_10_0201.html @@ -2,7 +2,7 @@

                                          Monitoring Custom Metrics on AOM

                                          CCE allows you to upload custom metrics to AOM. ICAgent on a node periodically calls the metric monitoring API configured on a workload to read monitoring data and then uploads the data to AOM.

Figure 1 Using ICAgent to collect monitoring metrics

                                          The custom metric API of a workload can be configured when the workload is created. The following procedure uses an Nginx application as an example to describe how to report custom metrics to AOM.

                                          1. Preparing an Application

                                            Prepare an application image. The application must provide a metric monitoring API for ICAgent to collect data, and the monitoring data must comply with the Prometheus specifications.

                                          2. Deploying Applications and Converting Nginx Metrics

                                            Use the application image to deploy a workload in a cluster. Custom metrics are automatically reported.

@@ -58,7 +58,7 @@
ADD nginx.conf /etc/nginx/nginx.conf
EXPOSE 80
CMD ["nginx", "-g", "daemon off;"]
                                          -

                                        4. Use this Dockerfile to build an image and upload it to SWR. The image name is nginx:exporter.

                                          1. In the navigation pane, choose My Images. In the upper right corner, click Upload Through Client. On the displayed dialog box, click Generate a temporary login command and click to copy the command.
                                          2. Run the login command copied in the previous step on the node. If the login is successful, the message "Login Succeeded" is displayed.
                                          3. Run the following command to build an image named nginx. The image version is exporter.
                                            docker build -t nginx:exporter .
                                            +

                                          4. Use this Dockerfile to build an image and upload it to SWR. The image name is nginx:exporter.

                                            1. In the navigation pane, choose My Images. In the upper right corner, click Upload Through Client. On the displayed dialog box, click Generate a temporary login command and click to copy the command.
                                            2. Run the login command copied in the previous step on the node. If the login is successful, the message "Login Succeeded" is displayed.
                                            3. Run the following command to build an image named nginx. The image version is exporter.
                                              docker build -t nginx:exporter .
                                            4. Tag the image and upload it to the image repository. Change the image repository address and organization name based on your requirements.
                                              docker tag nginx:exporter {swr-address}/{group}/nginx:exporter
                                               docker push {swr-address}/{group}/nginx:exporter
@@ -71,7 +71,7 @@
Reading: 0
Writing: 1
Waiting: 2

                                          Deploying Applications and Converting Nginx Metrics

The format of the monitoring data provided by nginx:exporter does not meet the Prometheus requirements, so the data must be converted into the format required by Prometheus. To convert the format of Nginx metrics, use nginx-prometheus-exporter, as shown in the following figure.

Figure 2 Using exporter to convert the data format

                                          Deploy nginx:exporter and nginx-prometheus-exporter in the same pod.

                                          kind: Deployment
                                           apiVersion: apps/v1
                                          diff --git a/docs/cce/umn/cce_10_0205.html b/docs/cce/umn/cce_10_0205.html
                                          index 2ced25f5..bb876cc8 100644
                                          --- a/docs/cce/umn/cce_10_0205.html
                                          +++ b/docs/cce/umn/cce_10_0205.html
                                          @@ -1,12 +1,12 @@
                                           
                                           
                                           

                                          Kubernetes Metrics Server

                                          -

                                          From version 1.8 onwards, Kubernetes provides resource usage metrics, such as the container CPU and memory usage, through the Metrics API. These metrics can be directly accessed by users (for example, by using the kubectl top command) or used by controllers (for example, Horizontal Pod Autoscaler) in a cluster for decision-making. The specific component is metrics-server, which is used to substitute for heapster for providing the similar functions. heapster has been gradually abandoned since v1.11.

                                          +

From version 1.8 onwards, Kubernetes provides resource usage metrics, such as the container CPU and memory usage, through the Metrics API. These metrics can be directly accessed by users (for example, by using the kubectl top command) or used by controllers (for example, Horizontal Pod Autoscaler) in a cluster for decision-making. The component that provides these metrics is metrics-server, which replaces Heapster to provide similar functions. Heapster has been gradually deprecated since v1.11.

                                          metrics-server is an aggregator for monitoring data of core cluster resources. You can quickly install this add-on on the CCE console.

                                          -

                                          After installing this add-on, you can create HPA policies. For details, see HPA Policies.

                                          +

                                          After installing this add-on, you can create HPA policies. For details, see Creating an HPA Policy.

                                          The official community project and documentation are available at https://github.com/kubernetes-sigs/metrics-server.
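For reference, the following is a minimal sketch (not taken from the CCE documentation) of the kind of scaling the Metrics API enables once metrics-server is running: a HorizontalPodAutoscaler that scales a Deployment based on average CPU utilization. The workload name (nginx) and the thresholds are placeholders, and the autoscaling/v2 API shown here is available in clusters of v1.23 or later.

apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: nginx-hpa                    # example name
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: nginx                      # replace with your workload
  minReplicas: 1
  maxReplicas: 5
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 70     # scale out when average CPU utilization exceeds 70%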

                                          Installing the Add-on

                                          1. Log in to the CCE console and click the cluster name to access the cluster console. Choose Add-ons in the navigation pane, locate Kubernetes Metrics Server on the right, and click Install.
                                          2. On the Install Add-on page, configure the specifications.

                                            -

                                        5. Table 2 Storage configuration parameters

                                          Parameter

                                          Description

                                          Resource Tag

                                          You can add resource tags to classify resources.

                                          -

                                          You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

                                          -

                                          CCE will automatically create the "CCE-Dynamic-Provisioning-Node=node id" tag.

                                          +

                                          You can add resource tags to classify resources. A maximum of eight resource tags can be added.

                                          +

                                          You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

                                          +

                                          CCE will automatically create the "CCE-Dynamic-Provisioning-Node=Node ID" tag.

                                          Kubernetes Label

                                          Click Add Label to set the key-value pair attached to the Kubernetes objects (such as pods). A maximum of 20 labels can be added.

                                          -

                                          Labels can be used to distinguish nodes. With workload affinity settings, container pods can be scheduled to a specified node. For more information, see Labels and Selectors.

                                          +

                                          Labels can be used to distinguish nodes. With workload affinity settings, pods can be scheduled to a specified node. For more information, see Labels and Selectors.

                                          Taint

                                          This parameter is left blank by default. You can add taints to configure node anti-affinity. A maximum of 20 taints are allowed for each node. Each taint contains the following parameters:
                                          • Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
                                          • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
                                          • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
                                          +
                                          This parameter is left blank by default. You can add taints to configure anti-affinity for the node. A maximum of 20 taints are allowed for each node. Each taint contains the following parameters:
                                          • Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
                                          • Value: A value must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed.
                                          • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
                                          NOTICE:
                                          • If taints are used, you must configure tolerations in the YAML files of pods. Otherwise, scale-up may fail or pods cannot be scheduled onto the added nodes.
                                          • After a node pool is created, you can click Edit to modify its configuration. The modification will be synchronized to all nodes in the node pool.
                                          @@ -109,13 +109,13 @@

                                          Pre-installation Command

                                          Pre-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded.

                                          +

Pre-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded. The characters of both the pre-installation and post-installation scripts are counted together, and the total number of characters after transcoding cannot exceed 10240.

                                          The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed.

                                          Post-installation Command

                                          Pre-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded.

                                          +

Post-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded. The characters of both the pre-installation and post-installation scripts are counted together, and the total number of characters after transcoding cannot exceed 10240.

                                          The script will be executed after Kubernetes software is installed, which does not affect the installation.
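To show how the Kubernetes Label and Taint parameters described in the table above are consumed by workloads, the following is a minimal pod sketch. The label (gpu: "true") and the taint key (dedicated) are placeholder examples rather than values predefined by CCE; a toleration must match the key, value, and effect of the taint configured on the node.

apiVersion: v1
kind: Pod
metadata:
  name: demo-pod                     # example pod
spec:
  nodeSelector:
    gpu: "true"                      # schedules the pod only to nodes carrying this Kubernetes label
  tolerations:
    - key: dedicated                 # matches the example taint key configured on the node
      operator: Equal
      value: gpu
      effect: NoSchedule             # must match the taint effect
  containers:
    - name: app
      image: nginx:latest            # example image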

                                          Table 1 metrics-server configuration

                                          Parameter

                                          +
                                          @@ -21,6 +21,7 @@ @@ -84,10 +85,136 @@
                                          Table 1 Add-on configuration

                                          Parameter

                                          Description

                                          Number of pods that will be created to match the selected add-on specifications.

                                          If you select Custom, you can adjust the number of pods as required.

                                          +

                                          High availability is not possible with a single pod. If an error occurs on the node where the add-on instance runs, the add-on will fail.

                                          Containers

                                          @@ -56,7 +57,7 @@

Taints and tolerations allow (but do not force) the add-on Deployment to be scheduled to a node with matching taints, and they control the eviction policy for the Deployment after the node it runs on is tainted.

By default, the add-on tolerates the node.kubernetes.io/not-ready and node.kubernetes.io/unreachable taints, with a tolerance time window of 60s.

                                          -

                                          For details, see Taints and Tolerations.

                                          +

                                          For details, see Configuring Tolerance Policies.
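Expressed as a pod specification, the default tolerance policy described above roughly corresponds to the following tolerations (a sketch for illustration only; the add-on manages these entries itself, and the pod name and image are placeholders):

apiVersion: v1
kind: Pod
metadata:
  name: toleration-demo              # example pod illustrating the default tolerance policy
spec:
  containers:
    - name: app
      image: nginx:latest            # example image
  tolerations:
    - key: node.kubernetes.io/not-ready
      operator: Exists
      effect: NoExecute
      tolerationSeconds: 60          # the pod is evicted 60s after the node becomes not ready
    - key: node.kubernetes.io/unreachable
      operator: Exists
      effect: NoExecute
      tolerationSeconds: 60          # the pod is evicted 60s after the node becomes unreachable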

                                          +

                                          Change History

Table 4 Release history

Add-on Version | Supported Cluster Version | New Feature | Community Version
1.3.60 | v1.21, v1.23, v1.25, v1.27, v1.28, v1.29 | CCE clusters 1.29 are supported. | 0.6.2
1.3.39 | v1.21, v1.23, v1.25, v1.27, v1.28 | Fixed some issues. | 0.6.2
1.3.37 | v1.21, v1.23, v1.25, v1.27, v1.28 | CCE clusters 1.28 are supported. | 0.6.2
1.3.12 | v1.19, v1.21, v1.23, v1.25, v1.27 | None | 0.6.2
1.3.6 | v1.19, v1.21, v1.23, v1.25 | Supported anti-affinity scheduling of add-on pods on nodes in different AZs. The default taint tolerance duration is changed to 60s. | 0.6.2
1.3.2 | v1.19, v1.21, v1.23, v1.25 | CCE clusters 1.25 are supported. | 0.6.2
1.2.1 | v1.19, v1.21, v1.23 | CCE clusters 1.23 are supported. | 0.4.4
1.1.10 | v1.15, v1.17, v1.19, v1.21 | CCE clusters 1.21 are supported. | 0.4.4
1.1.4 | v1.15, v1.17, v1.19 | Unified resource specification configuration unit. | 0.4.4
                                          diff --git a/docs/cce/umn/cce_10_0208.html b/docs/cce/umn/cce_10_0208.html index 53500e99..56a76ba2 100644 --- a/docs/cce/umn/cce_10_0208.html +++ b/docs/cce/umn/cce_10_0208.html @@ -1,14 +1,15 @@ -

                                          HPA Policies

                                          +

                                          Creating an HPA Policy

                                          Horizontal Pod Autoscaling (HPA) in Kubernetes implements horizontal scaling of pods. In a CCE HPA policy, you can configure different cooldown time windows and scaling thresholds for different applications based on the Kubernetes HPA.

                                          -

                                          Prerequisites

                                          To use HPA, install an add-on that provides metrics APIs. Select one of the following add-ons based on your cluster version and service requirements.
                                          • Kubernetes Metrics Server: provides basic resource usage metrics, such as container CPU and memory usage. It is supported by all cluster versions.
                                          -
                                          -
                                          -

                                          Constraints

                                          • HPA policies can be created only for clusters of v1.13 or later.
                                          • For clusters earlier than v1.19.10, if an HPA policy is used to scale out a workload with EVS volumes mounted, the existing pods cannot be read or written when a new pod is scheduled to another node.

                                            For clusters of v1.19.10 and later, if an HPA policy is used to scale out a workload with EVS volume mounted, a new pod cannot be started because EVS disks cannot be attached.

                                            +

                                            Prerequisites

                                            To use HPA, install an add-on that provides metrics APIs. Select one of the following add-ons based on your cluster version and service requirements.
                                            -

                                            Creating an HPA Policy

                                            1. Log in to the CCE console and click the cluster name to access the cluster console.
                                            2. Choose Workloads in the navigation pane. Locate the target workload and choose More > Auto Scaling in the Operation column.
                                            3. Set Policy Type to HPA+CronHPA, enable the created HPA policy, and configure parameters.

                                              This section describes only HPA policies. To enable CronHPA, see CronHPA Policies.

                                              +
                                            +

                                            Notes and Constraints

                                            • HPA policies can be created only for clusters of v1.13 or later.
                                            • For clusters earlier than v1.19.10, if an HPA policy is used to scale out a workload with EVS volumes mounted, the existing pods cannot be read or written when a new pod is scheduled to another node.

                                              For clusters of v1.19.10 and later, if an HPA policy is used to scale out a workload with EVS volumes mounted, a new pod cannot be started because EVS disks cannot be attached.

                                              +
                                            +
                                            +

                                            Procedure

                                            1. Log in to the CCE console and click the cluster name to access the cluster console.
                                            2. Choose Workloads in the navigation pane. Locate the target workload and choose More > Auto Scaling in the Operation column.
                                            3. Set Policy Type to HPA+CronHPA, enable the created HPA policy, and configure parameters.

                                              This section describes only HPA policies. To enable CronHPA, see Creating a Scheduled CronHPA Policy.

                                              @@ -50,6 +51,7 @@ -
                                              Table 1 HPA policy

                                              Parameter

                                              Custom Policy (supported only in clusters of v1.15 or later)

                                              NOTE:

                                              Before creating a custom policy, install an add-on that supports custom metric collection (for example, Prometheus) in the cluster. Ensure that the add-on can collect and report the custom metrics of the workloads.

                                              +

                                              For details, see Monitoring Custom Metrics Using Cloud Native Cluster Monitoring.

                                              • Metric Name: name of the custom metric. You can select a name as prompted.
                                              • Metric Source: Select an object type from the drop-down list. You can select Pod.
                                              • Desired Value: the average metric value of all pods. Number of pods to be scaled (rounded up) = (Current metric value/Desired value) x Number of current pods
                                                NOTE:

                                                When calculating the number of pods to be added or reduced, the HPA policy uses the maximum number of pods in the last 5 minutes.
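As a rough sketch of how such a custom policy maps to the Kubernetes HPA API, the following manifest scales a Deployment on a Pods-type custom metric. The metric name (nginx_connections_active), the desired value, and the workload name are examples only; the metric must already be collected and exposed by an add-on such as Prometheus.

apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: nginx-custom-hpa             # example name
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: nginx                      # replace with your workload
  minReplicas: 1
  maxReplicas: 10
  metrics:
    - type: Pods
      pods:
        metric:
          name: nginx_connections_active   # example custom metric reported per pod
        target:
          type: AverageValue
          averageValue: "50"         # desired average metric value across all pods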

                                                diff --git a/docs/cce/umn/cce_10_0209.html b/docs/cce/umn/cce_10_0209.html index 1189313c..fcf554a9 100644 --- a/docs/cce/umn/cce_10_0209.html +++ b/docs/cce/umn/cce_10_0209.html @@ -2,14 +2,14 @@

                                                Creating a Node Scaling Policy

                                                CCE provides auto scaling through the CCE Cluster Autoscaler add-on. Nodes with different flavors can be automatically added across AZs on demand.

                                                -

                                                If both a node scaling policy and the configuration in the auto scaling add-on take effect, for example, there are pods that cannot be scheduled and the value of a metric reaches the threshold, scale-out is performed first for the unschedulable pods.

                                                +

If both a node scaling policy and the configuration in the Autoscaler add-on take effect (for example, there are pods that cannot be scheduled and the value of a metric reaches the threshold), scale-out is performed first for the unschedulable pods.

                                                • If the scale-out succeeds for the unschedulable pods, the system skips the metric-based rule logic and enters the next loop.
                                                • If the scale-out fails for the unschedulable pods, the metric-based rule is executed.

                                                Prerequisites

                                                Before using the node scaling function, you must install the CCE Cluster Autoscaler add-on of v1.13.8 or later in the cluster.

                                                -

                                                Constraints

                                                • If there are no nodes in a node pool, Autoscaler cannot obtain the CPU or memory data of the node, and the node scaling rule triggered using these metrics will not take effect.
                                                • If the driver of a GPU node is not installed, Autoscaler determines that the node is not fully available and the node scaling rules triggered using the CPU or memory metrics will not take effect.
                                                • Node scale-in will cause PVC/PV data loss for the local PVs associated with the node. These PVCs and PVs cannot be restored or used again. In a node scale-in, the pod that uses the local PV is evicted from the node. A new pod is created and stays in the pending state. This is because the PVC used by the pod has a node label, due to which the pod cannot be scheduled.
                                                • When Autoscaler is used, some taints or annotations may affect auto scaling. Therefore, do not use the following taints or annotations in clusters:
                                                  • ignore-taint.cluster-autoscaler.kubernetes.io: The taint works on nodes. Kubernetes-native Autoscaler supports protection against abnormal scale outs and periodically evaluates the proportion of available nodes in the cluster. When the proportion of non-ready nodes exceeds 45%, protection will be triggered. In this case, all nodes with the ignore-taint.cluster-autoscaler.kubernetes.io taint in the cluster are filtered out from the Autoscaler template and recorded as non-ready nodes, which affects cluster scaling.
                                                  • cluster-autoscaler.kubernetes.io/enable-ds-eviction: The annotation works on pods, which determines whether DaemonSet pods can be evicted by Autoscaler. For details, see Well-Known Labels, Annotations and Taints.
                                                  +

                                                  Notes and Constraints

                                                  • If there are no nodes in a node pool, Autoscaler cannot obtain the CPU or memory data of the node, and the node scaling rule triggered using these metrics will not take effect.
                                                  • If the driver of a GPU node is not installed, Autoscaler determines that the node is not fully available and the node scaling rules triggered using the CPU or memory metrics will not take effect.
                                                  • When Autoscaler is used, some taints or annotations may affect auto scaling. Therefore, do not use the following taints or annotations in clusters:
                                                    • ignore-taint.cluster-autoscaler.kubernetes.io: The taint works on nodes. Kubernetes-native Autoscaler supports protection against abnormal scale outs and periodically evaluates the proportion of available nodes in the cluster. When the proportion of non-ready nodes exceeds 45%, protection will be triggered. In this case, all nodes with the ignore-taint.cluster-autoscaler.kubernetes.io taint in the cluster are filtered out from the Autoscaler template and recorded as non-ready nodes, which affect cluster scaling.
                                                    • cluster-autoscaler.kubernetes.io/enable-ds-eviction: The annotation works on pods, which determines whether DaemonSet pods can be evicted by Autoscaler. For details, see Well-Known Labels, Annotations and Taints.
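For reference, the cluster-autoscaler.kubernetes.io/enable-ds-eviction annotation mentioned above is set in the pod template of a DaemonSet, as in the sketch below (the DaemonSet name and image are placeholders). As noted, avoid relying on this annotation in CCE clusters because it changes how Autoscaler handles DaemonSet pods during scale-in.

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: log-agent                    # example DaemonSet
spec:
  selector:
    matchLabels:
      app: log-agent
  template:
    metadata:
      labels:
        app: log-agent
      annotations:
        cluster-autoscaler.kubernetes.io/enable-ds-eviction: "false"   # tells Autoscaler not to evict these pods
    spec:
      containers:
        - name: agent
          image: fluentd:latest      # example image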
                                                  -

                                                  Configuring Node Pool Scaling Policies

                                                  1. Log in to the CCE console and click the cluster name to access the cluster console.
                                                  2. In the navigation pane, choose Nodes. On the Node Pools tab, locate the row containing the target node pool and click Auto Scaling.

                                                    • If the auto scaling add-on has not been installed, configure add-on parameters based on service requirements, click Install, and wait until the add-on is installed. For details about add-on configurations, see CCE Cluster Autoscaler.
                                                    • If the auto scaling add-on has been installed, directly configure auto scaling policies.
                                                    +

                                                    Configuring Node Pool Scaling Policies

                                                    1. Log in to the CCE console and click the cluster name to access the cluster console.
                                                    2. In the navigation pane, choose Nodes. On the Node Pools tab, locate the row containing the target node pool and click Auto Scaling.

                                                      • If Autoscaler has not been installed, configure add-on parameters based on service requirements, click Install, and wait until the add-on is installed. For details about add-on configurations, see CCE Cluster Autoscaler.
                                                      • If Autoscaler has been installed, directly configure auto scaling policies.

                                                    3. Configure auto scaling policies.

                                                      AS Configuration

• Customized Rule: Click Add Rule. In the dialog box displayed, configure parameters. You can add multiple node scaling rules, including a maximum of one CPU usage-based rule and one memory usage-based rule. The total number of rules cannot exceed 10.
                                                        The following table lists custom rules.
                                                        -
                                                        Table 1 Custom rules

                                                        Rule Type

                                                        @@ -20,9 +20,9 @@

                                                        Metric-based

                                                        • Trigger: Select CPU allocation rate or Memory allocation rate and enter a value. The value must be greater than the scale-in percentage configured in the auto scaling add-on.
                                                          NOTE:
                                                          • Resource allocation (%) = Resources requested by pods in the node pool/Resources allocatable to pods in the node pool
                                                          • If multiple rules meet the conditions, the rules are executed in either of the following modes:

                                                            If rules based on the CPU allocation rate and memory allocation rate are configured and two or more rules meet the scale-out conditions, the rule that will add the most nodes will be executed.

                                                            -

                                                            If a rule based on the CPU allocation rate and a periodic rule are configured and they both meet the scale-out conditions, one of them will be executed randomly. The rule executed first (rule A) changes the node pool to the scaling state. As a result, the other rule (rule B) cannot be executed. After rule A is executed and the node pool status becomes normal, rule B will not be executed.

                                                            -
                                                          • If rules based on the CPU allocation rate and memory allocation rate are configured, the policy detection period varies with the processing logic of each loop of the Autoscaler add-on. A scale-out is triggered once the conditions are met, but it is constrained by other factors such as the cooldown period and node pool status.
                                                          • When the number of nodes in the cluster reaches the upper limit, or the CPU or memory usage reaches the upper limit of the autoscaler add-on, node scale-out will not be triggered.
                                                          +
• Trigger: Select CPU allocation rate or Memory allocation rate and enter a value. The percentage must be greater than the node resource threshold configured for a node scale-in in the cluster's auto scaling policy (Configuring an Auto Scaling Policy for a Cluster).
                                                          NOTE:
                                                          • Resource allocation (%) = Resources requested by pods in the node pool/Resources allocatable to pods in the node pool
                                                          • If multiple rules meet the conditions, the rules are executed in either of the following modes:

                                                            If rules based on the CPU allocation rate and memory allocation rate are configured and two or more rules meet the scale-out conditions, the rule that will add the most nodes will be executed.

                                                            +

If both a CPU allocation rate-based rule and a periodic rule are configured and both meet the scale-out conditions, and the periodic rule is executed first, it changes the node pool to the scaling state and the metric-based rule cannot be executed. Even after the periodic rule finishes and the node pool status becomes normal, the metric-based rule will not be executed. If the metric-based rule is executed first, the periodic rule will be executed after the metric-based rule is complete.

                                                            +
                                                          • If a rule is configured based on the CPU allocation rate and memory allocation rate, the policy detection period varies with the processing logic of each loop of the Autoscaler add-on. A scale-out is triggered once the conditions are met, but it is constrained by other factors such as the cooldown period and node pool status.
                                                          • If the number of nodes reaches the upper limit of the cluster scale, the upper limit of the nodes supported in a node pool, or the upper limit of the nodes of a specific flavor, a metric-based scale-out will not be triggered.
                                                          • If the number of nodes, CPUs, or memory resources reaches the upper limit for a node scale-out, a metric-based scale-out will not be triggered.
                                                        • Action: Configure an action to be performed when the triggering condition is met.
                                                          • Custom: Add a specified number of nodes to a node pool.
                                                          • Auto calculation: When the trigger condition is met, nodes are automatically added and the allocation rate is restored to a value lower than the threshold. The formula is as follows:

Number of nodes to be added = [Resource request of pods in the node pool/(Available resources of a single node x Target allocation rate)] – Number of current nodes + 1

For example, if the pods in a node pool request 16 vCPUs in total, each node provides 4 vCPUs, the target allocation rate is 80%, and the pool currently has 3 nodes, then [16/(4 x 0.8)] – 3 + 1 = 3 nodes will be added.

                                                          @@ -38,24 +38,25 @@
                                                        -
                                                      • Nodes: The number of nodes in a node pool will always be within the range during auto scaling.
                                                      • Cooldown Period: a period during which the nodes added in the current node pool cannot be scaled in.
                                                      +
                                                    4. Nodes: The number of nodes in a node pool will always be within the range during auto scaling.
                                                    5. Cooldown Period: a period during which the nodes added in the current node pool cannot be scaled in.

                                                AS Object

                                                -

                                                Specification selection: Configure whether to enable auto scaling for node flavors in a node pool.

                                                +
                                                • Specifications: Configure whether to enable auto scaling for node flavors in a node pool.

                                                  If multiple flavors are configured for a node pool, you can specify the upper limit for the number of nodes and the priority for each flavor separately.

                                                  +
                                                  +

                                              • View cluster-level auto scaling configurations, which take effect for all node pools in the cluster. On this page, you can only view cluster-level auto scaling policies. To modify these policies, go to the Settings page. For details, see Configuring an Auto Scaling Policy for a Cluster.
                                              • After the configuration is complete, click OK.
                                              • Configuring an Auto Scaling Policy for a Cluster

                                                An auto scaling policy takes effect on all node pools in a cluster. After the policy is modified, the Autoscaler add-on will be restarted.

                                                -
                                                1. Log in to the CCE console and click the cluster name to access the cluster console.
                                                2. In the navigation pane, choose Settings and click the Auto Scaling tab.
                                                3. Configure for an elastic scale-out.

                                                  • Auto Scale-out when the load cannot be scheduled: When workload pods in a cluster cannot be scheduled (pods remain in pending state), CCE automatically adds nodes to the slave node pool. If a node has been configured to be affinity for pods, no node will not be automatically added when pods cannot be scheduled. Such auto scaling typically works with an HPA policy. For details, see Using HPA and CA for Auto Scaling of Workloads and Nodes.

                                                    If this function is not enabled, scaling can be performed only using custom scaling policies.

                                                    -
                                                  • Upper limit of resources to be expanded: Configure an upper limit for the total resources in the cluster. When the upper limit is reached, nodes will not be automatically added.

                                                    When the total number of nodes, CPUs, and memory is collected, unavailable nodes in custom node pools are included but unavailable nodes in the default node pool are not included.

                                                    -
                                                    -
                                                  • Scale-Out Priority: You can drag and drop the node pools in a list to adjust their scale-out priorities.
                                                  +
                                                  1. Log in to the CCE console and click the cluster name to access the cluster console.
                                                  2. In the navigation pane, choose Settings and click the Auto Scaling tab.

                                                    • If Autoscaler has not been installed, configure add-on parameters based on service requirements, click Install, and wait until the add-on is installed. For details about add-on configurations, see CCE Cluster Autoscaler.
                                                    • If Autoscaler has been installed, directly configure scaling policies.
                                                    +

3. Configure elastic scale-out.

                                                    • Auto Scale-out when the load cannot be scheduled: When workload pods in a cluster cannot be scheduled (pods remain in pending state), CCE automatically adds nodes to the slave node pool. If a pod has been scheduled to a node, the node will not be involved in an automatic scale-out. Such auto scaling typically works with an HPA policy. For details, see Using HPA and CA for Auto Scaling of Workloads and Nodes.

                                                      If this function is not enabled, custom scaling rules are the only option for performing a scale-out.

                                                      +
                                                    • Upper limit of resources to be expanded: the upper limit for the cluster's resources, such as the number of nodes, CPU cores, and memory. Once this limit is reached, no new nodes will be automatically added.
                                                    • Scale-Out Priority: You can drag and drop the node pools in a list to adjust their scale-out priorities.

4. Configure elastic scale-in. Elastic scale-in is disabled by default. After it is enabled, the following configurations are supported:

                                                    Node Scale-In Conditions: Nodes in a cluster are automatically scaled in when the scale-in conditions are met.
                                                    • Node Resource Condition: When the requested cluster node resources (both CPU and memory) are lower than a certain percentage (50% by default) for a period of time (10 minutes by default), a cluster scale-in is triggered.
                                                    • Node Status Condition: If a node is unavailable for a specified period of time, the node will be automatically reclaimed. The default value is 20 minutes.
                                                    • Scale-in Exception Scenarios: When a node meets the following exception scenarios, CCE will not scale in the node even if the node resources or status meets scale-in conditions:
                                                      1. Resources on other nodes in the cluster are insufficient.
                                                      2. Scale-in protection is enabled on the node. To enable or disable node scale-in protection, choose Nodes in the navigation pane and then click the Nodes tab. Locate the target node, choose More, and then enable or disable node scale-in protection in the Operation column.
3. There is a pod with the non-scale label on the node (see the annotation sketch after this step).
                                                      4. Policies such as reliability have been configured on some containers on the node.
                                                      5. There are non-DaemonSet containers in the kube-system namespace on the node.
6. (Optional) A container managed by a third-party pod controller is running on the node. Third-party pod controllers manage custom workloads other than Kubernetes-native workloads such as Deployments and StatefulSets. Such controllers can be created using CustomResourceDefinitions.
                                                    Node Scale-in Policy
                                                    • Number of Concurrent Scale-In Requests: maximum number of idle nodes that can be concurrently deleted. Default value: 10.
                                                      Only idle nodes can be concurrently scaled in. Nodes that are not idle can only be scaled in one by one.

                                                      During a node scale-in, if the pods on the node do not need to be evicted (such as DaemonSet pods), the node is idle. Otherwise, the node is not idle.

                                                      • Node Recheck Timeout: interval for rechecking a node that could not be removed. Default value: 5 minutes.
                                                      • Cooldown Period
                                                        • Scale-in Cooldown Time After Scale-out: Default value: 10 minutes.

                                                          If both auto scale-out and scale-in exist in a cluster, set Scale-in Cooldown Time After Scale-out to 0 minutes. This prevents the node scale-in from being blocked due to continuous scale-out of some node pools or retries upon a scale-out failure, which results in unexpected waste of node resources.

                                                        • Scale-in Cooldown Time After Node Deletion: Default value: 10 minutes.
                                                        • Scale-in Cooldown Time After Failure: Default value: 3 minutes. For details, see Cooldown Period.
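As referenced in the exception list above, individual pods can be excluded from scale-in. The following is a minimal sketch using the open-source Cluster Autoscaler annotation; it is an assumption that the CCE Autoscaler add-on honors this same annotation, and the console's node scale-in protection described above is the managed alternative.

apiVersion: v1
kind: Pod
metadata:
  name: payment-worker                   # placeholder pod name
  annotations:
    # Tells the Cluster Autoscaler not to evict this pod, so the node
    # running it is not selected for an automatic scale-in.
    cluster-autoscaler.kubernetes.io/safe-to-evict: "false"
spec:
  containers:
    - name: worker
      image: nginx:latest                # placeholder image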

                                              Procedure

                                              1. Create a CCE cluster.

                                                Create a cluster with the same specifications and configurations as the cluster of the earlier version. For details, see Creating a CCE Standard/Turbo Cluster.

                                              2. Add a node.

                                                Add a node with the same specifications and manual configuration items. For details, see Creating a Node.


                                              4. Create a storage volume in the new cluster.

                                                Use the existing storage to create a PVC in the new cluster. The PVC name remains unchanged. For details, see Using an Existing OBS Bucket Through a Static PV or Using an Existing SFS Turbo File System Through a Static PV.


                                                Storage switching supports only shared storage such as OBS and SFS Turbo. If non-shared storage is used, suspend the workloads in the old cluster to switch the storage resources. In this case, services will be unavailable.
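The following is a minimal sketch of statically binding existing shared storage in the new cluster, using an SFS Turbo file system as an example. The CSI driver name (sfsturbo.csi.everest.io), the storage class name (csi-sfsturbo), and the omitted everest-specific volume attributes are assumptions; use the exact fields given in the linked sections for your storage type, and keep the PVC name identical to the one used in the old cluster.

apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-sfsturbo                         # placeholder PV name
spec:
  accessModes:
    - ReadWriteMany
  capacity:
    storage: 500Gi
  csi:
    driver: sfsturbo.csi.everest.io         # assumed everest CSI driver for SFS Turbo
    volumeHandle: <existing-sfs-turbo-id>   # ID of the existing file system
    fsType: nfs
  persistentVolumeReclaimPolicy: Retain     # keep the file system when the PV is deleted
  storageClassName: csi-sfsturbo            # assumed storage class name
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-sfsturbo                        # keep the same PVC name as in the old cluster
  namespace: default
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 500Gi
  storageClassName: csi-sfsturbo
  volumeName: pv-sfsturbo                   # bind to the PV defined above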

                                              5. Create a workload in the new cluster.

                                                Create a workload in the new cluster. The name and specifications remain unchanged. For details, see Creating a Deployment or Creating a StatefulSet.

                                              6. Mount the storage again.

                                                Remount the existing storage in the workload. For details, see Using an Existing OBS Bucket Through a Static PV or Using an Existing SFS Turbo File System Through a Static PV.
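Remounting comes down to referencing the re-created PVC in the workload's volume definition. A minimal sketch follows (the workload name, image, and mount path are placeholders consistent with the previous example):

apiVersion: apps/v1
kind: Deployment
metadata:
  name: app-with-storage              # placeholder workload name
spec:
  replicas: 1
  selector:
    matchLabels:
      app: app-with-storage
  template:
    metadata:
      labels:
        app: app-with-storage
    spec:
      containers:
        - name: app
          image: nginx:latest         # placeholder image
          volumeMounts:
            - name: data
              mountPath: /data        # same mount path as in the old cluster
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: pvc-sfsturbo   # PVC created in the previous step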


                                                Deleting a Cluster

Precautions

• Deleting a cluster will delete the workloads and Services in the cluster, and the deleted data cannot be recovered. Before performing this operation, ensure that related data has been backed up or migrated.
• If you choose to delete a cluster with the nodes in it, the system disks and data disks attached to the nodes will also be deleted. Back up data before the deletion.
• Resources that are not created in CCE will not be deleted:
  • Accepted nodes (only the nodes created in CCE are deleted)
  • ELB load balancers associated with Services and ingresses (only the automatically created load balancers are deleted)
  • Manually created cloud storage resources associated with PVs or imported cloud storage resources (only the cloud storage resources automatically created by PVCs are deleted)
• If you delete a cluster that is not running (for example, unavailable), associated resources, such as storage and networking resources, will remain.

                                                Deleting a Cluster

                                                A hibernated cluster cannot be deleted. Wake up the cluster and try again.


                                                Modifying Cluster Configurations

                                                Scenario

CCE allows you to manage cluster parameters so that core Kubernetes components work the way your services require.


                                                Constraints

                                                This function is supported only in clusters of v1.15 and later. It is not displayed for versions earlier than v1.15.


                                                Procedure

                                                1. Log in to the CCE console. In the navigation pane, choose Clusters.
                                                2. Locate the target cluster, click ... to view more operations on the cluster, and choose Manage.
                                                3. On the Manage Components page on the right, change the values of the Kubernetes parameters listed in the following table.

Table 1 kube-apiserver configurations

Item: Toleration time for nodes in NotReady state
Parameter: default-not-ready-toleration-seconds
Description: Specifies the default tolerance time. The configuration takes effect for all pods by default. You can configure a different tolerance time for specific pods, in which case the tolerance time configured for the pod is used. For details, see Configuring Tolerance Policies. If the specified tolerance time is too short, pods may be frequently migrated in scenarios like a network jitter. If the specified tolerance time is too long, services may be interrupted during this period after the node becomes faulty.
Value: Default: 300s

Item: Toleration time for nodes in unreachable state
Parameter: default-unreachable-toleration-seconds
Description: Specifies the default tolerance time. The configuration takes effect for all pods by default. You can configure a different tolerance time for specific pods, in which case the tolerance time configured for the pod is used. For details, see Configuring Tolerance Policies. If the specified tolerance time is too short, pods may be frequently migrated in scenarios like a network jitter. If the specified tolerance time is too long, services may be interrupted during this period after the node becomes faulty.
Value: Default: 300s

Item: Maximum Number of Concurrent Modification API Calls
Parameter: max-mutating-requests-inflight
Description: Maximum number of concurrent mutating requests. When the value of this parameter is exceeded, the server rejects requests. The value 0 indicates no limit on the number of concurrent mutating requests. This parameter is related to the cluster scale. You are advised not to change the value.
Value: Manual configuration is no longer supported since cluster v1.21. The value is automatically specified based on the cluster scale.
  • 200 for clusters with 50 or 200 nodes
  • 500 for clusters with 1000 nodes
  • 1000 for clusters with 2000 nodes

Item: Maximum Number of Concurrent Non-Modification API Calls
Parameter: max-requests-inflight
Description: Maximum number of concurrent non-mutating requests. When the value of this parameter is exceeded, the server rejects requests. The value 0 indicates no limit on the number of concurrent non-mutating requests. This parameter is related to the cluster scale. You are advised not to change the value.
Value: Manual configuration is no longer supported since cluster v1.21. The value is automatically specified based on the cluster scale.
  • 400 for clusters with 50 or 200 nodes
  • 1000 for clusters with 1000 nodes
  • 2000 for clusters with 2000 nodes

Item: NodePort port range
Parameter: service-node-port-range
Description: NodePort port range. After changing the value, go to the security group page and update the TCP/UDP port range (30000 to 32767 by default) allowed in the node security groups accordingly. Otherwise, ports outside the default range cannot be accessed externally. If the port number is smaller than 20106, it may conflict with the CCE health check port, which may make the cluster unavailable. If the port number is greater than 32767, it may conflict with the ports in net.ipv4.ip_local_port_range, which may affect network performance.
Value: Default: 30000 to 32767
  Value range: Min > 20105, Max < 32768

Item: Request Timeout
Parameter: request-timeout
Description: Default request timeout interval of kube-apiserver. Exercise caution when changing the value of this parameter. Ensure that the changed value is proper to prevent frequent API timeouts or other errors. This parameter is available only in clusters of v1.19.16-r30, v1.21.10-r10, v1.23.8-r10, v1.25.3-r10, or later versions.
Value: Default: 1m0s
  Value range: Min ≥ 1s, Max ≤ 1 hour

Item: Overload Control
Parameter: support-overload
Description: Cluster overload control. If enabled, concurrent requests are dynamically controlled based on the resource pressure of master nodes to keep them and the cluster available. This parameter is available only in clusters of v1.23 or later.
Value:
  • false: Overload control is disabled.
  • true: Overload control is enabled.

Item: Node Restriction Add-on
Parameter: enable-admission-plugin-node-restriction
Description: This add-on allows the kubelet of a node to operate only the objects of the current node for enhanced isolation in multi-tenant scenarios or scenarios with high security requirements. This parameter is available only in clusters of v1.23.14-r0, v1.25.9-r0, v1.27.6-r0, v1.28.4-r0, or later versions.
Value: Default: true

Item: Pod Node Selector Add-on
Parameter: enable-admission-plugin-pod-node-selector
Description: This add-on allows cluster administrators to configure the default node selector through namespace annotations. In this way, pods run only on specific nodes and configurations are simplified. This parameter is available only in clusters of v1.23.14-r0, v1.25.9-r0, v1.27.6-r0, v1.28.4-r0, or later versions.
Value: Default: true

Item: Pod Toleration Limit Add-on
Parameter: enable-admission-plugin-pod-toleration-restriction
Description: This add-on allows cluster administrators to configure the default value and limits of pod tolerations through namespaces for fine-grained control over pod scheduling and key resource protection. This parameter is available only in clusters of v1.23.14-r0, v1.25.9-r0, v1.27.6-r0, v1.28.4-r0, or later versions.
Value: Default: false

Item: API Audience Settings
Parameter: api-audiences
Description: Audiences for a service account token. The Kubernetes component that authenticates service account tokens checks whether the token used in an API request specifies authorized audiences. Configuration suggestion: Accurately configure audiences according to the communication needs among cluster services so that the service account token is used for authentication only between authorized services, which enhances security.
  NOTE: An incorrect configuration may lead to an authentication failure between services or an error during token verification.
  This parameter is available only in clusters of v1.23.16-r0, v1.25.11-r0, v1.27.8-r0, v1.28.6-r0, v1.29.2-r0, or later versions.
Value: Default value: "https://kubernetes.default.svc.cluster.local"
  Multiple values can be configured, separated by commas (,).

Item: Service Account Token Issuer Identity
Parameter: service-account-issuer
Description: Entity identifier for issuing a service account token, which is the value identified by the iss field in the payload of the service account token. Configuration suggestion: Ensure the configured issuer URL can be accessed in the cluster and is trusted by the authentication system in the cluster.
  NOTE: If the specified issuer URL is untrusted or inaccessible, the authentication process based on the service account may fail.
  This parameter is available only in clusters of v1.23.16-r0, v1.25.11-r0, v1.27.8-r0, v1.28.6-r0, v1.29.2-r0, or later versions.
Value: Default value: "https://kubernetes.default.svc.cluster.local"
  Multiple values can be configured, separated by commas (,).
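The two toleration-time parameters above set cluster-wide defaults; an individual pod can override them with its own tolerations. A minimal sketch using standard Kubernetes syntax (the pod name, image, and 60-second value are placeholders):

apiVersion: v1
kind: Pod
metadata:
  name: fast-failover-app             # placeholder pod name
spec:
  containers:
    - name: app
      image: nginx:latest             # placeholder image
  tolerations:
    # Evict this pod 60s after its node becomes NotReady or unreachable,
    # instead of waiting for the cluster-wide 300s default.
    - key: node.kubernetes.io/not-ready
      operator: Exists
      effect: NoExecute
      tolerationSeconds: 60
    - key: node.kubernetes.io/unreachable
      operator: Exists
      effect: NoExecute
      tolerationSeconds: 60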
Table 2 Scheduler configurations

Item: QPS for communicating with kube-apiserver
Parameter: kube-api-qps
Description: QPS for communicating with kube-apiserver.
Value:
  • If the number of nodes in a cluster is less than 1000, the default value is 100.
  • If the number of nodes in a cluster is 1000 or more, the default value is 200.

Item: Burst for communicating with kube-apiserver
Parameter: kube-api-burst
Description: Burst for communicating with kube-apiserver.
Value:
  • If the number of nodes in a cluster is less than 1000, the default value is 100.
  • If the number of nodes in a cluster is 1000 or more, the default value is 200.

Item: Whether to enable GPU sharing
Parameter: enable-gpu-share
Description: Whether to enable GPU sharing. This parameter is supported only by clusters of v1.23.7-r10, v1.25.3-r0, and later.
  • When disabled, ensure that pods in the cluster cannot use shared GPUs (no cce.io/gpu-decision annotation in pods).
  • When enabled, ensure that there is a cce.io/gpu-decision annotation on all pods that use GPU resources in the cluster.
Value: Default: true
                                                  Table 3 kube-controller-manager configurations

                                                  Item

                                                  +
                                                  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + - + + + + + + + + + + + + + + + + + + + +
                                                  Table 3 kube-controller-manager configurations

                                                  Item

                                                  Parameter

                                                  +

                                                  Parameter

                                                  Description

                                                  +

                                                  Description

                                                  Value

                                                  +

                                                  Value

                                                  Number of concurrent processing of deployment

                                                  +

                                                  Number of concurrent processing of deployment

                                                  concurrent-deployment-syncs

                                                  +

                                                  concurrent-deployment-syncs

                                                  Number of deployment objects that are allowed to sync concurrently

                                                  +

                                                  Number of deployment objects that can be synchronized concurrently

                                                  Default: 5

                                                  +

                                                  Default: 5

                                                  Concurrent processing number of endpoint

                                                  +

                                                  Concurrent processing number of endpoint

                                                  concurrent-endpoint-syncs

                                                  +

                                                  concurrent-endpoint-syncs

                                                  Number of endpoint syncing operations that will be done concurrently

                                                  +

                                                  Number of endpoint syncing operations that will be done concurrently

                                                  Default: 5

                                                  +

                                                  Default: 5

                                                  Concurrent number of garbage collector

                                                  +

                                                  Concurrent number of garbage collector

                                                  concurrent-gc-syncs

                                                  +

                                                  concurrent-gc-syncs

                                                  Number of garbage collector workers that are allowed to sync concurrently

                                                  +

                                                  Number of garbage collector workers that can be synchronized concurrently

                                                  Default: 20

                                                  +

                                                  Default: 20

                                                  Number of job objects allowed to sync simultaneously

                                                  +

                                                  Number of job objects allowed to sync simultaneously

                                                  concurrent-job-syncs

                                                  +

                                                  concurrent-job-syncs

                                                  Number of job objects that are allowed to sync concurrently

                                                  +

                                                  Number of job objects that can be synchronized concurrently

                                                  Default: 5

                                                  +

                                                  Default: 5

                                                  Number of CronJob objects allowed to sync simultaneously

                                                  +

                                                  Number of CronJob objects allowed to sync simultaneously

                                                  concurrent-cron-job-syncs

                                                  +

                                                  concurrent-cron-job-syncs

                                                  Number of scheduled jobs that can be synchronized concurrently.

                                                  +

                                                  Number of scheduled jobs that can be synchronized concurrently

                                                  Default: 5

                                                  +

                                                  Default: 5

                                                  Number of concurrent processing of namespace

                                                  +

                                                  Number of concurrent processing of namespace

                                                  concurrent-namespace-syncs

                                                  +

                                                  concurrent-namespace-syncs

                                                  Number of namespace objects that are allowed to sync concurrently

                                                  +

                                                  Number of namespace objects that can be synchronized concurrently

                                                  Default: 10

                                                  +

                                                  Default: 10

                                                  Concurrent processing number of replicaset

                                                  +

                                                  Concurrent processing number of replicaset

                                                  concurrent-replicaset-syncs

                                                  +

                                                  concurrent-replicaset-syncs

                                                  Number of replica sets that are allowed to sync concurrently

                                                  +

                                                  Number of replica sets that can be synchronized concurrently

                                                  Default: 5

                                                  +

                                                  Default: 5

                                                  ResourceQuota

                                                  +

                                                  ResourceQuota

                                                  concurrent-resource-quota-syncs

                                                  +

                                                  concurrent-resource-quota-syncs

                                                  Number of resource quotas that are allowed to sync concurrently

                                                  +

                                                  Number of resource quotas that can be synchronized concurrently

                                                  Default: 5

                                                  +

                                                  Default: 5

                                                  Concurrent processing number of service

                                                  +

                                                  Concurrent processing number of service

                                                  concurrent-service-syncs

                                                  +

                                                  concurrent-service-syncs

                                                  Number of services that are allowed to sync concurrently

                                                  +

                                                  Number of services that can be synchronized concurrently

                                                  Default: 10

                                                  +

                                                  Default: 10

                                                  Concurrent processing number of serviceaccount-token

                                                  +

                                                  Concurrent processing number of serviceaccount-token

                                                  concurrent-serviceaccount-token-syncs

                                                  +

                                                  concurrent-serviceaccount-token-syncs

                                                  Number of service account token objects that are allowed to sync concurrently

                                                  +

                                                  Number of service account token objects that can be synchronized concurrently

                                                  Default: 5

                                                  +

                                                  Default: 5

                                                  Concurrent processing of ttl-after-finished

                                                  +

                                                  Concurrent processing of ttl-after-finished

                                                  concurrent-ttl-after-finished-syncs

                                                  +

                                                  concurrent-ttl-after-finished-syncs

                                                  Number of ttl-after-finished-controller workers that are allowed to sync concurrently

                                                  +

                                                  Number of ttl-after-finished-controller workers that can be synchronized concurrently

                                                  Default: 5

                                                  +

                                                  Default: 5

                                                  RC

                                                  +

                                                  RC

                                                  concurrent-rc-syncs

                                                  +

                                                  concurrent_rc_syncs (used in clusters of v1.19 or earlier)

                                                  +

                                                  concurrent-rc-syncs (used in clusters of v1.21 through v1.25.3-r0)

                                                  Number of replication controllers that are allowed to sync concurrently

                                                  -
                                                  NOTE:

                                                  This parameter is used only in clusters of v1.21 to v1.23. In clusters of v1.25 and later, this parameter is deprecated (officially deprecated from v1.25.3-r0 on).

                                                  +

                                                  Number of replication controllers that can be synchronized concurrently

                                                  +
                                                  NOTE:

                                                  This parameter is no longer supported in clusters of v1.25.3-r0 and later versions.

                                                  Default: 5

                                                  +

                                                  Default: 5

                                                  HPA

                                                  +

                                                  HPA

                                                  concurrent-horizontal-pod-autoscaler-syncs

                                                  +

                                                  concurrent-horizontal-pod-autoscaler-syncs

                                                  Number of HPA auto scaling requests that can be concurrently processed.

                                                  +

                                                  Number of HPA auto scaling requests that can be concurrently processed

                                                  Default 1 for clusters earlier than v1.27 and 5 for clusters earlier than v1.27

                                                  +

                                                  Default 1 for clusters earlier than v1.27 and 5 for clusters of v1.27 or later

                                                  Value range: 1 to 50

                                                  Cluster elastic computing period

                                                  +

                                                  Cluster elastic computing period

                                                  horizontal-pod-autoscaler-sync-period

                                                  +

                                                  horizontal-pod-autoscaler-sync-period

                                                  How often HPA audits metrics in a cluster.

                                                  +

                                                  Period for the horizontal pod autoscaler to perform auto scaling on pods. A smaller value will result in a faster auto scaling response and higher CPU load.

                                                  +
                                                  NOTE:

                                                  Make sure to configure this parameter properly as a lengthy period can cause the controller to respond slowly, while a short period may overload the cluster control plane.

                                                  +

                                                  Default: 15 seconds

                                                  +

                                                  Default: 15 seconds

                                                  Qps for communicating with kube-apiserver

                                                  +

                                                  Horizontal Pod Scaling Tolerance

                                                  kube-api-qps

                                                  +

                                                  horizontal-pod-autoscaler-tolerance

                                                  QPS for communicating with kube-apiserver

                                                  +

                                                  The configuration determines how quickly the horizontal pod autoscaler will act to auto scaling policies. If the parameter is set to 0, auto scaling will be triggered immediately when the related metrics are met.

                                                  +

                                                  Configuration suggestion: If the service resource usage increases sharply over time, retain a certain tolerance to prevent auto scaling which is beyond expectation in high resource usage scenarios.

                                                  • If the number of nodes in a cluster is less than 1000, the default value is 100.
                                                  • If a cluster contains 1000 or more nodes, the default value is 200.
                                                  +

                                                  Default: 0.1

                                                  Burst for communicating with kube-apiserver

                                                  +

                                                  HPA CPU Initialization Period

                                                  kube-api-burst

                                                  +

                                                  horizontal-pod-autoscaler-cpu-initialization-period

                                                  Burst for communicating with kube-apiserver.

                                                  +

                                                  During the period specified by this parameter, the CPU usage data used in HPA calculation is limited to pods that are both ready and have recently had their metrics collected. You can use this parameter to filter out unstable CPU usage data during the early stage of pod startup. This helps prevent incorrect scaling decisions based on momentary peak values.

                                                  +

Configuration suggestion: If you find that HPA is making incorrect scaling decisions due to CPU usage fluctuations during pod startup, increase the value of this parameter to allow for a buffer period of stable CPU usage.
NOTE:
Make sure to configure this parameter properly, as a small value may trigger unnecessary scaling based on peak CPU usage, while a large value may cause scaling to be delayed.
This parameter is available only in clusters of v1.23.16-r0, v1.25.11-r0, v1.27.8-r0, v1.28.6-r0, v1.29.2-r0, or later versions.

Default: 5 minutes

Item: HPA Initial Readiness Delay

Parameter: horizontal-pod-autoscaler-initial-readiness-delay

Description: After CPU initialization, this period allows HPA to use a less strict criterion for filtering CPU metrics. During this period, HPA gathers data on the CPU usage of the pod for scaling, regardless of any changes in the pod's readiness status. This parameter ensures continuous tracking of CPU usage, even when the pod status changes frequently.
Configuration suggestion: If the readiness status of pods fluctuates after startup and you want to prevent HPA misjudgment caused by the fluctuation, increase the value of this parameter to allow HPA to gather more comprehensive CPU usage data.
NOTE:
Configure this parameter properly. If it is set to a small value, an unnecessary scale-out may occur due to CPU data fluctuations when the pod enters the ready state. If it is set to a large value, HPA may not be able to make a quick decision when a rapid response is needed.
This parameter is available only in clusters of v1.23.16-r0, v1.25.11-r0, v1.27.8-r0, v1.28.6-r0, v1.29.2-r0, or later versions.

Value: Default: 30s
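These HPA parameters tune how the cluster-wide HPA controller filters CPU metrics; the scaling rule itself is still defined per workload in an HPA resource. For reference, a minimal sketch of an HPA that such settings would influence (the Deployment name nginx and the 50% CPU target are illustrative assumptions, not values from this document):

apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: nginx-hpa                  # hypothetical name, for illustration only
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: nginx                    # assumed target Deployment
  minReplicas: 2
  maxReplicas: 10
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 50   # scale out when average CPU utilization exceeds 50%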

Item: QPS for communicating with kube-apiserver

Parameter: kube-api-qps

Description: QPS for communicating with kube-apiserver

Value:
• If the number of nodes in a cluster is less than 1000, the default value is 100.
• If the number of nodes in a cluster is 1000 or more, the default value is 200.

Item: Burst for communicating with kube-apiserver

Parameter: kube-api-burst

Description: Burst for communicating with kube-apiserver

Value:
• If the number of nodes in a cluster is less than 1000, the default value is 100.
• If the number of nodes in a cluster is 1000 or more, the default value is 200.
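As an illustration of the standard Kubernetes client-side rate limiting that these two parameters follow (QPS is the sustained rate, burst the short-term allowance): with the defaults for a cluster of fewer than 1000 nodes (kube-api-qps of 100 and kube-api-burst of 200), kube-controller-manager sends requests to kube-apiserver at an average rate of at most about 100 requests per second, while short spikes of up to roughly 200 requests are tolerated before throttling. The figures simply restate the defaults listed above.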

Item: The maximum number of terminated pods that can be kept before the Pod GC deletes the terminated pods

Parameter: terminated-pod-gc-threshold

Description: Number of terminated pods that can exist in a cluster. If there are more terminated pods than this number in the cluster, the excess terminated pods will be deleted.
NOTE:
If this parameter is set to 0, all pods in the terminated state are retained.

Value: Default: 1000
Value range: 10 to 12500
If the cluster version is v1.21.11-r40, v1.23.8-r0, v1.25.6-r0, v1.27.3-r0, or later, the value range is changed to 0 to 100000.
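As a worked illustration of this threshold (the figures are hypothetical): with the default value of 1000, a cluster that has accumulated 1200 pods in the Succeeded or Failed state will have about 200 of its terminated pods garbage-collected so that the count returns to the threshold.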

Item: Unhealthy AZ Threshold

Parameter: unhealthy-zone-threshold

Description: When more than this proportion of pods in an AZ are unhealthy, the AZ itself is considered unhealthy, and scheduling pods to nodes in that AZ is restricted to limit the impact of the unhealthy AZ.
This parameter is available only in clusters of v1.23.14-r0, v1.25.9-r0, v1.27.6-r0, v1.28.4-r0, or later versions.
NOTE:
If this parameter is set to a large value, pods in unhealthy AZs will be migrated on a large scale, which may lead to risks such as cluster overload.

Value: Default: 0.55
Value range: 0 to 1

Item: Node Eviction Rate

Parameter: node-eviction-rate

Description: Number of nodes that pods are evicted from per second in a cluster when the AZ is healthy. The default value 0.1 indicates that pods can be evicted from at most one node every 10 seconds.
This parameter is available only in clusters of v1.23.14-r0, v1.25.9-r0, v1.27.6-r0, v1.28.4-r0, or later versions.
NOTE:
If this parameter is set to a large value, the cluster may be overloaded. Additionally, if too many pods are evicted at once, they cannot all be rescheduled, which will slow down fault recovery.

Value: Default: 0.1

Item: Secondary Node Eviction Rate

Parameter: secondary-node-eviction-rate

Description: Number of nodes that pods are evicted from per second in a cluster when the AZ is unhealthy. The default value 0.01 indicates that pods can be evicted from at most one node every 100 seconds.
This parameter is available only in clusters of v1.23.14-r0, v1.25.9-r0, v1.27.6-r0, v1.28.4-r0, or later versions.
NOTE:
There is no need to set this parameter to a large value when the AZ is unhealthy, and doing so may overload the cluster.

Value: Default: 0.01
Configure this parameter together with node-eviction-rate and set it to one-tenth of node-eviction-rate.

Item: Large Cluster Threshold

Parameter: large-cluster-size-threshold

Description: If the number of nodes in a cluster is greater than this value, the cluster is treated as a large cluster.
This parameter is available only in clusters of v1.23.14-r0, v1.25.9-r0, v1.27.6-r0, v1.28.4-r0, or later versions.
NOTE:
kube-controller-manager automatically adjusts its configurations for large clusters to optimize cluster performance. An excessively small threshold will therefore deteriorate the performance of small clusters.

Value: Default: 50
For clusters with a large number of nodes, configure a value larger than the default one for higher performance and faster controller responses. Retain the default value for small clusters. Before adjusting this parameter in a production environment, check the impact of the change on cluster performance in a test environment.
Table 4 Network components (available only for CCE Turbo clusters)

Item: The minimum number of NICs bound to a container at the cluster level

Parameter: nic-minimum-target

Description: Minimum number of container ENIs bound to a node.

Value: The value must be a positive integer. The value 10 indicates that at least 10 container ENIs must be bound to a node. If the number you specified exceeds the container ENI quota of the node, the ENI quota will be used.
Default: 10

Item: Cluster-level upper limit check value for pre-binding container NICs to a node

Parameter: nic-maximum-target

Description: After the number of ENIs bound to a node exceeds the nic-maximum-target value, CCE will not proactively pre-bind ENIs.
Checking the upper limit of pre-bound container ENIs is enabled only when the value of this parameter is greater than or equal to the minimum number of container ENIs (nic-minimum-target) bound to a node.

Value: The value must be a positive integer. The value 0 indicates that the upper limit check for pre-bound container ENIs is disabled. If the number you specified exceeds the container ENI quota of the node, the ENI quota will be used.
Default: 0

Item: Number of container NICs dynamically pre-warmed at the cluster level

Parameter: nic-warm-target

Description: Extra ENIs will be pre-bound after the nic-minimum-target ENIs are used up in a pod. The value can only be a number.
When the sum of the nic-warm-target value and the number of ENIs already bound to the node is greater than the nic-maximum-target value, CCE will pre-bind only as many ENIs as the difference between the nic-maximum-target value and the current number of bound ENIs.

Value: Default: 2
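As a worked illustration of the rule above (the figures are hypothetical): assume nic-warm-target is 2, nic-maximum-target is 6, and 5 ENIs are already bound to a node. Because 5 + 2 exceeds 6, CCE pre-binds only 6 - 5 = 1 additional ENI instead of 2.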

Item: Cluster-level reclaiming threshold for pre-warmed container NICs on a node

Parameter: nic-max-above-warm-target

Description: Pre-bound ENIs are unbound and reclaimed only when the number of idle ENIs on a node minus the nic-warm-target value is greater than this threshold. The value can only be a number.
• A large value accelerates pod startup but slows down the unbinding of idle container ENIs and decreases the IP address usage. Exercise caution when increasing this value.
• A small value speeds up the unbinding of idle container ENIs and increases the IP address usage but slows down pod startup, especially when a large number of pods are created in a short period.

Value: Default: 2

Item: Low threshold of the number of container ENIs bound to a node in a cluster

Parameter: prebound-subeni-percentage

Description: High threshold of the number of bound ENIs.
NOTE:
This parameter is being deprecated. Use the other four dynamic ENI pre-binding parameters instead.

Value: Default: 0:0
Table 5 Networking component configurations (supported only by clusters using a VPC network)

Item: Retaining the non-masqueraded CIDR blocks of the original pod IP addresses

Parameter: nonMasqueradeCIDRs

Description: In a CCE cluster using the VPC network model, when a container accesses an external network, the source pod IP address is masqueraded as the IP address of the node where the pod resides through SNAT. After this parameter is configured, the node will not perform SNAT on packets destined for addresses in the specified CIDR blocks.
This function is available only in clusters of v1.23.14-r0, v1.25.9-r0, v1.27.6-r0, v1.28.4-r0, or later versions.
By default, nodes in a cluster do not perform SNAT on packets destined for 10.0.0.0/8, 172.16.0.0/12, or 192.168.0.0/16, which CCE detects as private CIDR blocks. Instead, these packets are directly forwarded through the upper-layer VPC. (The three CIDR blocks are considered internal networks in the cluster and are reachable at Layer 3 by default.)

Value: Default: 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16
NOTE:
To enable cross-node pod access, the CIDR block of the node where the target pod runs must be added.
Similarly, to enable cross-ECS pod access in a VPC, the CIDR block of the ECS where the target pod runs must be added.
diff --git a/docs/cce/umn/cce_10_0214.html b/docs/cce/umn/cce_10_0214.html

Hibernating or Waking Up a Cluster

Scenario

If a pay-per-use cluster is not needed temporarily, hibernate it to reduce costs.

After a cluster is hibernated, resources such as workloads cannot be created or managed in the cluster.

A hibernated cluster can be quickly woken up and used properly.

Precautions

• During cluster wakeup, the master node may fail to start due to insufficient resources, which leads to a cluster wakeup failure. In this case, wait for a while and try again.
• After a cluster is woken up, it takes 3 to 5 minutes to initialize data. Deliver services after the cluster runs properly.

Hibernating a Cluster

1. Log in to the CCE console. In the navigation pane, choose Clusters.
2. Locate the cluster to be hibernated, click ... to view more operations on the cluster, and choose Hibernate.
3. In the dialog box displayed, check the precautions and click Yes. Wait until the cluster is hibernated.

diff --git a/docs/cce/umn/cce_10_0215.html b/docs/cce/umn/cce_10_0215.html

                                                  Prerequisites

                                                  Before creating a DaemonSet, you must have an available cluster. For details on how to create a cluster, see Creating a CCE Standard/Turbo Cluster.

Using the CCE Console

1. Log in to the CCE console.
2. Click the cluster name to access the cluster console, choose Workloads in the navigation pane, and click Create Workload in the upper right corner.
3. Set basic information about the workload.

  Basic Info
  • Workload Type: Select DaemonSet. For details about workload types, see Overview.
  • Workload Name: Enter the name of the workload. Enter 1 to 63 characters starting with a lowercase letter and ending with a lowercase letter or digit. Only lowercase letters, digits, and hyphens (-) are allowed.
  • Namespace: Select the namespace of the workload. The default value is default. You can also click Create Namespace to create one. For details, see Creating a Namespace.
  • Container Runtime: A CCE standard cluster uses runC by default, whereas a CCE Turbo cluster supports both runC and Kata. For details about the differences, see Secure Runtime and Common Runtime.
  • Time Zone Synchronization: Specify whether to enable time zone synchronization. After time zone synchronization is enabled, the container and node use the same time zone. The time zone synchronization function depends on the local disk mounted to the container. Do not modify or delete the time zone. For details, see Configuring Time Zone Synchronization.
                                                      Container Settings
                                                      • Container Information
                                                        Multiple containers can be configured in a pod. You can click Add Container on the right to configure multiple containers for the pod.
                                                        • Basic Info: Configure basic information about the container.
Table 6 Extended controller configurations (supported only by clusters of v1.21 and later)

Item: Enable resource quota management

Parameter: enable-resource-quota

Description: Indicates whether to automatically create a ResourceQuota object when creating a namespace. With quota management, you can control the number of workloads of each type and the upper limits of resources in a namespace or related dimensions.

Value:
• false: Auto creation is disabled.
• true: Auto creation is enabled. For details about the resource quota defaults, see Configuring Resource Quotas.
NOTE:
In high-concurrency scenarios (for example, creating pods in batches), resource quota management may cause some requests to fail due to conflicts. Do not enable this function unless necessary. To enable it, ensure that the request client has a retry mechanism.
Default: false
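When enable-resource-quota is set to true, a ResourceQuota object is created automatically in each new namespace. For reference, a minimal sketch of a Kubernetes ResourceQuota (the name, namespace, and limits below are illustrative assumptions, not the CCE defaults):

apiVersion: v1
kind: ResourceQuota
metadata:
  name: example-quota        # hypothetical name
  namespace: example-ns      # hypothetical namespace
spec:
  hard:
    pods: "100"              # at most 100 pods in the namespace
    requests.cpu: "20"       # total CPU requests capped at 20 cores
    requests.memory: 40Gi    # total memory requests capped at 40 GiB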


(Optional) Service Settings

A Service provides external access for pods. With a static IP address, a Service forwards access traffic to pods and automatically balances load for these pods.

You can also create a Service after creating a workload. For details about Services of different types, see Overview.

(Optional) Advanced Settings
• Upgrade: Specify the upgrade mode and parameters of the workload. Rolling upgrade and Replace upgrade are available. For details, see Workload Upgrade Policies.
• Scheduling: Configure affinity and anti-affinity policies for flexible workload scheduling. Node affinity is provided.
  • Node Affinity: Common load affinity policies are offered for quick load affinity deployment.
    • Specified node scheduling: Workload pods can be deployed on specified nodes through node affinity (nodeAffinity). If no node is specified, the pods will be randomly scheduled based on the default scheduling policy of the cluster.
    • Specified node pool scheduling: Workload pods can be deployed in a specified node pool through node affinity (nodeAffinity). If no node pool is specified, the pods will be randomly scheduled based on the default scheduling policy of the cluster.
    • Custom policies: Affinity and anti-affinity policies can be customized as needed. For details, see Scheduling Policies (Affinity/Anti-affinity).
• Toleration: Using both taints and tolerations allows (not forcibly) the pod to be scheduled to a node with the matching taints, and controls the pod eviction policies after the node where the pod is located is tainted. For details, see Configuring Tolerance Policies. A brief YAML sketch follows this list.
• Labels and Annotations: Add labels or annotations for pods using key-value pairs. After entering the key and value, click Confirm. For details about how to use and configure labels and annotations, see Configuring Labels and Annotations.
• DNS: Configure a separate DNS policy for the workload. For details, see DNS Configuration.
• Network Configuration
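As referenced in the Toleration item above, tolerations are declared in the pod template spec of the workload. A minimal sketch, assuming a hypothetical taint example-key=example-value:NoExecute (not a value from this document):

tolerations:
  - key: example-key           # hypothetical taint key
    operator: Equal
    value: example-value       # hypothetical taint value
    effect: NoExecute
    tolerationSeconds: 60      # evict the pod 60 seconds after the taint is added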

4. Click Create Workload in the lower right corner.

Using kubectl

The following procedure uses Nginx as an example to describe how to create a workload using kubectl.

1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
2. Create and edit the nginx-daemonset.yaml file. nginx-daemonset.yaml is an example file name, and you can change it as required.

  vi nginx-daemonset.yaml

  The following shows an example of the file content. For more information about DaemonSets, see the Kubernetes documentation.

  apiVersion: apps/v1
  kind: DaemonSet
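  # The original example is truncated at this point in the diff. The remainder of a minimal
  # Nginx DaemonSet manifest typically looks as follows; the name, labels, image, and
  # imagePullSecrets value are illustrative assumptions rather than values from this document.
  metadata:
    name: nginx-daemonset
    labels:
      app: nginx
  spec:
    selector:
      matchLabels:
        app: nginx                         # must match the pod template labels below
    template:
      metadata:
        labels:
          app: nginx
      spec:
        containers:
          - name: nginx
            image: nginx:alpine            # replace with an image reachable from your nodes
            resources:
              requests:
                cpu: 250m
                memory: 512Mi
              limits:
                cpu: 250m
                memory: 512Mi
        imagePullSecrets:
          - name: default-secret           # commonly used in CCE examples; adjust as needed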
diff --git a/docs/cce/umn/cce_10_0222.html b/docs/cce/umn/cce_10_0222.html

diff --git a/docs/cce/umn/cce_10_0232.html b/docs/cce/umn/cce_10_0232.html
                                                    4. Workload Affinity (podAffinity)/Workload Anti-affinity (podAntiAffinity): The nodes to which a pod can be scheduled are determined based on the label of the pod running on a node, but not the label of the node. Similar to node affinity, workload affinity and anti-affinity are also of requiredDuringSchedulingIgnoredDuringExecution and preferredDuringSchedulingIgnoredDuringExecution types.

                                                      Workload affinity and anti-affinity require a certain amount of computing time, which significantly slows down scheduling in large-scale clusters. Do not enable workload affinity and anti-affinity in a cluster that contains hundreds of nodes.

You can create the preceding affinity policies on the console. For details, see Configuring Load Affinity on the Console or Configuring Node Affinity on the Console.

Configuring Load Affinity on the Console

1. When creating a workload, click Scheduling in the Advanced Settings area. For details about how to create a workload, see Creating a Workload.
2. Select a load affinity scheduling policy.

  • Not configured: No load affinity policy is configured.
  • Multi-AZ deployment preferred: Workload pods are preferentially scheduled to nodes in different AZs through pod anti-affinity.
  • Forcible multi-AZ deployment: Workload pods are forcibly scheduled to different AZs and different nodes through pod anti-affinity. When this scheduling policy is used, if there are fewer nodes than pods or node resources are insufficient, the extra pods will fail to run.
  • Custom policies: allow flexible scheduling of workload pods based on pod labels. For details about the supported scheduling policies, see Table 1. Select a proper policy type and click the add icon to add a policy. For details about the parameters, see Table 2.
Table 1 Load affinity policies

Policy: Workload affinity

Type: Required

Description: Hard constraint, which corresponds to requiredDuringSchedulingIgnoredDuringExecution in YAML for specifying the conditions that must be met.
Select pods that require affinity by label. If such pods already run on a node in the topology key, the scheduler will forcibly schedule the created pods to that topology key.
NOTE:
If multiple affinity rules are configured, multiple labels will be used to filter pods that require affinity, and the newly created pods must have affinity with all pods that meet the label filtering conditions. As a result, all pods that meet the label filtering conditions are located in the same topology key for scheduling.

Type: Preferred

Description: Soft constraint, which corresponds to preferredDuringSchedulingIgnoredDuringExecution in YAML for specifying the conditions that need to be met as much as possible.
Select pods that require affinity by label. If such pods already run on a node in the topology key, the scheduler will preferentially schedule the created pods to that topology key.
NOTE:
If multiple affinity rules are configured, multiple labels will be used to filter pods that require affinity, and the newly created pods will preferentially have affinity with the pods that meet the label filtering conditions. However, even if no pod meets the label filtering conditions, a topology key will be selected for scheduling.

Policy: Workload anti-affinity

Type: Required

Description: Hard constraint, which corresponds to requiredDuringSchedulingIgnoredDuringExecution in YAML for specifying the conditions that must be met.
Select one or more pods that require anti-affinity by label. If such pods already run on a node in the topology key, the scheduler will not schedule the created pods to that topology key.
NOTE:
If multiple anti-affinity rules are configured, multiple labels will be used to filter pods that require anti-affinity, and the newly created pods must be anti-affine with all pods that meet the label filtering conditions. As a result, none of the topology keys where the matching pods are located will be used for scheduling.

Type: Preferred

Description: Soft constraint, which corresponds to preferredDuringSchedulingIgnoredDuringExecution in YAML for specifying the conditions that need to be met as much as possible.
Select one or more pods that require anti-affinity by label. If such pods already run on a node in the topology key, the scheduler will preferentially schedule the created pods to other topology keys.
NOTE:
If multiple anti-affinity rules are configured, multiple labels will be used to filter pods that require anti-affinity, and the newly created pods will preferentially be anti-affine with the pods that meet the label filtering conditions. However, even if all topology keys contain pods that require anti-affinity, a topology key will still be selected for scheduling.

Weight

This parameter is available only in a preferred scheduling policy. The weight ranges from 1 to 100. During scheduling, the scheduler adds the weight to the scores of other priority functions and schedules pods to the node with the largest total score.

                                                              Namespace


Topology Key

A topology key (topologyKey) determines the range of nodes to be scheduled based on node labels. For example, if the node label is kubernetes.io/hostname, the range of nodes is determined by node name. Nodes with different names are in different topology keys. In this case, a topology key contains only one node. If the specified label is kubernetes.io/os, the range of nodes is determined by node OS. Nodes running different OSs belong to different topology keys. In this case, a topology key may contain multiple nodes.

After the node range is determined using the topology key, configure the policy for scheduling, including the label key, operator, and label value. The minimum unit for scheduling is a topology key. For example, if a node in a topology key meets the load affinity policy, all nodes in the topology key can be scheduled.

                                                              Label Key


Operator

The following operators are supported:
• In: The label of the affinity or anti-affinity object is in the label value list (values field).
• NotIn: The label of the affinity or anti-affinity object is not in the label value list (values field).
• Exists: The affinity or anti-affinity object has a specified label key.
• DoesNotExist: The affinity or anti-affinity object does not have a specified label key.

                                                              Label Value


                                                            • After the scheduling policy is added, click Create Workload.
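For reference, the Required workload anti-affinity policy described in Table 1 maps to a pod template section like the following sketch; the app=nginx label and the kubernetes.io/hostname topology key are illustrative assumptions:

affinity:
  podAntiAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
            - key: app                           # label key of the pods to avoid
              operator: In
              values:
                - nginx                          # hypothetical label value
        topologyKey: kubernetes.io/hostname      # each node is its own topology key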

Configuring Node Affinity on the Console

1. When creating a workload, click Scheduling in the Advanced Settings area. For details about how to create a workload, see Creating a Workload.
2. Select a node affinity scheduling policy.

  • Not configured: No node affinity policy is configured.
  • Node Affinity: Specify the nodes where workload pods are to be deployed. If no nodes are specified, the pods will be randomly scheduled according to the default cluster scheduling policy.
  • Specified Node Pool Scheduling: Specify the node pools where workload pods are to be deployed. If no node pools are specified, the pods will be randomly scheduled according to the default cluster scheduling policy.
  • Custom policies: allow flexible scheduling of workload pods based on node labels. For details about the supported scheduling policies, see Table 3. Select a proper policy type and click the add icon to add a policy. For details about the parameters, see Table 4. You can also click Specify Node or Specify AZ to quickly select a node or AZ on the console for scheduling.

    Specifying a node or AZ is also implemented through labels. The console frees you from manually entering node labels. The kubernetes.io/hostname label is used when you specify a node, and the failure-domain.beta.kubernetes.io/zone label is used when you specify an AZ.

                                                                      @@ -118,7 +118,7 @@ - @@ -132,7 +132,7 @@ -
                                                                      Table 3 Node affinity settings

                                                                      Parameter

                                                                      Preferred

                                                                      Soft constraint, which corresponds to preferredDuringSchedulingIgnoredDuringExecution for specifying the conditions that are preferentially met.

                                                                      +

                                                                      Soft constraint, which corresponds to preferredDuringSchedulingIgnoredDuringExecution for specifying the conditions that need to be met as much as possible.

                                                                      If multiple rules that are preferentially met are added, scheduling will be performed even if one or none of the rules is met.

                                                                      Label

                                                                      +

                                                                      Label Key

                                                                      When configuring node affinity, enter the node label to be matched.

                                                                      Both default labels and custom labels are supported.

                                                                      @@ -141,7 +141,7 @@

                                                                      Operator

                                                                      The following operators are supported:

                                                                      -
                                                                      • In: The label of the affinity or anti-affinity object is in the label value list (values field).
                                                                      • NotIn: The label of the affinity or anti-affinity object is not in the label value list (values field).
                                                                      • Exists: The affinity or anti-affinity object has a specified label name.
                                                                      • DoesNotExist: The affinity or anti-affinity object does not have the specified label name.
                                                                      • Gt: (available only for node affinity) The label value of the scheduled node is greater than the list value (string comparison).
                                                                      • Lt: (available only for node affinity) The label value of the scheduled node is less than the list value (string comparison).
                                                                      +
                                                                      • In: The label of the affinity or anti-affinity object is in the label value list (values field).
                                                                      • NotIn: The label of the affinity or anti-affinity object is not in the label value list (values field).
                                                                      • Exists: The affinity or anti-affinity object has a specified label key.
                                                                      • DoesNotExist: The affinity or anti-affinity object does not have a specified label key.
                                                                      • Gt: (available only for node affinity) The label value of the scheduled node is greater than the list value (string comparison).
                                                                      • Lt: (available only for node affinity) The label value of the scheduled node is less than the list value (string comparison).

                                                                      Label Value

                                                                      @@ -174,7 +174,7 @@ Labels: beta.kubernetes.io/arch=amd64 os.name=EulerOS_2.0_SP5 os.version=3.10.0-862.14.1.5.h328.eulerosv2r7.x86_64

                                                                      In workload scheduling, common node labels are as follows:

                                                                      -
                                                                      • failure-domain.beta.kubernetes.io/region: region where the node is located.
                                                                      • failure-domain.beta.kubernetes.io/zone: availability zone to which the node belongs.
                                                                      • kubernetes.io/hostname: host name of the node.
                                                                      +
                                                                      • failure-domain.beta.kubernetes.io/region: region where the node is located
                                                                      • failure-domain.beta.kubernetes.io/zone: availability zone to which the node belongs
                                                                      • kubernetes.io/hostname: hostname of the node

                                                                      Kubernetes provides the nodeSelector field. When creating a workload, you can set this field to specify that the pod can be deployed only on a node with the specific label. The following example shows how to use a nodeSelector to deploy the pod only on the node with the gpu=true label.

apiVersion: v1
kind: Pod
...
spec:
  nodeSelector:        # The pod can be deployed only on a node with the gpu=true label.
    gpu: true
...
Node affinity rules can achieve the same results. Compared with nodeSelector, node affinity rules are more complex to write, but they offer a more expressive syntax. You can use the spec.affinity.nodeAffinity field to set node affinity. There are two types of node affinity:
• requiredDuringSchedulingIgnoredDuringExecution: Kubernetes cannot schedule the pod unless the rule is met.
• preferredDuringSchedulingIgnoredDuringExecution: Kubernetes tries to find a node that meets the rule. If a matching node is not available, Kubernetes still schedules the pod.
                                                                      -

                                                                      In these two types of node affinity, requiredDuringScheduling or preferredDuringScheduling indicates that the pod can be scheduled to a node only when all the defined rules are met (required). IgnoredDuringExecution indicates that if the node label changes after Kubernetes schedules the pod, the pod continues to run and will not be rescheduled. However, if kubelet on the node is restarted, kubelet will recheck the node affinity rule, and the pod will still be scheduled to another node.

                                                                      +

                                                                      In these two types of node affinity, requiredDuringScheduling or preferredDuringScheduling indicates that the pod can be scheduled to a node only when all the defined rules are met (required). IgnoredDuringExecution indicates that any changes to the node label after Kubernetes schedules the pod will not affect the pod's running or cause it to be rescheduled. However, if kubelet on the node is restarted, kubelet will recheck the node affinity rule, and the pod will still be scheduled to another node.

                                                                      -

                                                                      The following is an example of setting node affinity:

                                                                      +

                                                                      The following is an example of configuring node affinity:

                                                                      apiVersion: apps/v1
                                                                       kind: Deployment
                                                                       metadata:
                                                                      @@ -325,7 +325,7 @@ gpu-585455d466-t56cm   1/1     Running   0          2m29s   172.16.0.64   192.16
                                                                       gpu-585455d466-t5w5x   1/1     Running   0          2m29s   172.16.0.41   192.168.0.212

In the preceding example, the node scheduling priority is as follows. Nodes with both SSD and gpu=true labels have the highest priority. Nodes with the SSD label but no gpu=true label have the second priority (weight: 80). Nodes with the gpu=true label but no SSD label have the third priority. Nodes with neither of these labels have the lowest priority.

                                                                      -
                                                                      Figure 1 Scheduling priority
                                                                      +
                                                                      Figure 1 Scheduling priority
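The Deployment manifest for this example is truncated above, so the following is only a hedged sketch of the affinity section implied by the priority description: both rules are preferred (soft) constraints, the SSD rule carries the weight of 80 stated in the text, and the weight of 20 for the gpu=true rule is an assumed lower value, not a figure from the source.

      affinity:
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 80                   # Nodes with the SSD label are preferred first
            preference:
              matchExpressions:
              - key: SSD
                operator: Exists
          - weight: 20                   # Assumed lower weight for the gpu=true rule
            preference:
              matchExpressions:
              - key: gpu
                operator: In
                values:
                - "true"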

                                                                      Workload Affinity (podAffinity)

                                                                      Node affinity rules affect only the affinity between pods and nodes. Kubernetes also supports configuring inter-pod affinity rules. For example, the frontend and backend of an application can be deployed together on one node to reduce access latency. There are also two types of inter-pod affinity rules: requiredDuringSchedulingIgnoredDuringExecution and preferredDuringSchedulingIgnoredDuringExecution.

                                                                      For workload affinity, topologyKey cannot be left blank when requiredDuringSchedulingIgnoredDuringExecution and preferredDuringSchedulingIgnoredDuringExecution are used.

                                                                      @@ -382,7 +382,7 @@ backend-658f6cb858-dlrz8 1/1 Running 0 5m38s 172.16.0.67 1 frontend-67ff9b7b97-dsqzn 1/1 Running 0 6s 172.16.0.70 192.168.0.100 frontend-67ff9b7b97-hxm5t 1/1 Running 0 6s 172.16.0.71 192.168.0.100 frontend-67ff9b7b97-z8pdb 1/1 Running 0 6s 172.16.0.72 192.168.0.100 -

                                                                      The topologyKey field is used to divide topology domains to specify the selection range. If the label keys and values of nodes are the same, the nodes are considered to be in the same topology domain. Then, the contents defined in the following rules are selected. The effect of topologyKey is not fully demonstrated in the preceding example because all the nodes have the kubernetes.io/hostname label, that is, all the nodes are within the range.

                                                                      +

The topologyKey field is used to divide topology keys to specify the selection range. If the label keys and values of nodes are the same, the nodes are considered to be in the same topology key. Scheduling then selects nodes within that range based on the rules defined below. The effect of topologyKey is not fully demonstrated in the preceding example because all the nodes have the kubernetes.io/hostname label, that is, all the nodes are within the range.

                                                                      To see how topologyKey works, assume that the backend of the application has two pods, which are running on different nodes.

                                                                      $ kubectl get po -o wide
                                                                       NAME                       READY   STATUS    RESTARTS   AGE     IP            NODE         
                                                                      @@ -400,7 +400,7 @@ NAME            STATUS   ROLES    AGE   VERSION                            PREFE
                                                                       192.168.0.212   Ready    <none>   91m   v1.15.6-r1-20.3.0.2.B001-15.30.2   
                                                                       192.168.0.94    Ready    <none>   91m   v1.15.6-r1-20.3.0.2.B001-15.30.2   true
                                                                       192.168.0.97    Ready    <none>   91m   v1.15.6-r1-20.3.0.2.B001-15.30.2   true
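The last column in the preceding output is a custom node label. Such a label can be added with kubectl. The commands below are a hedged sketch that assumes the label key is prefer with the value true, matching the topologyKey used in the rule that follows:

$ kubectl label node 192.168.0.94 prefer=true
$ kubectl label node 192.168.0.97 prefer=true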
                                                                      -

                                                                      If the topologyKey of podAffinity is set to prefer, the node topology domains are divided as shown in Figure 2.

                                                                      +

                                                                      If the topologyKey of podAffinity is set to prefer, the node topology keys are divided as shown in Figure 2.

      affinity:
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - topologyKey: prefer        # Topology key used to divide the nodes
            labelSelector:             # Pod label matching rule
              matchExpressions:
              - key: app
                operator: In
                values:
                - backend
                                                                      -
                                                                      Figure 2 Topology domains
                                                                      -

                                                                      During scheduling, node topology domains are divided based on the prefer label. In this example, 192.168.0.97 and 192.168.0.94 are divided into the same topology domain. If a pod with the app=backend label runs in the topology domain, even if not all nodes in the topology domain run the pod with the app=backend label (in this example, only the 192.168.0.97 node has such a pod), frontend is also deployed in this topology domain (192.168.0.97 or 192.168.0.94).

                                                                      +
                                                                      Figure 2 Topology keys
                                                                      +

                                                                      During scheduling, node topology keys are divided based on the prefer label. In this example, 192.168.0.97 and 192.168.0.94 are divided into the same topology key. If a pod with the app=backend label runs in the topology key, even if not all nodes in the topology key run the pod with the app=backend label (in this example, only the 192.168.0.97 node has such a pod), frontend is also deployed in this topology key (192.168.0.97 or 192.168.0.94).

                                                                      $ kubectl create -f affinity3.yaml 
                                                                       deployment.apps/frontend created
                                                                       
                                                                      @@ -427,7 +427,7 @@ frontend-67ff9b7b97-z8pdb   1/1     Running   0          6s      172.16.0.72   1
                                                                       

                                                                      Workload Anti-Affinity (podAntiAffinity)

In contrast to the scenarios where pods are preferentially scheduled onto the same node, sometimes the exact opposite is needed. For example, certain pods may degrade each other's performance if they are deployed on the same node.

For workload anti-affinity, when requiredDuringSchedulingIgnoredDuringExecution is used, the default Kubernetes admission controller LimitPodHardAntiAffinityTopology requires that topologyKey be kubernetes.io/hostname. To use other topology logic, modify or disable this admission controller.

                                                                      -

                                                                      The following is an example of defining an anti-affinity rule. This rule divides node topology domains by the kubernetes.io/hostname label. If a pod with the app=frontend label already exists on a node in the topology domain, pods with the same label cannot be scheduled to other nodes in the topology domain.

                                                                      +

                                                                      The following is an example of defining an anti-affinity rule. This rule divides node topology keys by the kubernetes.io/hostname label. If a pod with the app=frontend label already exists on a node in the topology key, pods with the same label cannot be scheduled to other nodes in the topology key.

                                                                      apiVersion: apps/v1
                                                                       kind: Deployment
                                                                       metadata:
                                                                      @@ -459,14 +459,14 @@ spec:
                                                                             affinity:
                                                                               podAntiAffinity:
                                                                                 requiredDuringSchedulingIgnoredDuringExecution:
                                                                      -          - topologyKey: kubernetes.io/hostname   # Topology domain of the node
                                                                      +          - topologyKey: kubernetes.io/hostname   # Topology key of the node
                                                                                   labelSelector:    # Pod label matching rule
                                                                                     matchExpressions: 
                                                                                     - key: app
                                                                                       operator: In 
                                                                                       values: 
                                                                                       - frontend
                                                                      -

                                                                      Create an anti-affinity rule and view the deployment result. In the example, node topology domains are divided by the kubernetes.io/hostname label. The label values of nodes with the kubernetes.io/hostname label are different, so there is only one node in a topology domain. If a frontend pod already exists in a topology domain, pods with the same label will not be scheduled to the topology domain. In this example, there are only four nodes. Therefore, there is one pod which is in the Pending state and cannot be scheduled.

                                                                      +

                                                                      Create an anti-affinity rule and view the deployment result. In the example, node topology keys are divided by the kubernetes.io/hostname label. The label values of nodes with the kubernetes.io/hostname label are different, so there is only one node in a topology key. If a topology key contains only one node where a frontend pod already exists, pods with the same label will not be scheduled to that topology key. In this example, there are only four nodes. Therefore, there is one pod which is in the Pending state and cannot be scheduled.

                                                                      $ kubectl create -f affinity4.yaml 
                                                                       deployment.apps/frontend created
                                                                       
                                                                      @@ -479,12 +479,12 @@ frontend-6f686d8d87-q7cfq   1/1     Running   0          18s   172.16.0.47   192
                                                                       frontend-6f686d8d87-xl8hx   1/1     Running   0          18s   172.16.0.23   192.168.0.94 

                                                                      Operator Values

You can use the operator field to define the logical relationship used when matching labels. The value of operator can be:

                                                                      -
                                                                      • In: The label of the affinity or anti-affinity object is in the label value list (values field).
                                                                      • NotIn: The label of the affinity or anti-affinity object is not in the label value list (values field).
                                                                      • Exists: The affinity or anti-affinity object has a specified label name.
                                                                      • DoesNotExist: The affinity or anti-affinity object does not have the specified label name.
                                                                      • Gt: (available only for node affinity) The label value of the scheduled node is greater than the list value (string comparison).
                                                                      • Lt: (available only for node affinity) The label value of the scheduled node is less than the list value (string comparison).
                                                                      +
                                                                      • In: The label of the affinity or anti-affinity object is in the label value list (values field).
                                                                      • NotIn: The label of the affinity or anti-affinity object is not in the label value list (values field).
                                                                      • Exists: The affinity or anti-affinity object has a specified label key.
                                                                      • DoesNotExist: The affinity or anti-affinity object does not have a specified label key.
                                                                      • Gt: (available only for node affinity) The label value of the scheduled node is greater than the list value (string comparison).
                                                                      • Lt: (available only for node affinity) The label value of the scheduled node is less than the list value (string comparison).
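As an illustration of how these operators are written, the matchExpressions snippet below combines In and Gt in a node affinity rule. It is a hedged sketch: the label keys gpu and cpu-count are placeholders chosen for this example, not labels taken from the document.

              matchExpressions:
              - key: gpu            # In: the node label value must appear in the values list
                operator: In
                values:
                - "true"
              - key: cpu-count      # Gt: the node label value must be greater than the listed value (node affinity only)
                operator: Gt
                values:
                - "8"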
                                                                      diff --git a/docs/cce/umn/cce_10_0240.html b/docs/cce/umn/cce_10_0240.html index 4ac572fa..bb0ef66b 100644 --- a/docs/cce/umn/cce_10_0240.html +++ b/docs/cce/umn/cce_10_0240.html @@ -4,11 +4,12 @@

                                                                      cce-hpa-controller is a CCE-developed add-on, which can be used to flexibly scale in or out Deployments based on metrics such as CPU usage and memory usage.

                                                                      Main Functions

                                                                      • Scaling can be performed based on the percentage of the current number of pods.
                                                                      • The minimum scaling step can be set.
                                                                      • Different scaling operations can be performed based on the actual metric values.
                                                                      -

                                                                      Constraints

                                                                      • This add-on can be installed only in clusters of v1.15 or later.
• If the add-on version is 1.2.11 or later, an add-on that can provide the metrics API must be installed.
                                                                        • Kubernetes Metrics Server: provides basic resource usage metrics, such as container CPU and memory usage. It is supported by all cluster versions.
                                                                        +

                                                                        Notes and Constraints

                                                                        Installing the Add-on

                                                                        1. Log in to the CCE console and click the cluster name to access the cluster console. Click Add-ons in the navigation pane, locate CCE Advanced HPA on the right, and click Install.
                                                                        2. On the Install Add-on page, configure the specifications.

                                                                          -

                                                                          Table 1 cce-hpa-controller configuration

                                                                          Parameter

                                                                          +
                                                                          @@ -17,7 +18,7 @@ @@ -25,6 +26,7 @@
                                                                          Table 1 Add-on configuration

                                                                          Parameter

                                                                          Description

                                                                          Add-on Specifications

                                                                          Select Single or Custom for Add-on Specifications.

                                                                          -
                                                                          NOTE:

                                                                          Single-instance add-ons are used only for service verification. In commercial deployments, select Custom based on the cluster specifications. The specifications of cce-hpa-controller are decided by the total number of containers in the cluster and the number of scaling policies. You are advised to configure 500m CPU and 1,000 MiB memory for every 5,000 containers, and 100m CPU and 500 MiB memory for every 1,000 scaling policies.

                                                                          +
                                                                          NOTE:

                                                                          A single instance is solely for verification purposes. For commercial situations, you need to choose Custom based on the cluster specifications. The add-on specifications are influenced by the total number of containers in clusters and the number of scaling policies. For typical situations, it is recommended that you configure 500m CPU cores and 1,000 MiB of memory for every 5,000 containers. As for scaling policies, 100m CPU cores and 500 MiB of memory should be configured for every 1,000 of them.
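For example, under this guideline a cluster running about 10,000 containers with 2,000 scaling policies would need roughly 2 x 500m + 2 x 100m = 1.2 vCPUs and 2 x 1,000 MiB + 2 x 500 MiB = 3,000 MiB of memory for the add-on. This is an illustrative calculation based on the stated ratios, not a figure from the product documentation.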

                                                                          Number of pods that will be created to match the selected add-on specifications.

                                                                          If you select Custom, you can adjust the number of pods as required.

                                                                          +

                                                                          High availability is not possible with a single pod. If an error occurs on the node where the add-on instance runs, the add-on will fail.

                                                                          Containers

                                                                          @@ -36,32 +38,31 @@
                                                                          -

                                                                        3. Select Single or Custom for Add-on Specifications.

                                                                          • Pods: Set the number of pods based on service requirements.
                                                                          • Containers: Set a proper container quota based on service requirements.

                                                                        4. Configure scheduling policies for the add-on.

                                                                          • Scheduling policies do not take effect on add-on instances of the DaemonSet type.
                                                                          • When configuring multi-AZ deployment or node affinity, ensure that there are nodes meeting the scheduling policy and that resources are sufficient in the cluster. Otherwise, the add-on cannot run.
                                                                          -
ingress-test      *                 121.**.**.**    80      10s

                                                                        5. Enter http://121.**.**.**:80 in the address box of the browser to access the workload (for example, Nginx workload).

                                                                          121.**.**.** indicates the IP address of the unified load balancer.

                                                                        6. -

                                                                          Creating an Ingress - Interconnecting with an Existing Load Balancer

                                                                          CCE allows you to connect to an existing load balancer when creating an ingress.
                                                                          • Existing dedicated load balancers must be the application type (HTTP/HTTPS) supporting private networks (with a private IP).
                                                                          +

                                                                          Creating an Ingress - Interconnecting with an Existing Load Balancer

                                                                          CCE allows you to connect to an existing load balancer when creating an ingress.
                                                                          • An existing dedicated load balancer must be of the application type (HTTP/HTTPS) and support private networks (with a private IP address).
                                                                          If the cluster version is 1.23 or later, the YAML file configuration is as follows:
                                                                          apiVersion: networking.k8s.io/v1
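The manifest above is cut off by the diff context. As a rough, hedged sketch only, an ingress bound to an existing load balancer in a cluster of v1.23 or later might look like the following; the annotation names (kubernetes.io/elb.id, kubernetes.io/elb.port), the ingress class cce, and the placeholder load balancer ID and Service name are assumptions based on typical CCE ingress examples and are not confirmed by this excerpt.

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-test
  annotations:
    kubernetes.io/elb.id: <existing-load-balancer-id>   # ID of the existing dedicated load balancer (assumed annotation)
    kubernetes.io/elb.port: '80'                        # Listener port on the load balancer (assumed annotation)
spec:
  ingressClassName: cce            # Assumed ingress class for ELB ingresses in clusters of v1.23 or later
  rules:
  - http:
      paths:
      - path: /
        pathType: ImplementationSpecific
        backend:
          service:
            name: nginx            # Placeholder Service name for the Nginx workload mentioned above
            port:
              number: 80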
                                                                          diff --git a/docs/cce/umn/cce_10_0276.html b/docs/cce/umn/cce_10_0276.html
                                                                          index 8f5f10bb..db27caea 100644
                                                                          --- a/docs/cce/umn/cce_10_0276.html
                                                                          +++ b/docs/cce/umn/cce_10_0276.html
                                                                          @@ -2,9 +2,9 @@
                                                                           
                                                                           

                                                                          Performing Rolling Upgrade for Nodes

                                                                          Scenario

                                                                          In a rolling upgrade, a new node is created, existing workloads are migrated to the new node, and then the old node is deleted. Figure 1 shows the migration process.

                                                                          -
                                                                          Figure 1 Workload migration
                                                                          +
                                                                          Figure 1 Workload migration
                                                                          -

                                                                          Constraints

                                                                          • The original node and the target node to which the workload is to be migrated must be in the same cluster.
                                                                          • The cluster must be of v1.13.10 or later.
                                                                          • The default node pool DefaultPool does not support this configuration.
                                                                          +

                                                                          Notes and Constraints

                                                                          • The original node and the target node to which the workload is to be migrated must be in the same cluster.
                                                                          • The cluster must be of v1.13.10 or later.
                                                                          • The default node pool does not support this configuration.

                                                                          Scenario 1: The Original Node Is in DefaultPool

                                                                          1. Create a node pool. For details, see Creating a Node Pool.
                                                                          2. On the node pool list page, click View Node in the Operation column of the target node pool. The IP address of the new node is displayed in the node list.
                                                                          1. Install and configure kubectl. For details, see Connecting to a Cluster Using kubectl.
                                                                          1. Migrate the workload.

                                                                            1. Add a taint to the node where the workload needs to be migrated out.

                                                                              kubectl taint node [node] key=value:[effect]

                                                                              In the preceding command, [node] indicates the IP address of the node where the workload to be migrated is located. The value of [effect] can be NoSchedule, PreferNoSchedule, or NoExecute. In this example, set this parameter to NoSchedule.
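For example, assuming the node 192.168.0.212 and a hypothetical key/value pair migrate=true (both are placeholders, not values from the document), the taint could be added and later removed as follows:

$ kubectl taint node 192.168.0.212 migrate=true:NoSchedule
$ kubectl taint node 192.168.0.212 migrate=true:NoSchedule-    # The trailing hyphen removes the taint after migration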

                                                                              diff --git a/docs/cce/umn/cce_10_0277.html b/docs/cce/umn/cce_10_0277.html index 9a263b78..3e0efd74 100644 --- a/docs/cce/umn/cce_10_0277.html +++ b/docs/cce/umn/cce_10_0277.html @@ -37,7 +37,12 @@
diff --git a/docs/cce/umn/cce_10_0278.html b/docs/cce/umn/cce_10_0278.html index deb5a32c..5d07a477 100644 --- a/docs/cce/umn/cce_10_0278.html +++ b/docs/cce/umn/cce_10_0278.html @@ -6,10 +6,10 @@

                                                                          Prerequisites

                                                                          At least one cluster has been created.

                                                                          -

                                                                          Constraints

                                                                          A maximum of 6000 Services can be created in each namespace. The Services mentioned here indicate the Kubernetes Service resources added for workloads.

                                                                          +

                                                                          Notes and Constraints

                                                                          A maximum of 6000 Services can be created in each namespace. The Services mentioned here indicate the Kubernetes Service resources added for workloads.

                                                                          -

                                                                          Namespace Types

                                                                          Namespaces can be created in either of the following ways:

                                                                          -
                                                                          • Created automatically: When a cluster is up, the default, kube-public, kube-system, and kube-node-lease namespaces are created by default.
                                                                            • default: All objects for which no namespace is specified are allocated to this namespace.
                                                                            • kube-public: Resources in this namespace can be accessed by all users (including unauthenticated users), such as public add-ons and container charts.
                                                                            • kube-system: All resources created by Kubernetes are in this namespace.
                                                                            • kube-node-lease: Each node has an associated Lease object in this namespace. The object is periodically updated by the node. Both NodeStatus and NodeLease are considered as heartbeats from a node. In versions earlier than v1.13, only NodeStatus is available. The NodeLease feature is introduced in v1.13. NodeLease is more lightweight than NodeStatus. This feature significantly improves the cluster scalability and performance.
                                                                            +

                                                                            Namespace Types

                                                                            Namespaces can be created automatically or manually.

                                                                            +
                                                                            • Created automatically: When a cluster is up, the default, kube-public, kube-system, and kube-node-lease namespaces are created by default.
                                                                              • default: All objects for which no namespace is specified are allocated to this namespace.
                                                                              • kube-public: Resources in this namespace can be accessed by all users (including unauthenticated users) so that some resources in the cluster can be readable in the entire cluster. This is a reserved Kubernetes namespace. Its common attributes are only conventions but not requirements.
                                                                              • kube-system: All resources created by Kubernetes are in this namespace.
                                                                              • kube-node-lease: Each node has an associated Lease object in this namespace. The object is periodically updated by the node. Both NodeStatus and NodeLease are considered as heartbeats from a node. In versions earlier than v1.13, only NodeStatus is available. The NodeLease feature is introduced in v1.13. NodeLease is more lightweight than NodeStatus. This feature significantly improves the cluster scalability and performance.
• Created manually: You can create namespaces to serve separate purposes. For example, you can create three namespaces, one for a development environment, one for a joint debugging environment, and one for a test environment. You can also create one namespace for login services and one for game services.

                                                                            Creating a Namespace

                                                                            1. Log in to the CCE console and click the cluster name to access the cluster console.
                                                                            2. Choose Namespaces in the navigation pane and click Create Namespace in the upper right corner.
                                                                            3. Set namespace parameters based on Table 1.
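A namespace can also be created with kubectl instead of the console. The following is a minimal sketch; the namespace name dev is an arbitrary example:

apiVersion: v1
kind: Namespace
metadata:
  name: dev

Save the manifest as namespace.yaml and run kubectl create -f namespace.yaml, or simply run kubectl create namespace dev.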

                                                                              diff --git a/docs/cce/umn/cce_10_0279.html b/docs/cce/umn/cce_10_0279.html index cd768839..7163078b 100644 --- a/docs/cce/umn/cce_10_0279.html +++ b/docs/cce/umn/cce_10_0279.html @@ -11,42 +11,42 @@

                                                                            Components

                                                                            -

                                                                            Workload scaling components are described as follows:

                                                                            +

                                                                            Workload Scaling Types

                                                                            -
                                                                          Table 2 Configurations for add-on scheduling

                                                                          Parameter

                                                                          +
@@ -89,10 +90,68 @@
                                                                          Table 2 Configurations for add-on scheduling

                                                                          Parameter

                                                                          Description

                                                                          +

                                                                          Description

                                                                          Multi AZ

                                                                          +

                                                                          Multi AZ

                                                                          • Preferred: Deployment pods of the add-on will be preferentially scheduled to nodes in different AZs. If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ.
                                                                          • Equivalent mode: Deployment pods of the add-on are evenly scheduled to the nodes in the cluster in each AZ. If a new AZ is added, you are advised to increase add-on pods for cross-AZ HA deployment. With the Equivalent multi-AZ deployment, the difference between the number of add-on pods in different AZs will be less than or equal to 1. If resources in one of the AZs are insufficient, pods cannot be scheduled to that AZ.
                                                                          • Required: Deployment pods of the add-on will be forcibly scheduled to nodes in different AZs. If there are fewer AZs than pods, the extra pods will fail to run.
                                                                          +
                                                                          • Preferred: Deployment pods of the add-on will be preferentially scheduled to nodes in different AZs. If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ.
                                                                          • Equivalent mode: Deployment pods of the add-on are evenly scheduled to the nodes in the cluster in each AZ. If a new AZ is added, you are advised to increase add-on pods for cross-AZ HA deployment. With the Equivalent multi-AZ deployment, the difference between the number of add-on pods in different AZs will be less than or equal to 1. If resources in one of the AZs are insufficient, pods cannot be scheduled to that AZ.
                                                                          • Required: Deployment pods of the add-on will be forcibly scheduled to nodes in different AZs. If there are fewer AZs than pods, the extra pods will fail to run.

                                                                          Node Affinity

                                                                          +

                                                                          Node Affinity

                                                                          • Not configured: Node affinity is disabled for the add-on.
                                                                          • Node Affinity: Specify the nodes where the add-on is deployed. If you do not specify the nodes, the add-on will be randomly scheduled based on the default cluster scheduling policy.
                                                                          • Specified Node Pool Scheduling: Specify the node pool where the add-on is deployed. If you do not specify the node pool, the add-on will be randomly scheduled based on the default cluster scheduling policy.
                                                                          • Custom Policies: Enter the labels of the nodes where the add-on is to be deployed for more flexible scheduling policies. If you do not specify node labels, the add-on will be randomly scheduled based on the default cluster scheduling policy.

                                                                            If multiple custom affinity policies are configured, ensure that there are nodes that meet all the affinity policies in the cluster. Otherwise, the add-on cannot run.

                                                                            +
                                                                          • Not configured: Node affinity is disabled for the add-on.
                                                                          • Node Affinity: Specify the nodes where the add-on is deployed. If you do not specify the nodes, the add-on will be randomly scheduled based on the default cluster scheduling policy.
                                                                          • Specified Node Pool Scheduling: Specify the node pool where the add-on is deployed. If you do not specify the node pool, the add-on will be randomly scheduled based on the default cluster scheduling policy.
                                                                          • Custom Policies: Enter the labels of the nodes where the add-on is to be deployed for more flexible scheduling policies. If you do not specify node labels, the add-on will be randomly scheduled based on the default cluster scheduling policy.

                                                                            If multiple custom affinity policies are configured, ensure that there are nodes that meet all the affinity policies in the cluster. Otherwise, the add-on cannot run.

                                                                          Toleration

                                                                          +

                                                                          Toleration

                                                                          Using both taints and tolerations allows (not forcibly) the add-on Deployment to be scheduled to a node with the matching taints, and controls the Deployment eviction policies after the node where the Deployment is located is tainted.

                                                                          +

                                                                          Using both taints and tolerations allows (not forcibly) the add-on Deployment to be scheduled to a node with the matching taints, and controls the Deployment eviction policies after the node where the Deployment is located is tainted.

                                                                          The add-on adds the default tolerance policy for the node.kubernetes.io/not-ready and node.kubernetes.io/unreachable taints, respectively. The tolerance time window is 60s.

                                                                          -

                                                                          For details, see Taints and Tolerations.

                                                                          +

                                                                          For details, see Configuring Tolerance Policies.
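In plain Kubernetes terms, the default tolerance policy described above corresponds to tolerations on the add-on pods similar to the following sketch. The taint keys and the 60s window come from the description; the field layout is standard Kubernetes and is shown here only for reference.

      tolerations:
      - key: node.kubernetes.io/not-ready
        operator: Exists
        effect: NoExecute
        tolerationSeconds: 60
      - key: node.kubernetes.io/unreachable
        operator: Exists
        effect: NoExecute
        tolerationSeconds: 60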

Change History

Table 4 Release history

Add-on Version    Supported Cluster Version                    New Feature
1.4.3             v1.21, v1.23, v1.25, v1.27, v1.28, v1.29     Fixed some issues.
1.3.43            v1.21, v1.23, v1.25, v1.27, v1.28            Fixed some issues.
1.3.42            v1.21, v1.23, v1.25, v1.27, v1.28            CCE clusters 1.28 are supported.
1.3.14            v1.19, v1.21, v1.23, v1.25, v1.27            CCE clusters 1.27 are supported.
diff --git a/docs/cce/umn/cce_10_0245.html b/docs/cce/umn/cce_10_0245.html
index b23b42f3..ea109ec7 100644
--- a/docs/cce/umn/cce_10_0245.html
+++ b/docs/cce/umn/cce_10_0245.html
@@ -9,7 +9,7 @@

                                                                          Permission Design

                                                                          The following uses company X as an example.

                                                                          Generally, a company has multiple departments or projects, and each department has multiple members. Design how permissions are to be assigned to different groups and projects, and set a user name for each member to facilitate subsequent user group and permissions configuration.

                                                                          The following figure shows the organizational structure of a department in a company and the permissions to be assigned to each member:

                                                                          -

                                                                          +

                                                                          Director: David

                                                                          David is a department director of company X. To assign him all CCE permissions (both cluster and namespace permissions), create the cce-admin user group for David on the IAM console and assign the CCE Administrator role.

                                                                          CCE Administrator: This role has all CCE permissions. You do not need to assign other permissions.

diff --git a/docs/cce/umn/cce_10_0249.html b/docs/cce/umn/cce_10_0249.html
index d997dfab..fea8a7a2 100644
--- a/docs/cce/umn/cce_10_0249.html
+++ b/docs/cce/umn/cce_10_0249.html
@@ -55,7 +55,7 @@ spec:

                                                                          This mode applies to scenarios where high performance is not required and the source IP address of the client does not need to be retained. This mode brings more balanced load to each node in the cluster.

                                                                          This mode applies to scenarios where high performance is required and the source IP address of the client need to be retained. However, traffic is forwarded only to the node where the container resides, and source IP address translation is not performed.

                                                                          +

                                                                          This mode applies to scenarios where high performance is required and the source IP address of the client needs to be retained. However, traffic is forwarded only to the node where the container resides, and source IP address translation is not performed.

                                                                          Access mode

@@ -234,7 +234,7 @@ spec:
   selector:
     app: nginx
   type: LoadBalancer
-
                                                                        7. Leveraging the pass-through feature of the Service, kube-proxy is bypassed when the ELB address is used for access. The ELB load balancer is accessed first, and then the workload. For details, see Enabling Passthrough Networking for LoadBalancer Services.
                                                                          • After passthrough networking is configured for a dedicated load balancer, in a CCE standard cluster, pods that run on the same node as the workload and pods that run on the same node cannot be accessed through the LoadBalancer Service.
                                                                          • Passthrough networking is not supported for clusters of v1.15 or earlier.
                                                                          • In IPVS network mode, the pass-through settings of Service connected to the same ELB must be the same.
                                                                          • If node-level (local) service affinity is used, kubernetes.io/elb.pass-through is automatically set to onlyLocal to enable pass-through.
                                                                          +
                                                                        8. Leveraging the pass-through feature of the Service, kube-proxy is bypassed when the ELB address is used for access. The ELB load balancer is accessed first, and then the workload. For details, see Configuring Passthrough Networking for a LoadBalancer Service.
• In a CCE standard cluster, after passthrough networking is configured for a dedicated load balancer, the private IP address of the load balancer cannot be accessed from the node where the workload pod resides, or from other containers on the same node as the workload.
                                                                          • Passthrough networking is not supported for clusters of v1.15 or earlier.
                                                                          • In IPVS network mode, the passthrough settings of Services connected to the same load balancer must be the same.
                                                                          • If node-level (local) service affinity is used, kubernetes.io/elb.pass-through is automatically set to onlyLocal to enable pass-through.
                                                                          apiVersion: v1 
                                                                           kind: Service 
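The Service example above is truncated in this diff. For reference, a LoadBalancer Service with passthrough enabled looks roughly like the sketch below. The kubernetes.io/elb.pass-through annotation is the one named in the notes above; the load balancer ID and other names are placeholders, so verify them against the passthrough networking section.

    apiVersion: v1
    kind: Service
    metadata:
      name: nginx                                         # placeholder Service name
      annotations:
        kubernetes.io/elb.id: <existing-load-balancer-id> # placeholder ID of a dedicated load balancer
        kubernetes.io/elb.pass-through: "true"            # enables passthrough; set to onlyLocal automatically when node-level (local) affinity is used
    spec:
      selector:
        app: nginx
      externalTrafficPolicy: Local                        # node-level (local) service affinity
      ports:
        - name: service0
          port: 80
          protocol: TCP
          targetPort: 80
      type: LoadBalancer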
                                                                          diff --git a/docs/cce/umn/cce_10_0251.html b/docs/cce/umn/cce_10_0251.html
                                                                          index c4ecc832..81b71391 100644
                                                                          --- a/docs/cce/umn/cce_10_0251.html
                                                                          +++ b/docs/cce/umn/cce_10_0251.html
                                                                          @@ -3,10 +3,10 @@
                                                                           

                                                                          Creating a LoadBalancer Ingress on the Console

                                                                          Prerequisites

                                                                          -

                                                                          Constraints

                                                                          • It is recommended that other resources not use the load balancer automatically created by an ingress. Otherwise, the load balancer will be occupied when the ingress is deleted, resulting in residual resources.
                                                                          • After an ingress is created, upgrade and maintain the configuration of the selected load balancers on the CCE console. Do not modify the configuration on the ELB console. Otherwise, the ingress service may be abnormal.
                                                                          • The URL registered in an ingress forwarding policy must be the same as the URL used to access the backend Service. Otherwise, a 404 error will be returned.
                                                                          • In a cluster using the IPVS proxy mode, if the ingress and Service use the same ELB load balancer, the ingress cannot be accessed from the nodes and containers in the cluster because kube-proxy mounts the LoadBalancer Service address to the ipvs-0 bridge. This bridge intercepts the traffic of the load balancer connected to the ingress. Use different load balancers for the ingress and Service.
                                                                          • A dedicated load balancer must be of the application type (HTTP/HTTPS) type and support private networks (with a private IP).
                                                                          • If multiple ingresses access the same ELB port in a cluster, the listener configuration items (such as the certificate associated with the listener and the HTTP2 attribute of the listener) are subject to the configuration of the first ingress.
                                                                          +

                                                                          Notes and Constraints

                                                                          • It is recommended that other resources not use the load balancer automatically created by an ingress. Otherwise, the load balancer will be occupied when the ingress is deleted, resulting in residual resources.
                                                                          • After an ingress is created, upgrade and maintain the configuration of the selected load balancers on the CCE console. Do not modify the configuration on the ELB console. Otherwise, the ingress service may be abnormal.
                                                                          • The URL registered in an ingress forwarding policy must be the same as the URL used to access the backend Service. Otherwise, a 404 error will be returned.
                                                                          • In a cluster using the IPVS proxy mode, if the ingress and Service use the same ELB load balancer, the ingress cannot be accessed from the nodes and containers in the cluster because kube-proxy mounts the LoadBalancer Service address to the ipvs-0 bridge. This bridge intercepts the traffic of the load balancer connected to the ingress. Use different load balancers for the ingress and Service.
                                                                          • A dedicated load balancer must be of the application type (HTTP/HTTPS) and support private networks (with a private IP address).
                                                                          • If multiple ingresses access the same ELB port in a cluster, the listener configuration items (such as the certificate associated with the listener and the HTTP/2 attribute of the listener) are subject to the configuration of the first ingress.

                                                                          Adding a LoadBalancer Ingress

                                                                          This section uses an Nginx workload as an example to describe how to add a LoadBalancer ingress.

                                                                          -
                                                                          1. Log in to the CCE console and click the cluster name to access the cluster console.
                                                                          2. Choose Services & Ingresses in the navigation pane, click the Ingresses tab, and click Create Ingress in the upper right corner.
                                                                          3. Configure ingress parameters.

                                                                            • Name: specifies a name of an ingress, for example, ingress-demo.
                                                                            • Load Balancer: Select a load balancer type and creation mode.

                                                                              A load balancer can be dedicated or shared. A dedicated load balancer must be of the application (HTTP/HTTPS) type and support private networks.

                                                                              +
                                                                              1. Log in to the CCE console and click the cluster name to access the cluster console.
                                                                              2. Choose Services & Ingresses in the navigation pane, click the Ingresses tab, and click Create Ingress in the upper right corner.
                                                                              3. Configure ingress parameters.

                                                                                • Name: Customize the name of an ingress, for example, ingress-demo.
                                                                                • Load Balancer: Select a load balancer type and creation mode.

                                                                                  A load balancer can be dedicated or shared. A dedicated load balancer must be of the application type (HTTP/HTTPS) and support private networks.

                                                                                  You can select Use existing or Auto create to obtain a load balancer. For details about the configuration of different creation modes, see Table 1.
@@ -21,18 +21,19 @@
-
                                                                                  Table 1 Load balancer configurations

                                                                                  How to Create

                                                                                  Auto create

                                                                                  • Instance Name: Enter a load balancer name.
                                                                                  • Public Access: If enabled, an EIP with 5 Mbit/s bandwidth will be created.
                                                                                  • AZ: available only to dedicated load balancers. You can create load balancers in multiple AZs to improve service availability. You can deploy a load balancer in multiple AZs for high availability.
                                                                                  • Specifications (available only to dedicated load balancers)
                                                                                    • Fixed: applies to stable traffic, billed based on specifications.
                                                                                    -
                                                                                  +
                                                                                  • Instance Name: Enter a load balancer name.
• AZ: available only to dedicated load balancers. You can deploy a load balancer in multiple AZs to improve service availability.
                                                                                  • Frontend Subnet: available only to dedicated load balancers. It is used to allocate IP addresses for load balancers to provide services externally.
                                                                                  • Backend Subnet: available only to dedicated load balancers. It is used to allocate IP addresses for load balancers to access the backend service.
                                                                                  • Network/Application-oriented Specifications (available only to dedicated load balancers)
                                                                                    • Elastic: applies to fluctuating traffic, billed based on total traffic. Clusters of v1.21.10-r10, v1.23.8-r10, v1.25.3-r10, and later versions support elastic specifications.
                                                                                    • Fixed: applies to stable traffic, billed based on specifications.
                                                                                    +
                                                                                  • EIP: If you select Auto create, you can configure the billing mode and size of the public network bandwidth.
                                                                                  • Resource Tag: You can add resource tags to classify resources. You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.
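If you later create the ingress with kubectl instead of on the console, the same auto-creation options are expressed through ingress annotations. The sketch below is illustrative only: the annotation keys are assumed from CCE's usual naming, and the JSON fields simply mirror the auto-creation examples in the kubectl sections that follow.

    metadata:
      annotations:
        kubernetes.io/elb.class: performance   # assumed key; performance = dedicated, union = shared
        kubernetes.io/elb.autocreate:          # assumed key for load balancer auto-creation
          '{
              "type": "public",
              "bandwidth_name": "cce-bandwidth-******",
              "bandwidth_chargemode": "traffic",
              "bandwidth_size": 5,
              "bandwidth_sharetype": "PER",
              "eip_type": "5_bgp",
              "l7_flavor_name": "L7_flavor.elb.s1.small"
           }'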
                                                                                  -
                                                                                • Listener: An ingress configures a listener for the load balancer, which listens to requests from the load balancer and distributes traffic. After the configuration is complete, a listener is created on the load balancer. The default listener name is k8s__<Protocol type>_<Port number>, for example, k8s_HTTP_80.
                                                                                  • External Protocol: HTTP or HTTPS
                                                                                  • External Port: port number that is open to the ELB service address. The port number can be specified randomly.
                                                                                  • Certificate Source: TLS secret and ELB server certificate are supported.
                                                                                  • Server Certificate: When an HTTPS listener is created for a load balancer, bind a certificate to the load balancer to support encrypted authentication for HTTPS data transmission.
                                                                                    • TLS secret: For details about how to create a secret certificate, see Creating a Secret.
                                                                                    • ELB server certificate: Use the certificate created in the ELB service.
                                                                                    +
                                                                                  • Listener: An ingress configures a listener for the load balancer, which listens to requests from the load balancer and distributes traffic. After the configuration is complete, a listener is created on the load balancer. The default listener name is k8s__<Protocol type>_<Port number>, for example, k8s_HTTP_80.
                                                                                    • External Protocol: HTTP and HTTPS are available.
                                                                                    • External Port: port number that is open to the ELB service address. The port number is configurable.
                                                                                    • Access Control
                                                                                      • Allow all IP addresses: No access control is configured.
                                                                                      • Trustlist: Only the selected IP address group can access the load balancer.
                                                                                      • Blocklist: The selected IP address group cannot access the load balancer.
                                                                                      +
                                                                                    • Certificate Source: TLS secret and ELB server certificate are supported.
                                                                                    • Server Certificate: When an HTTPS listener is created for a load balancer, bind a certificate to the load balancer to support encrypted authentication for HTTPS data transmission.
                                                                                      • TLS secret: For details about how to create a secret certificate, see Creating a Secret.
                                                                                      • ELB server certificate: Use the certificate created in the ELB service.

                                                                                      If there is already an HTTPS ingress for the chosen port on the load balancer, the certificate of the new HTTPS ingress must be the same as the certificate of the existing ingress. This means that a listener has only one certificate. If two certificates, each with a different ingress, are added to the same listener of the same load balancer, only the certificate added earliest takes effect on the load balancer.

                                                                                      -
                                                                                    • SNI: Server Name Indication (SNI) is an extended protocol of TLS. It allows multiple TLS-based access domain names to be provided for external systems using the same IP address and port. Different domain names can use different security certificates. After SNI is enabled, the client is allowed to submit the requested domain name when initiating a TLS handshake request. After receiving the TLS request, the load balancer searches for the certificate based on the domain name in the request. If the certificate corresponding to the domain name is found, the load balancer returns the certificate for authorization. Otherwise, the default certificate (server certificate) is returned for authorization.
                                                                                      • The SNI option is available only when HTTPS is used.
                                                                                      +
• SNI: Server Name Indication (SNI) is an extension to TLS. SNI allows multiple TLS-compliant domain names to be served over the same IP address and port number, and different domain names can use different security certificates. After SNI is enabled, the client submits the requested domain name when initiating a TLS handshake. After receiving the TLS request, the load balancer searches for the certificate based on that domain name. If a matching certificate is found, the load balancer returns it for authorization. Otherwise, the default certificate (server certificate) is returned.
                                                                                      • The SNI option is available only when HTTPS is used.
                                                                                      • This function is supported only in clusters of v1.15.11 and later.
                                                                                      • Only one domain name can be specified for each SNI certificate. Wildcard-domain certificates are supported.
                                                                                      • For ingresses connected to the same ELB port, do not configure SNIs with the same domain name but different certificates. Otherwise, the SNIs will be overwritten.
                                                                                    • Security Policy: combinations of different TLS versions and supported cipher suites available to HTTPS listeners.

                                                                                      For details about security policies, see ELB User Guide.
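For reference, the TLS secret option above stores the certificate in a Kubernetes secret roughly like the sketch below. The name and data are placeholders, and CCE may expect a specific secret type, so follow Creating a Secret for the exact format.

    apiVersion: v1
    kind: Secret
    metadata:
      name: ingress-tls-secret             # placeholder name referenced by the HTTPS listener
      namespace: default
    type: kubernetes.io/tls                # standard TLS secret type; CCE may also use its own IngressTLS type
    data:
      tls.crt: <base64-encoded certificate>
      tls.key: <base64-encoded private key>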

                                                                                      @@ -40,14 +41,55 @@
                                                                        9. Backend Protocol:

                                                                          When the listener is HTTP-compliant, only HTTP can be selected.

                                                                          If it is an HTTPS listener, this parameter can be set to HTTP or HTTPS.

10. Advanced Options

    Idle Timeout
      Description: Timeout for an idle client connection. If there are no requests reaching the load balancer during the timeout duration, the load balancer will disconnect the connection from the client and establish a new connection when there is a new request.
      Restrictions: None

    Request Timeout
      Description: Timeout for waiting for a request from a client. There are two cases:
      • If the client fails to send a request header to the load balancer during the timeout duration, the request will be interrupted.
      • If the interval between two consecutive request bodies reaching the load balancer is greater than the timeout duration, the connection will be disconnected.
      Restrictions: None

    Response Timeout
      Description: Timeout for waiting for a response from a backend server. After a request is forwarded to the backend server, if the backend server does not respond during the timeout duration, the load balancer will stop waiting and return HTTP 504 Gateway Timeout.
      Restrictions: None

    HTTP2
      Description: Whether to use HTTP/2 for a client to communicate with a load balancer. Request forwarding using HTTP/2 improves the access performance between your application and the load balancer. However, the load balancer still uses HTTP/1.x to forward requests to the backend server.
      Restrictions: This function is available only when the listener is HTTPS-compliant.
                                                                          • Forwarding Policy: When the access address of a request matches the forwarding policy (a forwarding policy consists of a domain name and URL, for example, 10.117.117.117:80/helloworld), the request is forwarded to the corresponding target Service for processing. You can click to add multiple forwarding policies.
                                                                            • Domain Name: actual domain name. Ensure that the domain name has been registered and archived. Once a domain name rule is configured, you must use the domain name for access.
                                                                            • URL Matching Rule
                                                                              • Prefix match: If the URL is set to /healthz, the URL that meets the prefix can be accessed, for example, /healthz/v1 and /healthz/v2.
                                                                              • Exact match: The URL can be accessed only when it is fully matched. For example, if the URL is set to /healthz, only /healthz can be accessed.
                                                                              • RegEX match: The URL is matched based on the regular expression. For example, if the regular expression is /[A-Za-z0-9_.-]+/test, all URLs that comply with this rule can be accessed, for example, /abcA9/test and /v1-Ab/test. Two regular expression standards are supported: POSIX and Perl.
                                                                            • URL: access path to be registered, for example, /healthz.

                                                                              The access path added here must exist in the backend application. Otherwise, the forwarding fails.

For example, the default root directory of the Nginx application is /usr/share/nginx/html. When adding /test to the ingress forwarding policy, ensure that the directory /usr/share/nginx/html/test exists in your Nginx application. Otherwise, a 404 error will be returned.

                                                                            • Destination Service: Select an existing Service or create a Service. Services that do not meet search criteria are automatically filtered out.
                                                                            • Destination Service Port: Select the access port of the destination Service.
                                                                            • Set ELB:
• Algorithm: Three algorithms are available: weighted round robin, weighted least connections, and source IP hash.
                                                                                • Weighted round robin: Requests are forwarded to different servers based on their weights, which indicate server processing performance. Backend servers with higher weights receive proportionately more requests, whereas equal-weighted servers receive the same number of requests. This algorithm is often used for short connections, such as HTTP services.
                                                                                • Weighted least connections: In addition to the weight assigned to each server, the number of connections processed by each backend server is considered. Requests are forwarded to the server with the lowest connections-to-weight ratio. Building on least connections, the weighted least connections algorithm assigns a weight to each server based on their processing capability. This algorithm is often used for persistent connections, such as database connections.
                                                                                • Source IP hash: The source IP address of each request is calculated using the hash algorithm to obtain a unique hash key, and all backend servers are numbered. The generated key allocates the client to a particular server. This enables requests from different clients to be distributed in load balancing mode and ensures that requests from the same client are forwarded to the same server. This algorithm applies to TCP connections without cookies.
                                                                                -
                                                                              • Sticky Session: This function is disabled by default. Options are as follows:
                                                                                • Load balancer cookie: Enter the Stickiness Duration , which ranges from 1 to 1,440 minutes.
                                                                                +
                                                                              • Sticky Session: This function is disabled by default. Options are as follows:
• Load balancer cookie: Enter the Stickiness Duration, which ranges from 1 to 1440 minutes.
                                                                                • When the distribution policy uses the source IP hash, sticky session cannot be set.
                                                                                • Dedicated load balancers in the clusters of a version earlier than v1.21 do not support sticky sessions. If sticky sessions are required, use shared load balancers.
• Health Check: Set the health check configuration of the load balancer. If this function is enabled, the following configurations are supported:
@@ -90,10 +132,10 @@
                                                                            • Operation: Click Delete to delete the configuration.
• Annotation: Ingresses provide some advanced CCE functions, which are implemented by annotations. When you create an ingress using kubectl, annotations will be used. For details, see Creating an Ingress - Automatically Creating a Load Balancer or Creating an Ingress - Interconnecting with an Existing Load Balancer.
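For orientation, the domain name, URL matching rule, and destination Service configured above map onto an ingress roughly as in the sketch below. It is a minimal networking.k8s.io/v1 example with placeholder names; the kubernetes.io/elb.port key and the STARTS_WITH value for prefix matching are assumptions to be checked against the kubectl sections, and the load balancer annotations are omitted.

    apiVersion: networking.k8s.io/v1
    kind: Ingress
    metadata:
      name: ingress-demo
      annotations:
        kubernetes.io/elb.port: '80'                            # assumed annotation for the external listener port
        ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH  # assumed value selecting prefix match
    spec:
      ingressClassName: cce
      rules:
        - host: www.example.com              # placeholder domain name
          http:
            paths:
              - path: /healthz               # access path registered in the forwarding policy
                pathType: ImplementationSpecific
                backend:
                  service:
                    name: nginx              # placeholder destination Service
                    port:
                      number: 80             # destination Service port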
                                                                          -

                                                                        13. After the configuration is complete, click OK. After the ingress is created, it is displayed in the ingress list.

                                                                          On the ELB console, you can view the ELB automatically created through CCE. The default name is cce-lb-ingress.UID. Click the ELB name to access its details page. On the Listeners tab page, view the route settings of the ingress, including the URL, listener port, and backend server group port.

                                                                          +

                                                                        14. Click OK. After the ingress is created, it is displayed in the ingress list.

                                                                          On the ELB console, you can check the load balancer automatically created through CCE. The default name is cce-lb-<ingress.UID>. Click the load balancer name to go to the details page. On the Listeners tab page, check the listener and forwarding policy of the target ingress.

                                                                          After an ingress is created, upgrade and maintain the selected load balancer on the CCE console. Do not modify the configuration on the ELB console. Otherwise, the ingress service may be abnormal.

                                                                          -

                                                                        15. Access the /healthz interface of the workload, for example, workload defaultbackend.

                                                                          1. Obtain the access address of the /healthz interface of the workload. The access address consists of the load balancer IP address, external port, and mapping URL, for example, 10.**.**.**:80/healthz.
                                                                          2. Enter the URL of the /healthz interface, for example, http://10.**.**.**:80/healthz, in the address box of the browser to access the workload, as shown in Figure 1.
                                                                            Figure 1 Accessing the /healthz interface of defaultbackend
                                                                            +

                                                                          3. Access the /healthz interface of the workload, for example, workload defaultbackend.

                                                                            1. Obtain the access address of the /healthz interface of the workload. The access address consists of the load balancer IP address, external port, and mapping URL, for example, 10.**.**.**:80/healthz.
                                                                            2. Enter the URL of the /healthz interface, for example, http://10.**.**.**:80/healthz, in the address box of the browser to access the workload, as shown in Figure 1.
                                                                              Figure 1 Accessing the /healthz interface of defaultbackend

diff --git a/docs/cce/umn/cce_10_0252.html b/docs/cce/umn/cce_10_0252.html
index 4da8558f..7c1f8de7 100644
--- a/docs/cce/umn/cce_10_0252.html
+++ b/docs/cce/umn/cce_10_0252.html
@@ -4,16 +4,16 @@

                                                                          Scenario

                                                                          This section uses an Nginx workload as an example to describe how to create a LoadBalancer ingress using kubectl.

                                                                          -

                                                                          Prerequisites

                                                                          +

                                                                          Prerequisites

                                                                          • An ingress provides network access for backend workloads. Ensure that a workload is available in a cluster. If no workload is available, deploy a sample Nginx workload by referring to Creating a Deployment, Creating a StatefulSet, or Creating a DaemonSet.
                                                                          • Services Supported by Ingresses lists the Service types supported by LoadBalancer ingresses.
                                                                          • A dedicated load balancer must be of the application type (HTTP/HTTPS) and support private networks (with a private IP address).

                                                                          Ingress Description of networking.k8s.io/v1

                                                                          In CCE clusters of v1.23 or later, the ingress version is switched to networking.k8s.io/v1.

                                                                          Compared with v1beta1, v1 has the following differences in parameters:

                                                                          • The ingress type is changed from kubernetes.io/ingress.class in annotations to spec.ingressClassName.
                                                                          • The format of backend is changed.
                                                                          • The pathType parameter must be specified for each path. The options are as follows:
                                                                            • ImplementationSpecific: The matching method depends on Ingress Controller. The matching method defined by ingress.beta.kubernetes.io/url-match-mode is used in CCE, which is the same as v1beta1.
                                                                            • Exact: exact matching of the URL, which is case-sensitive.
                                                                            • Prefix: matching based on the URL prefix separated by a slash (/). The match is case-sensitive, and elements in the path are matched one by one. A path element refers to a list of labels in the path separated by a slash (/).
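As a quick comparison, the same forwarding path is declared as follows in the two API versions (the Service name and port are placeholders):

    # networking.k8s.io/v1beta1 (clusters earlier than v1.23): flat backend, no pathType
    - path: /healthz
      backend:
        serviceName: nginx          # placeholder Service
        servicePort: 80
    ---
    # networking.k8s.io/v1 (clusters of v1.23 or later): pathType is mandatory and backend is nested
    - path: /healthz
      pathType: ImplementationSpecific
      backend:
        service:
          name: nginx               # placeholder Service
          port:
            number: 80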
                                                                          -

                                                                          +

                                                                          Creating an Ingress - Automatically Creating a Load Balancer

                                                                          The following describes how to run the kubectl command to automatically create a load balancer when creating an ingress.

                                                                          -
                                                                          1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
                                                                          2. Create a YAML file named ingress-test.yaml. The file name can be customized.

                                                                            vi ingress-test.yaml

                                                                            +
                                                                            1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
                                                                            2. Create a YAML file named ingress-test.yaml. The file name can be customized.

                                                                              vi ingress-test.yaml

                                                                              Starting from cluster v1.23, the ingress version is switched from networking.k8s.io/v1beta1 to networking.k8s.io/v1. For details about the differences between v1 and v1beta1, see Ingress Description of networking.k8s.io/v1.

                                                                              Example of a shared load balancer (public network access) for clusters of v1.23 or later:
                                                                              apiVersion: networking.k8s.io/v1
                                                                              @@ -27,7 +27,7 @@ metadata:
                                                                                     '{
                                                                                         "type":"public",
                                                                                         "bandwidth_name":"cce-bandwidth-******",
                                                                              -          "bandwidth_chargemode":"traffic",
                                                                              +          "bandwidth_chargemode":"traffic",
                                                                                         "bandwidth_size":5,
                                                                                         "bandwidth_sharetype":"PER",
                                                                                         "vip_subnet_cidr_id": "*****",
                                                                              @@ -62,7 +62,7 @@ metadata:
                                                                                     '{
                                                                                         "type":"public",
                                                                                         "bandwidth_name":"cce-bandwidth-******",
                                                                              -          "bandwidth_chargemode":"traffic",
                                                                              +          "bandwidth_chargemode":"traffic",
                                                                                         "bandwidth_size":5,
                                                                                         "bandwidth_sharetype":"PER",
                                                                                         "eip_type":"5_bgp"
                                                                              @@ -91,7 +91,7 @@ metadata:
                                                                                     '{
                                                                                         "type": "public",
                                                                                         "bandwidth_name": "cce-bandwidth-******",
                                                                              -          "bandwidth_chargemode": "traffic",
                                                                              +          "bandwidth_chargemode": "traffic",
                                                                                         "bandwidth_size": 5,
                                                                                         "bandwidth_sharetype": "PER",
                                                                                         "eip_type": "5_bgp",
                                                                              @@ -119,7 +119,7 @@ spec:
                                                                                       pathType: ImplementationSpecific
                                                                                 ingressClassName: cce
                                                                              -
                                                                              Example of a dedicated load balancer (public network access) for clusters of version 1.21 or earlier:
                                                                              apiVersion: networking.k8s.io/v1beta1
                                                                              +
                                                                              Example of a dedicated load balancer (public network access) for clusters of v1.21 or earlier:
                                                                              apiVersion: networking.k8s.io/v1beta1
                                                                               kind: Ingress
                                                                               metadata:
                                                                                 name: ingress-test
                                                                              @@ -132,7 +132,7 @@ metadata:
                                                                                     '{
                                                                                         "type": "public",
                                                                                         "bandwidth_name": "cce-bandwidth-******",
                                                                              -          "bandwidth_chargemode": "traffic",
                                                                              +          "bandwidth_chargemode": "traffic",
                                                                                         "bandwidth_size": 5,
                                                                                         "bandwidth_sharetype": "PER",
                                                                                         "eip_type": "5_bgp",
                                                                              @@ -142,6 +142,7 @@ metadata:
                                                                                         ],
                                                                                         "l7_flavor_name": "L7_flavor.elb.s1.small"
                                                                                      }'
                                                                              +    kubernetes.io/elb.tags: key1=value1,key2=value2           # ELB resource tags
                                                                               spec:
                                                                                 rules:
                                                                                 - host: ''
                                                                              @@ -172,7 +173,7 @@ spec:
                                                                               
                                                                        16. String

                                                                          Select a proper load balancer type.

                                                                          -
                                                                          • union: shared load balancer
                                                                          • performance: dedicated load balancer
                                                                          +
                                                                          • union: shared load balancer
• performance: dedicated load balancer

                                                                          kubernetes.io/ingress.class

                                                                          @@ -204,7 +205,7 @@ spec:

                                                                          String

                                                                          This parameter indicates the external port registered with the address of the LoadBalancer Service.

                                                                          -

                                                                          Supported range: 1 to 65535

                                                                          +

                                                                          The value ranges from 1 to 65535.

                                                                          NOTE:

                                                                          Some ports are high-risk ports and are blocked by default, for example, port 21.

                                                                          kubernetes.io/elb.tags

                                                                          +

                                                                          No

                                                                          +

                                                                          String

                                                                          +

Resource tags to be added to a load balancer. This function is available only when the load balancer is automatically created, and the cluster is of v1.23.11-r0, v1.25.6-r0, v1.27.3-r0, or later.

                                                                          +

                                                                          A tag is in the format of "key=value". Use commas (,) to separate multiple tags.

                                                                          +

                                                                          host

                                                                          No

                                                                          @@ -332,7 +343,7 @@ spec:

                                                                          Bandwidth mode.

                                                                          • traffic: billed by traffic
                                                                          -

                                                                          Default: traffic

                                                                          +

                                                                          Default: traffic

                                                                          bandwidth_size

                                                                          @@ -341,7 +352,7 @@ spec:

                                                                          Integer

                                                                          Bandwidth size. The default value is 1 to 2000 Mbit/s. Configure this parameter based on the bandwidth range allowed in your region.

                                                                          +

                                                                          Bandwidth size. The value ranges from 1 Mbit/s to 2000 Mbit/s by default. Configure this parameter based on the bandwidth range allowed in your region.

                                                                          The minimum increment for bandwidth adjustment varies depending on the bandwidth range.
                                                                          • The minimum increment is 1 Mbit/s if the allowed bandwidth does not exceed 300 Mbit/s.
                                                                          • The minimum increment is 50 Mbit/s if the allowed bandwidth ranges from 300 Mbit/s to 1000 Mbit/s.
                                                                          • The minimum increment is 500 Mbit/s if the allowed bandwidth exceeds 1000 Mbit/s.
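For context, the following is a minimal sketch of how the automatically created load balancer settings and the resource tags described above can be combined in an Ingress manifest. It assumes the JSON shown in the hunk at the top of this section belongs to the kubernetes.io/elb.autocreate annotation; the object name is a placeholder, and the bandwidth and tag values simply mirror the snippet above rather than being recommendations.

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-example                                  # placeholder name
  annotations:
    kubernetes.io/elb.autocreate:
      '{
          "type": "public",
          "bandwidth_name": "cce-bandwidth-******",
          "bandwidth_chargemode": "traffic",
          "bandwidth_size": 5,
          "bandwidth_sharetype": "PER",
          "eip_type": "5_bgp",
          "l7_flavor_name": "L7_flavor.elb.s1.small"
      }'
    kubernetes.io/elb.tags: key1=value1,key2=value2      # resource tags added to the auto-created load balancer
spec:
  rules:
  - host: ''                                             # paths omitted; same structure as the example above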

                                                                          CCE Node Problem Detector

                                                                          +

                                                                          Cloud Native Cluster Monitoring

                                                                          +

                                                                          This add-on uses Prometheus-operator and Prometheus to provide easy-to-use, end-to-end Kubernetes cluster monitoring.

                                                                          +

                                                                          CCE Node Problem Detector

This add-on monitors abnormal events of cluster nodes and connects to a third-party monitoring platform. Running on each node, it collects node issues from different daemons and reports them to the API server. It can be deployed as a DaemonSet or run as a standalone daemon.

                                                                          Table 1 Workload scaling components

                                                                          Type

                                                                          +
                                                                          Table 1 Workload scaling types

                                                                          Type

                                                                          Component Name

                                                                          +

                                                                          Component

                                                                          Component Description

                                                                          +

                                                                          Component Description

                                                                          Reference

                                                                          +

                                                                          Reference

                                                                          HPA

                                                                          +

                                                                          HPA

                                                                          Kubernetes Metrics Server

                                                                          +

                                                                          HorizontalPodAutoscaler (built-in Kubernetes component)

                                                                          A built-in component of Kubernetes, which enables horizontal scaling of pods. It adds the application-level cooldown time window and scaling threshold functions based on the HPA.

                                                                          +

HorizontalPodAutoscaler is a built-in component of Kubernetes for Horizontal Pod Autoscaling (HPA). CCE incorporates the application-level cooldown time window and scaling threshold functions into Kubernetes HPA. (A minimal manifest sketch follows this table.)

                                                                          HPA Policies

                                                                          +

                                                                          Creating an HPA Policy

                                                                          CronHPA

                                                                          +

                                                                          CronHPA

                                                                          CCE Advanced HPA

                                                                          +

                                                                          CCE Advanced HPA

                                                                          CronHPA can scale in or out a cluster at a fixed time. It can work with HPA policies to periodically adjust the HPA scaling scope, implementing workload scaling in complex scenarios.

                                                                          +

                                                                          CronHPA can scale in or out a cluster at a fixed time. It can work with HPA policies to periodically adjust the HPA scaling scope, implementing workload scaling in complex scenarios.

                                                                          CronHPA Policies

                                                                          +

                                                                          Creating a Scheduled CronHPA Policy
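To make the HPA row above concrete, the following is a minimal sketch of a native Kubernetes HorizontalPodAutoscaler object. The object name, target Deployment name, replica bounds, and CPU threshold are illustrative assumptions; CCE-specific additions such as the application-level cooldown time window are covered in Creating an HPA Policy and are not shown here.

apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: hpa-example                  # placeholder name
spec:
  scaleTargetRef:                    # workload whose replica count is adjusted
    apiVersion: apps/v1
    kind: Deployment
    name: example                    # placeholder Deployment name
  minReplicas: 2
  maxReplicas: 10
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 70       # scale out when average CPU utilization exceeds 70%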

                                                                          -

                                                                          Node scaling components are described as follows:

                                                                          +

                                                                          Node Scaling Types

                                                                          -
                                                                          Table 2 Node scaling components

                                                                          Component Name

                                                                          +
                                                                          diff --git a/docs/cce/umn/cce_10_0280.html b/docs/cce/umn/cce_10_0280.html index 639d9065..a72650a0 100644 --- a/docs/cce/umn/cce_10_0280.html +++ b/docs/cce/umn/cce_10_0280.html @@ -1,16 +1,18 @@ -

                                                                          Container Network Models

                                                                          +

                                                                          Container Network

                                                                          diff --git a/docs/cce/umn/cce_10_0281.html b/docs/cce/umn/cce_10_0281.html index 4414779c..de3196e5 100644 --- a/docs/cce/umn/cce_10_0281.html +++ b/docs/cce/umn/cce_10_0281.html @@ -2,7 +2,7 @@

                                                                          Overview

                                                                          The container network assigns IP addresses to pods in a cluster and provides networking services. In CCE, you can select the following network models for your cluster:


                                                                          Network Model Comparison

Table 1 describes the differences between the network models supported by CCE.

                                                                          After a cluster is created, the network model cannot be changed.

                                                                          @@ -20,11 +20,11 @@
                                                                          - - - - @@ -54,31 +54,32 @@ - - - - - - - - - -
                                                                          Table 2 Node scaling types

                                                                          Component Name

                                                                          Component Description

                                                                          Application scenarios

                                                                          • Common container service scenarios
                                                                          • Scenarios that do not have high requirements on network latency and bandwidth
                                                                          +
                                                                          • Low requirements on performance: As the container tunnel network requires additional VXLAN tunnel encapsulation, it has about 5% to 15% of performance loss when compared with the other two container network models. Therefore, the container tunnel network applies to the scenarios that do not have high performance requirements, such as web applications, and middle-end and back-end services with a small number of access requests.
                                                                          • Large-scale networking: Different from the VPC network that is limited by the VPC route quota, the container tunnel network does not have any restriction on the infrastructure. In addition, the container tunnel network controls the broadcast domain to the node level. The container tunnel network supports a maximum of 2000 nodes.
                                                                          • Scenarios that have high requirements on network latency and bandwidth
                                                                          • Containers can communicate with VMs using a microservice registration framework, such as Dubbo and CSE.
                                                                          +
                                                                          • High performance requirements: As no tunnel encapsulation is required, the VPC network model delivers the performance close to that of a VPC network when compared with the container tunnel network model. Therefore, the VPC network model applies to scenarios that have high requirements on performance, such as AI computing and big data computing.
                                                                          • Small- and medium-scale networks: Due to the limitation on VPC routing tables, it is recommended that the number of nodes in a cluster be less than or equal to 1000.
                                                                          • Scenarios that have high requirements on network latency, bandwidth, and performance
                                                                          • Containers can communicate with VMs using a microservice registration framework, such as Dubbo and CSE.
                                                                          +
                                                                          • High performance requirements: Cloud Native 2.0 networks use VPC networks to construct container networks, eliminating the need for tunnel encapsulation or NAT when containers communicate. This makes Cloud Native 2.0 networks ideal for scenarios that demand high bandwidth and low latency.
                                                                          • Large-scale networking: Cloud Native 2.0 networks support a maximum of 2,000 ECS nodes and 100,000 pods.

                                                                          Core technology

                                                                          @@ -45,7 +45,7 @@

                                                                          CCE Turbo cluster

                                                                          Network isolation

                                                                          +

                                                                          Container network isolation

                                                                          Kubernetes native NetworkPolicy for pods

                                                                          Pods support security group isolation.

                                                                          Passthrough networking

                                                                          +

                                                                          Interconnecting pods to a load balancer

                                                                          No

                                                                          +

                                                                          Interconnected through a NodePort

                                                                          No

                                                                          +

                                                                          Interconnected through a NodePort

                                                                          Yes

                                                                          +

                                                                          Directly interconnected using a dedicated load balancer

                                                                          +

                                                                          Interconnected using a shared load balancer through a NodePort

                                                                          IP address management

                                                                          +

                                                                          Managing container IP addresses

                                                                          • The container CIDR block is allocated separately.
                                                                          • CIDR blocks are divided by node and can be dynamically allocated (CIDR blocks can be dynamically added after being allocated.)
                                                                          +
                                                                          • Separate container CIDR blocks needed
                                                                          • Container CIDR blocks divided by node and dynamically added after being allocated
                                                                          • The container CIDR block is allocated separately.
                                                                          • CIDR blocks are divided by node and statically allocated (the CIDR block cannot be changed after a node is created).
                                                                          +
                                                                          • Separate container CIDR blocks needed
                                                                          • Container CIDR blocks divided by node and statically allocated (the allocated CIDR blocks cannot be changed after a node is created)

                                                                          The container CIDR block is divided from the VPC subnet and does not need to be allocated separately.

                                                                          +

                                                                          Container CIDR blocks divided from a VPC subnet (You do not need to configure separate container CIDR blocks.)

                                                                          Network performance

                                                                          Performance loss due to VXLAN encapsulation

                                                                          No tunnel encapsulation. Cross-node packets are forwarded through VPC routers, delivering performance equivalent to that of the host network.

                                                                          +

No tunnel encapsulation, and cross-node traffic forwarded through VPC routers (The performance is comparable to that of the host network, but there is some loss caused by NAT.)

                                                                          The container network is integrated with the VPC network, eliminating performance loss.

                                                                          +

                                                                          Container network integrated with VPC network, eliminating performance loss

                                                                          Networking scale

                                                                          @@ -86,20 +87,19 @@

                                                                          A maximum of 2000 nodes are supported.

                                                                          Suitable for small- and medium-scale networks due to the limitation on VPC routing tables. It is recommended that the number of nodes be less than or equal to 1000.

                                                                          -

                                                                          Each time a node is added to the cluster, a route is added to the VPC routing tables. Therefore, the cluster scale is limited by the VPC routing tables.

                                                                          +

Each time a node is added to the cluster, a route is added to the VPC routing tables. Before creating a cluster, evaluate the cluster scale limit imposed by the VPC routing tables.

                                                                          A maximum of 2000 nodes are supported.

                                                                          +

In a Cloud Native 2.0 network cluster, container IP addresses are assigned from VPC CIDR blocks, and the number of containers supported is restricted by these blocks. Evaluate the cluster's scale limitations before creating it.

                                                                          -
                                                                          1. The scale of a cluster that uses the VPC network model is limited by the custom routes of the VPC. Therefore, estimate the number of required nodes before creating a cluster.
                                                                          2. The scale of a cluster that uses the Cloud Native Network 2.0 model depends on the size of the VPC subnet CIDR block selected for the network attachment definition. Before creating a cluster, evaluate the scale of your cluster.
                                                                          3. By default, VPC routing network supports direct communication between containers and hosts in the same VPC. If a peering connection policy is configured between the VPC and another VPC, the containers can directly communicate with hosts on the peer VPC. In addition, in hybrid networking scenarios such as Direct Connect and VPN, communication between containers and hosts on the peer end can also be achieved with proper planning.
                                                                          4. Do not change the mask of the primary CIDR block on the VPC after a cluster is created. Otherwise, the network will be abnormal.
                                                                          -
                                                                          diff --git a/docs/cce/umn/cce_10_0282.html b/docs/cce/umn/cce_10_0282.html index 219db235..b123737c 100644 --- a/docs/cce/umn/cce_10_0282.html +++ b/docs/cce/umn/cce_10_0282.html @@ -1,30 +1,32 @@ -

                                                                          Container Tunnel Network

                                                                          -

                                                                          Container Tunnel Network Model

                                                                          The container tunnel network is constructed on but independent of the node network through tunnel encapsulation. This network model uses VXLAN to encapsulate Ethernet packets into UDP packets and transmits them in tunnels. Open vSwitch serves as the backend virtual switch. Though at some costs of performance, packet encapsulation and tunnel transmission enable higher interoperability and compatibility with advanced features (such as network policy-based isolation) for most common scenarios.
                                                                          Figure 1 Container tunnel network
                                                                          +

                                                                          Tunnel Network Model

                                                                          +

                                                                          Model Definition

                                                                          A container tunnel network creates a separate network plane for containers by using tunnel encapsulation on the host network plane. The container tunnel network of a CCE cluster uses VXLAN for tunnel encapsulation and Open vSwitch as the virtual switch backend. VXLAN is a protocol that encapsulates Ethernet packets into UDP packets to transmit them through tunnels. Open vSwitch is an open-source virtual switch software that provides functions such as network isolation and data forwarding.

                                                                          +
                                                                          While there may be some performance costs, packet encapsulation and tunnel transmission allow for greater interoperability and compatibility with advanced features, such as network policy-based isolation, in most common scenarios.
                                                                          Figure 1 Container tunnel network
                                                                          -

                                                                          Pod-to-pod communication

                                                                          -
                                                                          • On the same node: Packets are directly forwarded via the OVS bridge on the node.
                                                                          • Across nodes: Packets are encapsulated in the OVS bridge and then forwarded to the peer node.
                                                                          +

                                                                          In a cluster using the container tunnel model, the communication paths between pods on the same node and between pods on different nodes are different.

                                                                          +
                                                                          • Inter-pod communication on the same node: Packets are directly forwarded via the OVS bridge on the node.
                                                                          • Inter-pod communication on different nodes: Packets are encapsulated in the OVS bridge and then forwarded to the target pod on the peer node through the host NIC.

                                                                          Advantages and Disadvantages

                                                                          Advantages

                                                                          -
                                                                          • The container network is decoupled from the node network and is not limited by the VPC quotas and response speed (such as the number of VPC routes, number of elastic ENIs, and creation speed).
                                                                          • Network isolation is supported. For details, see Network Policies.
                                                                          • Bandwidth limits are supported.
                                                                          • Large-scale networking is supported.
                                                                          +
                                                                          • The container network is decoupled from the node network and is not limited by the VPC quotas and response speed (such as the number of VPC routes, number of ENIs, and creation speed).
• Network isolation is supported (a minimal example follows this list). For details, see Configuring Network Policies to Restrict Pod Access.
                                                                          • Bandwidth limits are supported.
                                                                          • Large-scale networking with a maximum of 2000 nodes is supported.
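As a minimal sketch of the network isolation mentioned above, the following Kubernetes NetworkPolicy allows only pods labeled app=frontend to reach pods labeled app=backend in the same namespace. The object name, namespace, and labels are illustrative assumptions; see Configuring Network Policies to Restrict Pod Access for the authoritative configuration.

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-frontend-to-backend    # placeholder name
  namespace: default
spec:
  podSelector:
    matchLabels:
      app: backend                   # the policy protects pods labeled app=backend
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector:
        matchLabels:
          app: frontend              # only pods labeled app=frontend may connect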

                                                                          Disadvantages

                                                                          -
                                                                          • High encapsulation overhead, complex networking, and low performance
                                                                          • Pods cannot directly use functionalities such as EIPs and security groups.
                                                                          • External networks cannot be directly connected to container IP addresses.
                                                                          +
                                                                          • High encapsulation overhead results in poor performance and makes it difficult to locate network faults.
                                                                          • Pods cannot directly use features like EIPs and security groups.
                                                                          • Container IP addresses cannot be directly accessed by external networks.
                                                                          -

                                                                          Applicable Scenarios

                                                                          • Low requirements on performance: As the container tunnel network requires additional VXLAN tunnel encapsulation, it has about 5% to 15% of performance loss when compared with the other two container network models. Therefore, the container tunnel network applies to the scenarios that do not have high performance requirements, such as web applications, and middle-end and back-end services with a small number of access requests.
                                                                          • Large-scale networking: Different from the VPC network that is limited by the VPC route quota, the container tunnel network does not have any restriction on the infrastructure. In addition, the container tunnel network controls the broadcast domain to the node level. The container tunnel network supports a maximum of 2000 nodes.
                                                                          +

                                                                          Application Scenarios

                                                                          • Low requirements on performance: As the container tunnel network requires additional VXLAN tunnel encapsulation, it has about 5% to 15% of performance loss when compared with the other two container network models. Therefore, the container tunnel network applies to the scenarios that do not have high performance requirements, such as web applications, and middle-end and back-end services with a small number of access requests.
                                                                          • Large-scale networking: Different from the VPC network that is limited by the VPC route quota, the container tunnel network does not have any restriction on the infrastructure. In addition, the container tunnel network controls the broadcast domain to the node level. The container tunnel network supports a maximum of 2000 nodes.

                                                                          Container IP Address Management

                                                                          The container tunnel network allocates container IP addresses according to the following rules:

                                                                          -
                                                                          • The container CIDR block is allocated separately, which is irrelevant to the node CIDR block.
                                                                          • IP addresses are allocated by node. One or more CIDR blocks with a fixed size (16 by default) are allocated to each node in a cluster from the container CIDR block.
                                                                          • When the IP addresses on a node are used up, you can apply for a new CIDR block.
                                                                          • The container CIDR block cyclically allocates CIDR blocks to new nodes or existing nodes in sequence.
                                                                          • Pods scheduled to a node are cyclically allocated IP addresses from one or more CIDR blocks allocated to the node.
                                                                          -
                                                                          Figure 2 IP address allocation of the container tunnel network
                                                                          -

                                                                          Maximum number of nodes that can be created in the cluster using the container tunnel network = Number of IP addresses in the container CIDR block / Size of the IP CIDR block allocated to the node by the container CIDR block at a time (16 by default)

                                                                          -

For example, if the container CIDR block is 172.16.0.0/16, the number of IP addresses is 65536. If 16 IP addresses are allocated to a node at a time, a maximum of 4096 (65536/16) nodes can be created in the cluster. This is an extreme case. If 4096 nodes are created, a maximum of 16 pods can be created for each node because only a CIDR block with 16 IP addresses is allocated to each node. In addition, the number of nodes that can be created in a cluster also depends on the node network and cluster scale.

                                                                          +
                                                                          • Container CIDR blocks are separate from node CIDR blocks.
                                                                          • IP addresses are allocated by node. One or more CIDR blocks with a fixed size (16 by default) are allocated to each node in a cluster from the container CIDR block.
                                                                          • When the IP addresses on a node are used up, you can apply for a new CIDR block.
                                                                          • A container CIDR block assigns CIDR blocks to new nodes or existing nodes in a cyclical sequence.
                                                                          • IP addresses from one or more CIDR blocks assigned to a node are allocated to pods scheduled to that node in a cyclical manner.
                                                                          +
                                                                          Figure 2 IP address allocation of the container tunnel network
                                                                          +

Maximum number of nodes that can be created in the cluster using the container tunnel network = Number of IP addresses in the container CIDR block / Size of the IP CIDR block allocated to the node by the container CIDR block at a time (16 by default)

                                                                          +

                                                                          For example, if the container CIDR block is 172.16.0.0/16, the number of IP addresses is 65536. The mask of the container CIDR block allocated to a node is 28. That is, a total of 16 container IP addresses are allocated each time. Therefore, a maximum of 4096 (65536/16) nodes can be created. This is an extreme case. If 4096 nodes are created, a maximum of 16 pods can be created for each node because only a CIDR block with 16 IP addresses is allocated to each node. The number of nodes that can be added to a cluster is also determined by the available IP addresses in the node subnet and the scale of the cluster.

                                                                          -

                                                                          Recommendation for CIDR Block Planning

                                                                          As described in Cluster Network Structure, network addresses in a cluster can be divided into three parts: node network, container network, and service network. When planning network addresses, consider the following aspects:

                                                                          -
                                                                          • The three CIDR blocks cannot overlap. Otherwise, a conflict occurs.
                                                                          • Ensure that each CIDR block has sufficient IP addresses.
                                                                            • The IP addresses in the node CIDR block must match the cluster scale. Otherwise, nodes cannot be created due to insufficient IP addresses.
                                                                            • The IP addresses in the container CIDR block must match the service scale. Otherwise, pods cannot be created due to insufficient IP addresses. The number of pods that can be created on each node also depends on other parameter settings.
                                                                            +

                                                                            Recommendation for CIDR Block Planning

                                                                            As explained in Cluster Network Structure, network addresses in a cluster are divided into the cluster network, container network, and service network. When planning network addresses, consider the following factors:

                                                                            +
                                                                            • The three CIDR blocks cannot overlap. Otherwise, a conflict occurs.
                                                                            • Ensure that each CIDR block has sufficient IP addresses.
                                                                              • The IP addresses in the cluster CIDR block must match the cluster scale. Otherwise, nodes cannot be created due to insufficient IP addresses.
                                                                              • The IP addresses in the container CIDR block must match the service scale. Otherwise, pods cannot be created due to insufficient IP addresses. The number of pods that can be created on each node also depends on other parameter settings.
                                                                            -

                                                                            Example of Container Tunnel Network Access

                                                                            Create a cluster that uses the container tunnel network model. Create a Deployment in the cluster.

                                                                            -
                                                                            kind: Deployment
                                                                            +

                                                                            Example of Container Tunnel Network Access

                                                                            The following is an example of creating a workload in a cluster using the container tunnel network model:

                                                                            +
                                                                            1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
                                                                            2. Create a Deployment in the cluster.

                                                                              Create the deployment.yaml file. The following shows an example:

                                                                              +
                                                                              kind: Deployment
                                                                               apiVersion: apps/v1
                                                                               metadata:
                                                                                 name: example
                                                                              @@ -51,17 +53,19 @@ spec:
                                                                                             memory: 512Mi
                                                                                     imagePullSecrets:
                                                                                       - name: default-secret
                                                                              -

                                                                              View the created pod.

                                                                              -
                                                                              $ kubectl get pod -owide
                                                                              -NAME                       READY   STATUS    RESTARTS   AGE     IP          NODE           NOMINATED NODE   READINESS GATES
                                                                              +

                                                                              Create the workload.

                                                                              +
                                                                              kubectl apply -f deployment.yaml
                                                                              +

                                                                            3. Check the running pods.

                                                                              kubectl get pod -owide
                                                                              +

                                                                              Command output:

                                                                              +
                                                                              NAME                       READY   STATUS    RESTARTS   AGE     IP          NODE           NOMINATED NODE   READINESS GATES
                                                                               example-5bdc5699b7-5rvq4   1/1     Running   0          3m28s   10.0.0.20   192.168.0.42   <none>           <none>
                                                                               example-5bdc5699b7-984j9   1/1     Running   0          3m28s   10.0.0.21   192.168.0.42   <none>           <none>
                                                                               example-5bdc5699b7-lfxkm   1/1     Running   0          3m28s   10.0.0.22   192.168.0.42   <none>           <none>
                                                                               example-5bdc5699b7-wjcmg   1/1     Running   0          3m28s   10.0.0.52   192.168.0.64   <none>           <none>
                                                                              -

                                                                              In this case, the IP address of the pod cannot be directly accessed outside the cluster in the same VPC. This is a feature of the container tunnel network.

                                                                              -

                                                                              However, the pod can be accessed from a node in the cluster or in the pod. As shown in the following figure, the pod can be accessed directly from the container.

                                                                              -
                                                                              $ kubectl exec -it example-5bdc5699b7-5rvq4 -- curl 10.0.0.21
                                                                              -<!DOCTYPE html>
                                                                              +

4. Use a cloud server in the same VPC but outside the cluster to directly access a pod's IP address. The access will fail, because pod IP addresses on a container tunnel network cannot be reached from outside the cluster.

A pod's IP address can, however, be accessed from within a pod or from a node in the cluster. In the following example, a pod's IP address is accessed from within another pod, where example-5bdc5699b7-5rvq4 is the name of the pod that initiates the request and 10.0.0.21 is the IP address of the target pod.
                                                                              kubectl exec -it example-5bdc5699b7-5rvq4 -- curl 10.0.0.21
                                                                              +
                                                                              +

                                                                              If the following information is displayed, the workload can be properly accessed:

                                                                              +
                                                                              <!DOCTYPE html>
                                                                               <html>
                                                                               <head>
                                                                               <title>Welcome to nginx!</title>
                                                                              @@ -86,11 +90,12 @@ Commercial support is available at
                                                                               <p><em>Thank you for using nginx.</em></p>
                                                                               </body>
                                                                               </html>
                                                                              +

                                                                            diff --git a/docs/cce/umn/cce_10_0283.html b/docs/cce/umn/cce_10_0283.html index 5b74890f..29edb9e7 100644 --- a/docs/cce/umn/cce_10_0283.html +++ b/docs/cce/umn/cce_10_0283.html @@ -1,38 +1,38 @@ -

                                                                            VPC Network

                                                                            -

                                                                            Model Definition

                                                                            The VPC network uses VPC routing to integrate with the underlying network. This network model is suitable for performance-intensive scenarios. The maximum number of nodes allowed in a cluster depends on the VPC route quota. Each node is assigned a CIDR block of a fixed size. This networking model is free from tunnel encapsulation overhead and outperforms the container tunnel network model. In addition, as VPC routing includes routes to node IP addresses and the container CIDR block, container pods in a cluster can be directly accessed from ECSs in the same VPC outside the cluster.
                                                                            Figure 1 VPC network model
                                                                            +

                                                                            VPC Network Model

                                                                            +

                                                                            Model Definition

                                                                            The VPC network model seamlessly combines VPC routing with the underlying network, making it ideal for high-performance scenarios. However, the maximum number of nodes allowed in a cluster is determined by the VPC route quota. In the VPC network model, container CIDR blocks are separate from node CIDR blocks. To allocate IP addresses to containers running on a node in a cluster, each node in the cluster is allocated with a container CIDR block for a fixed number of IP addresses. The VPC network model outperforms the container tunnel network model in terms of performance because it does not have tunnel encapsulation overhead. When using the VPC network model in a cluster, the VPC routing table automatically configures the routes between container CIDR blocks and VPC CIDR blocks. This means that pods within the cluster can be accessed directly from cloud servers in the same VPC, even if they are outside the cluster.
                                                                            Figure 1 VPC network model
                                                                            -

                                                                            Pod-to-pod communication

                                                                            -
                                                                            • On the same node: Packets are directly forwarded through IPvlan.
                                                                            • Across nodes: Packets are forwarded to the default gateway through default routes, and then to the peer node via the VPC routes.
                                                                            +

                                                                            In a cluster using the VPC network model, network communication paths are as follows:

                                                                            +
                                                                            • Inter-pod communication on the same node: Packets are directly forwarded through IPvlan.
                                                                            • Inter-pod communication on different nodes: The traffic accesses the default gateway by following the route specified in the VPC routing table and then is forwarded to the target pod on another node using VPC routing.
                                                                            • Pod communication outside a cluster: When a container in a cluster needs to access a network outside of the cluster, CCE uses NAT to translate the container's IP address into the node IP address so that the pod communicates externally using the node IP address.

                                                                            Advantages and Disadvantages

                                                                            Advantages

                                                                            -
                                                                            • No tunnel encapsulation is required, so network problems are easy to locate and the performance is high.
                                                                            • In the same VPC, the external network of the cluster can be directly connected to the container IP address.
                                                                            +
                                                                            • High performance and simplified network fault locating are achieved by eliminating the need for tunnel encapsulation.
                                                                            • A VPC routing table automatically configures routes between container CIDR blocks and VPC CIDR blocks. This enables resources in the VPC to directly communicate with containers in the cluster.

                                                                              Similarly, if the VPC is accessible to other VPCs or data centers and the VPC routing table includes routes to the container CIDR blocks, resources in other VPCs or data centers can directly communicate with containers in the cluster, provided there are no conflicts between the network CIDR blocks.

                                                                              +
                                                                              +

                                                                            Disadvantages

                                                                            -
                                                                            • The number of nodes is limited by the VPC route quota.
                                                                            • Each node is assigned a CIDR block of a fixed size, which leads to a waste of IP addresses in the container CIDR block.
                                                                            • Pods cannot directly use functionalities such as EIPs and security groups.
                                                                            +
                                                                            • The number of nodes is limited by the VPC route quota.
                                                                            • Each node is assigned a CIDR block with a fixed size, which results in IP address wastage in the container CIDR block.
                                                                            • Pods cannot directly use features like EIPs and security groups.
                                                                            -

                                                                            Applicable Scenarios

                                                                            • High performance requirements: As no tunnel encapsulation is required, the VPC network model delivers the performance close to that of a VPC network when compared with the container tunnel network model. Therefore, the VPC network model applies to scenarios that have high requirements on performance, such as AI computing and big data computing.
                                                                            • Small- and medium-scale networks: Due to the limitation on VPC routing tables, it is recommended that the number of nodes in a cluster be less than or equal to 1000.
                                                                            +

                                                                            Application Scenarios

                                                                            • High performance requirements: As no tunnel encapsulation is required, the VPC network model delivers the performance close to that of a VPC network when compared with the container tunnel network model. Therefore, the VPC network model applies to scenarios that have high requirements on performance, such as AI computing and big data computing.
                                                                            • Small- and medium-scale networks: Due to the limitation on VPC routing tables, it is recommended that the number of nodes in a cluster be less than or equal to 1000.
                                                                            -

                                                                            Container IP Address Management

                                                                            The VPC network allocates container IP addresses according to the following rules:

                                                                            -
                                                                            • The container CIDR block is allocated separately.
                                                                            • IP addresses are allocated by node. One CIDR block with a fixed size (which is configurable) is allocated to each node in a cluster from the container CIDR block.
                                                                            • The container CIDR block cyclically allocates CIDR blocks to new nodes in sequence.
                                                                            • Pods scheduled to a node are cyclically allocated IP addresses from CIDR blocks allocated to the node.
                                                                            -
                                                                            Figure 2 IP address management of the VPC network
                                                                            +

                                                                            Container IP Address Management

                                                                            The VPC network model assigns container IP addresses based on the following guidelines:

                                                                            +
                                                                            • Container CIDR blocks are separate from node CIDR blocks.
                                                                            • IP addresses are allocated by node. One CIDR block with a fixed size (configurable) is allocated to each node in a cluster from the container CIDR block.
                                                                            • A container CIDR block assigns CIDR blocks to new nodes in a cyclical sequence.
                                                                            • IP addresses from the CIDR blocks assigned to a node are allocated to pods scheduled to that node in a cyclical manner.
                                                                            +
                                                                            Figure 2 IP address management of the VPC network

Maximum number of nodes that can be created in a cluster using the VPC network = Number of IP addresses in the container CIDR block / Number of IP addresses in the CIDR block allocated to each node from the container CIDR block

                                                                            -

                                                                            For example, if the container CIDR block is 172.16.0.0/16, the number of IP addresses is 65536. The mask of the container CIDR block allocated to the node is 25. That is, the number of container IP addresses on each node is 128. Therefore, a maximum of 512 (65536/128) nodes can be created. In addition, the number of nodes that can be created in a cluster also depends on the node network and cluster scale.

                                                                            +

                                                                            For example, if the container CIDR block is 172.16.0.0/16, the number of IP addresses is 65536. The mask of the container CIDR block allocated to a node is 25. That is, the number of container IP addresses on each node is 128. Therefore, a maximum of 512 (65536/128) nodes can be created. The number of nodes that can be added to a cluster is also determined by the available IP addresses in the node subnet and the scale of the cluster. For details, see Recommendation for CIDR Block Planning.
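The node limit can also be derived directly from the two masks. The following is a minimal shell sketch using the mask values from this example (16 for the container CIDR block and 25 for the per-node block):

$ container_mask=16; node_mask=25
$ echo $(( 2**(32 - container_mask) / 2**(32 - node_mask) ))
512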

                                                                            -

                                                                            Recommendation for CIDR Block Planning

                                                                            As described in Cluster Network Structure, network addresses in a cluster can be divided into three parts: node network, container network, and service network. When planning network addresses, consider the following aspects:

                                                                            -
                                                                            • The three CIDR blocks cannot overlap. Otherwise, a conflict occurs.
                                                                            • Ensure that each CIDR block has sufficient IP addresses.
                                                                              • The IP addresses in the node CIDR block must match the cluster scale. Otherwise, nodes cannot be created due to insufficient IP addresses.
                                                                              • The IP addresses in the container CIDR block must match the service scale. Otherwise, pods cannot be created due to insufficient IP addresses. The number of pods that can be created on each node also depends on other parameter settings.
                                                                              +

                                                                              Recommendation for CIDR Block Planning

                                                                              As explained in Cluster Network Structure, network addresses in a cluster are divided into the cluster network, container network, and service network. When planning network addresses, consider the following factors:

                                                                              +
                                                                              • The three CIDR blocks cannot overlap. Otherwise, a conflict occurs.
                                                                              • Ensure that each CIDR block has sufficient IP addresses.
                                                                                • The IP addresses in the cluster CIDR block must match the cluster scale. Otherwise, nodes cannot be created due to insufficient IP addresses.
                                                                                • The IP addresses in the container CIDR block must match the service scale. Otherwise, pods cannot be created due to insufficient IP addresses. The number of pods that can be created on each node also depends on other parameter settings.

                                                                              Assume that a cluster contains 200 nodes and the network model is VPC network.

                                                                              -

                                                                              In this case, the number of available IP addresses in the selected node subnet must be greater than 200. Otherwise, nodes cannot be created due to insufficient IP addresses.

                                                                              -

                                                                              The container CIDR block is 10.0.0.0/16, and the number of available IP addresses is 65536. As described in Container IP Address Management, the VPC network is allocated a CIDR block with the fixed size (using the mask to determine the maximum number of container IP addresses allocated to each node). For example, if the upper limit is 128, the cluster supports a maximum of 512 (65536/128) nodes, including the three master nodes.

                                                                              +

                                                                              In this case, the number of available IP addresses in the selected subnet must be greater than 200. Otherwise, nodes cannot be created due to insufficient IP addresses.

                                                                              +

                                                                              The container CIDR block is 172.16.0.0/16, and the number of available IP addresses is 65536. As described in Container IP Address Management, the VPC network is allocated a CIDR block with a fixed size (using the mask to determine the maximum number of container IP addresses allocated to each node). For example, if the upper limit is 128, the cluster supports a maximum of 512 (65536/128) nodes.

                                                                              -

                                                                              Example of VPC Network Access

                                                                              Create a cluster using the VPC network model. The cluster contains one node.

                                                                              -
                                                                              $ kubectl get node
                                                                              -NAME           STATUS   ROLES    AGE   VERSION
                                                                              -192.168.0.99   Ready    <none>   9d    v1.17.17-r0-CCE21.6.1.B004-17.37.5
                                                                              -

                                                                              Check the VPC routing table. The destination address 172.16.0.0/25 is the container CIDR block allocated to the node, and the next hop is the corresponding node. When the container IP address is accessed, the VPC route forwards the access request to the next-hop node. This indicates that the VPC network model uses VPC routes.

                                                                              -

                                                                              Create a Deployment in the cluster.

                                                                              -
                                                                              kind: Deployment
                                                                              +

                                                                              Example of VPC Network Access

                                                                              In this example, a cluster using the VPC network model is created, and the cluster contains one node.

                                                                              +

                                                                              On the VPC console, locate the VPC to which the cluster belongs and check the VPC routing table.

                                                                              +

You can find that CCE has automatically created a custom route in the routing table. The destination of this route is the container CIDR block assigned to the node, and the next hop is the node itself. In this example, the cluster's container CIDR block is 172.16.0.0/16, and each node is assigned 128 container IP addresses, so the node's container CIDR block is 172.16.0.0/25.

                                                                              +

                                                                              When a container IP address is accessed, the VPC route will forward the traffic to the next-hop node that corresponds to the destination address. The following is an example:

                                                                              +
                                                                              1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
                                                                              2. Create a Deployment in the cluster.

                                                                                Create the deployment.yaml file. The following shows an example:
                                                                                kind: Deployment
                                                                                 apiVersion: apps/v1
                                                                                 metadata:
                                                                                   name: example
                                                                                @@ -52,17 +52,19 @@ spec:
                                                                                           image: 'nginx:perl'
                                                                                       imagePullSecrets:
                                                                                         - name: default-secret
                                                                                -

                                                                                Check the pod.

                                                                                -
                                                                                $ kubectl get pod -owide
                                                                                -NAME                       READY   STATUS    RESTARTS   AGE   IP           NODE           NOMINATED NODE   READINESS GATES
                                                                                +
                                                                                +

                                                                                Create the workload.

                                                                                +
                                                                                kubectl apply -f deployment.yaml
                                                                                +
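For reference, a complete deployment.yaml for this example could look like the following. This is only a sketch: the workload name (example), the image (nginx:perl), and the image pull secret (default-secret) come from the fragments above, the replica count matches the four pods listed in the next step, and the labels and container name are illustrative assumptions.

kind: Deployment
apiVersion: apps/v1
metadata:
  name: example
spec:
  replicas: 4                      # Matches the four pods shown in the next step
  selector:
    matchLabels:
      app: example
  template:
    metadata:
      labels:
        app: example               # Illustrative label
    spec:
      containers:
        - name: container-0        # Illustrative container name
          image: 'nginx:perl'
      imagePullSecrets:
        - name: default-secret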

                                                                              3. Check the running pods.

                                                                                kubectl get pod -owide
                                                                                +
                                                                                Command output:
                                                                                NAME                       READY   STATUS    RESTARTS   AGE   IP           NODE           NOMINATED NODE   READINESS GATES
                                                                                 example-86b9779494-l8qrw   1/1     Running   0          14s   172.16.0.6   192.168.0.99   <none>           <none>
                                                                                 example-86b9779494-svs8t   1/1     Running   0          14s   172.16.0.7   192.168.0.99   <none>           <none>
                                                                                 example-86b9779494-x8kl5   1/1     Running   0          14s   172.16.0.5   192.168.0.99   <none>           <none>
                                                                                 example-86b9779494-zt627   1/1     Running   0          14s   172.16.0.8   192.168.0.99   <none>           <none>
                                                                                -

                                                                                In this case, if you access the IP address of the pod from an ECS (outside the cluster) in the same VPC, the access is successful. This is a feature of VPC networking. Pods can be directly accessed from any node locating outside of the cluster and in the same VPC as that of the pods using the pods' IP addresses.

                                                                                -

                                                                                Pods can be accessed from nodes or pods in the same cluster. In the following example, you can directly access the pods in the container.

                                                                                -
                                                                                $ kubectl exec -it example-86b9779494-l8qrw -- curl 172.16.0.7
                                                                                -<!DOCTYPE html>
                                                                                +
                                                                                +

4. Use a cloud server in the same VPC to directly access a pod's IP address from outside the cluster. You can also access a pod by its IP address from another pod or from a node in the cluster. In the following example, a pod's IP address is accessed from within another pod: example-86b9779494-l8qrw is the pod that runs the command, and 172.16.0.7 is the IP address of the pod being accessed.

                                                                                kubectl exec -it example-86b9779494-l8qrw -- curl 172.16.0.7
                                                                                +

                                                                                If the following information is displayed, the workload can be properly accessed:

                                                                                +
                                                                                <!DOCTYPE html>
                                                                                 <html>
                                                                                 <head>
                                                                                 <title>Welcome to nginx!</title>
                                                                                @@ -87,11 +89,12 @@ Commercial support is available at
                                                                                 <p><em>Thank you for using nginx.</em></p>
                                                                                 </body>
                                                                                 </html>
                                                                                +

                                                                              diff --git a/docs/cce/umn/cce_10_0284.html b/docs/cce/umn/cce_10_0284.html index c4bb178d..7339f8bf 100644 --- a/docs/cce/umn/cce_10_0284.html +++ b/docs/cce/umn/cce_10_0284.html @@ -1,19 +1,21 @@ -

                                                                              Cloud Native 2.0 Network

                                                                              -

                                                                              Model Definition

                                                                              Developed by CCE, Cloud Native 2.0 network deeply integrates Elastic Network Interfaces (ENIs) and sub-ENIs of Virtual Private Cloud (VPC). Container IP addresses are allocated from the VPC CIDR block. ELB passthrough networking is supported to direct access requests to containers. Security groups and EIPs are bound to deliver high performance.

                                                                              -
                                                                              Figure 1 Cloud Native 2.0 network
                                                                              -

                                                                              Pod-to-pod communication

                                                                              -
                                                                              • Pods on BMS nodes use ENIs, whereas pods on ECS nodes use Sub-ENIs. Sub-ENIs are attached to ENIs through VLAN sub-interfaces.
                                                                              • On the same node: Packets are forwarded through the VPC ENI or sub-ENI.
                                                                              • Across nodes: Packets are forwarded through the VPC ENI or sub-ENI.
                                                                              +

                                                                              Cloud Native 2.0 Network Model

                                                                              +

                                                                              Model Definition

The Cloud Native 2.0 network model is a proprietary, next-generation container network model that combines the elastic network interfaces (ENIs) and supplementary network interfaces (sub-ENIs) of Virtual Private Cloud (VPC). ENIs or sub-ENIs are bound directly to pods, so each pod has its own unique IP address within the VPC. The model also supports features such as ELB passthrough networking to containers, binding pods to security groups, and binding pods to EIPs. Because no container tunnel encapsulation or NAT is required, the Cloud Native 2.0 network model delivers higher network performance than the container tunnel network model and the VPC network model.

                                                                              +
                                                                              Figure 1 Cloud Native 2.0 network model
                                                                              +

                                                                              In a cluster using the Cloud Native 2.0 network model, pods rely on ENIs or sub-ENIs to connect to external networks.

                                                                              +
                                                                              • Pods running on BMS nodes use ENIs.
                                                                              • Pods running on ECS nodes use sub-ENIs that are bound to the ECS' ENIs through VLAN sub-interfaces.
• Before a pod can run, an ENI or sub-ENI must be bound to it. The number of pods that can run on a node depends on the number of ENIs that can be bound to the node and the number of ENI ports available on the node.
• Traffic between pods on the same node, traffic between pods on different nodes, and traffic to networks outside the cluster are all forwarded through the VPC ENI or sub-ENI.

                                                                              Constraints

                                                                              This network model is available only to CCE Turbo clusters.

                                                                              Advantages and Disadvantages

                                                                              Advantages

                                                                              -
                                                                              • As the container network directly uses VPC, it is easy to locate network problems and provide the highest performance.
                                                                              • External networks in a VPC can be directly connected to container IP addresses.
                                                                              • The load balancing, security group, and EIP capabilities provided by VPC can be directly used by pods.
                                                                              +
                                                                              • VPCs serve as the foundation for constructing container networks. Every pod has its own network interface and IP address, which simplifies network problem-solving and enhances performance.
                                                                              • In the same VPC, ENIs are directly bound to pods in a cluster, so that resources outside the cluster can directly communicate with containers within the cluster.

                                                                                Similarly, if the VPC is accessible to other VPCs or data centers, resources in other VPCs or data centers can directly communicate with containers in the cluster, provided there are no conflicts between the network CIDR blocks.

                                                                                +
                                                                                +
                                                                              • The load balancing, security group, and EIP capabilities provided by VPC can be directly used by pods.

                                                                              Disadvantages

                                                                              -

                                                                              The container network directly uses VPC, which occupies the VPC address space. Therefore, you must properly plan the container CIDR block before creating a cluster.

                                                                              +

                                                                              Container networks are built on VPCs, with each pod receiving an IP address from the VPC CIDR block. As a result, it is crucial to plan the container CIDR block carefully before creating a cluster.

                                                                              -

                                                                              Application Scenarios

                                                                              • High performance requirements and use of other VPC network capabilities: Cloud Native Network 2.0 directly uses VPC, which delivers almost the same performance as the VPC network. Therefore, it applies to scenarios that have high requirements on bandwidth and latency.
                                                                              • Large-scale networking: Cloud Native Network 2.0 supports a maximum of 2000 ECS nodes and 100,000 containers.
                                                                              +

                                                                              Application Scenarios

                                                                              • High performance requirements: Cloud Native 2.0 networks use VPC networks to construct container networks, eliminating the need for tunnel encapsulation or NAT when containers communicate. This makes Cloud Native 2.0 networks ideal for scenarios that demand high bandwidth and low latency.
                                                                              • Large-scale networking: Cloud Native 2.0 networks support a maximum of 2,000 ECS nodes and 100,000 pods.

                                                                              Container IP Address Management

In the Cloud Native 2.0 network model, ECS nodes use sub-ENIs.

• Pod IP addresses are allocated directly from the VPC subnet configured for the container network. There is no need to allocate a separate small CIDR block to each node.
• When an ECS node is added to a cluster, the ENI that carries sub-ENIs is bound first. After that ENI is bound, sub-ENIs can be bound to it.
                                                                              • Number of ENIs bound to an ECS node: For clusters of v1.19.16-r40, v1.21.11-r0, v1.23.9-r0, v1.25.4-r0, v1.27.1-r0, and later versions, the value is 1. For clusters of earlier versions, the value is the maximum number of sub-ENIs that can be bound to the node divided by 64 (rounded up).
                                                                              • ENIs bound to an ECS node = Number of ENIs used to bear sub-ENIs + Number of sub-ENIs currently used by pods + Number of pre-bound sub-ENIs
• When a pod is created, an available ENI is randomly allocated from the pre-bound ENI pool of the node.
                                                                              • When the pod is deleted, the ENI is released back to the ENI pool of the node.
                                                                              • When a node is deleted, the ENIs are released back to the pool, and the sub-ENIs are deleted.
                                                                              @@ -105,19 +107,20 @@
                                                                              The number of pre-binding ENIs on the node remains in the following range:
                                                                              • Minimum number of ENIs to be pre-bound = min(max(nic-minimum-target – Number of bound ENIs, nic-warm-target), nic-maximum-target – Number of bound ENIs)
                                                                              • Maximum number of ENIs to be pre-bound = max(nic-warm-target + nic-max-above-warm-target, Number of bound ENIs – nic-minimum-target)
                                                                              -

                                                                              When a pod is created, an idle ENI (the earliest unused one) is preferentially allocated from the pool. If no idle ENI is available, a newsub-ENI is bound to the pod.

                                                                              +

                                                                              When a pod is created, an idle ENI (the earliest unused one) is preferentially allocated from the pool. If no idle ENI is available, a new sub-ENI is bound to the pod.

When a pod is deleted, the corresponding ENI is released back to the pre-bound ENI pool of the node, enters a 2-minute cooldown period, and can then be bound to another pod. If the ENI is not bound to any pod within 2 minutes, it is released.
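The pre-binding range above can be transcribed mechanically. The following is a minimal shell sketch of the two formulas; the parameter values are made-up examples, not defaults:

$ nic_minimum_target=10; nic_maximum_target=50
$ nic_warm_target=2; nic_max_above_warm_target=5
$ bound=12                                     # ENIs currently bound to the node
$ a=$(( nic_minimum_target - bound > nic_warm_target ? nic_minimum_target - bound : nic_warm_target ))
$ min_prebound=$(( a < nic_maximum_target - bound ? a : nic_maximum_target - bound ))
$ max_prebound=$(( nic_warm_target + nic_max_above_warm_target > bound - nic_minimum_target ? nic_warm_target + nic_max_above_warm_target : bound - nic_minimum_target ))
$ echo "$min_prebound $max_prebound"
2 7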

                                                                              -

                                                                              Recommendation for CIDR Block Planning

                                                                              As described in Cluster Network Structure, network addresses in a cluster can be divided into three parts: node network, container network, and service network. When planning network addresses, consider the following aspects:

                                                                              -
                                                                              • The three CIDR blocks cannot overlap. Otherwise, a conflict occurs. All subnets (including those created from the secondary CIDR block) in the VPC where the cluster resides cannot conflict with the container and Service CIDR blocks.
                                                                              • Ensure that each CIDR block has sufficient IP addresses.
                                                                                • The IP addresses in the node CIDR block must match the cluster scale. Otherwise, nodes cannot be created due to insufficient IP addresses.
                                                                                • The IP addresses in the container CIDR block must match the service scale. Otherwise, pods cannot be created due to insufficient IP addresses.
                                                                                +

                                                                                Recommendation for CIDR Block Planning

                                                                                As explained in Cluster Network Structure, network addresses in a cluster are divided into the cluster network, container network, and service network. When planning network addresses, consider the following factors:

                                                                                +
                                                                                • All subnets (including extended subnets) in the VPC where the cluster resides cannot conflict with the Service CIDR blocks.
                                                                                • Ensure that each CIDR block has sufficient IP addresses.
                                                                                  • The IP addresses in the cluster CIDR block must match the cluster scale. Otherwise, nodes cannot be created due to insufficient IP addresses.
                                                                                  • The IP addresses in the container CIDR block must match the service scale. Otherwise, pods cannot be created due to insufficient IP addresses.
                                                                                -

                                                                                In the Cloud Native Network 2.0 model, the container CIDR block and node CIDR block share the network addresses in a VPC. It is recommended that the container subnet and node subnet not use the same subnet. Otherwise, containers or nodes may fail to be created due to insufficient IP resources.

                                                                                +

In the Cloud Native 2.0 network model, the container CIDR block and the node CIDR block share the IP addresses in a VPC. Do not use the same subnet as both the container subnet and the node subnet. Otherwise, containers or nodes may fail to be created due to insufficient IP addresses.

                                                                                In addition, a subnet can be added to the container CIDR block after a cluster is created to increase the number of available IP addresses. In this case, ensure that the added subnet does not conflict with other subnets in the container CIDR block.

                                                                                -
                                                                                Figure 2 Configuring CIDR blocks
                                                                                +
                                                                                Figure 2 Configuring CIDR blocks
                                                                                -

                                                                                Example of Cloud Native Network 2.0 Access

                                                                                Create a CCE Turbo cluster, which contains three ECS nodes.

                                                                                -

                                                                                Access the details page of one node. You can see that the node has one primary ENI and one extended ENI, and both of them are ENIs. The extended ENI belongs to the container CIDR block and is used to mount a sub-ENI to the pod.

                                                                                -

                                                                                Create a Deployment in the cluster.

                                                                                +

                                                                                Example of Cloud Native Network 2.0 Access

                                                                                In this example, a CCE Turbo cluster is created, and the cluster contains three ECS nodes.

                                                                                +

                                                                                You can check the basic information about a node on the ECS console. You can see that a primary network interface and an extended network interface are bound to the node. Both of them are ENIs. The IP address of the extended network interface belongs to the container CIDR block and is used to bind sub-ENIs to pods on the node.

                                                                                +

                                                                                The following is an example of creating a workload in a cluster using the Cloud Native 2.0 network model:

                                                                                +
                                                                                1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
                                                                                2. Create a Deployment in the cluster.

                                                                                  Create the deployment.yaml file. The following shows an example:

                                                                                  kind: Deployment
                                                                                   apiVersion: apps/v1
                                                                                   metadata:
                                                                                  @@ -145,23 +148,53 @@ spec:
                                                                                                 memory: 512Mi
                                                                                         imagePullSecrets:
                                                                                           - name: default-secret
                                                                                  -

                                                                                  View the created pod.

                                                                                  -
                                                                                  $ kubectl get pod -owide
                                                                                  -NAME                       READY   STATUS    RESTARTS   AGE   IP            NODE         NOMINATED NODE   READINESS GATES
                                                                                  +

                                                                                  Create the workload.

                                                                                  +
                                                                                  kubectl apply -f deployment.yaml
                                                                                  +

                                                                                3. Check the running pods.

                                                                                  kubectl get pod -owide
                                                                                  +

                                                                                  Command output:

                                                                                  +
                                                                                  NAME                       READY   STATUS    RESTARTS   AGE   IP            NODE         NOMINATED NODE   READINESS GATES
                                                                                   example-5bdc5699b7-54v7g   1/1     Running   0          7s    10.1.18.2     10.1.0.167   <none>           <none>
                                                                                   example-5bdc5699b7-6dzx5   1/1     Running   0          7s    10.1.18.216   10.1.0.186   <none>           <none>
                                                                                   example-5bdc5699b7-gq7xs   1/1     Running   0          7s    10.1.16.63    10.1.0.144   <none>           <none>
                                                                                   example-5bdc5699b7-h9rvb   1/1     Running   0          7s    10.1.16.125   10.1.0.167   <none>           <none>
                                                                                   example-5bdc5699b7-s9fts   1/1     Running   0          7s    10.1.16.89    10.1.0.144   <none>           <none>
                                                                                   example-5bdc5699b7-swq6q   1/1     Running   0          7s    10.1.17.111   10.1.0.167   <none>           <none>
                                                                                  -

                                                                                  The IP addresses of all pods are sub-ENIs, which are mounted to the ENI (extended ENI) of the node.

                                                                                  -

                                                                                  For example, the extended ENI of node 10.1.0.167 is 10.1.17.172. On the Network Interfaces page of the Network Console, you can see that three sub-ENIs are mounted to the extended ENI 10.1.17.172, which is the IP address of the pod.

                                                                                  -

                                                                                  In the VPC, the IP address of the pod can be successfully accessed.

                                                                                  +

The IP addresses of all pods belong to sub-ENIs, which are bound to the ENI (extended network interface) of the node.

                                                                                  +

For example, the IP address of the extended network interface of node 10.1.0.167 is 10.1.17.172. On the network interfaces console, you can see that three sub-ENIs are bound to this extended network interface, and their IP addresses are the IP addresses of the pods running on the node.

                                                                                  +

                                                                                4. Log in to an ECS in the same VPC and access the IP address of a pod from outside the cluster.

                                                                                  In this example, the accessed pod IP address is 10.1.18.2.

                                                                                  +
                                                                                  curl 10.1.18.2
                                                                                  +

                                                                                  If the following information is displayed, the workload can be properly accessed:

                                                                                  +
                                                                                  <!DOCTYPE html>
                                                                                  +<html>
                                                                                  +<head>
                                                                                  +<title>Welcome to nginx!</title>
                                                                                  +<style>
                                                                                  +    body {
                                                                                  +        width: 35em;
                                                                                  +        margin: 0 auto;
                                                                                  +        font-family: Tahoma, Verdana, Arial, sans-serif;
                                                                                  +    }
                                                                                  +</style>
                                                                                  +</head>
                                                                                  +<body>
                                                                                  +<h1>Welcome to nginx!</h1>
                                                                                  +<p>If you see this page, the nginx web server is successfully installed and
                                                                                  +working. Further configuration is required.</p>
                                                                                  +
                                                                                  +<p>For online documentation and support please refer to
                                                                                  +<a href="http://nginx.org/">nginx.org</a>.<br/>
                                                                                  +Commercial support is available at
                                                                                  +<a href="http://nginx.com/">nginx.com</a>.</p>
                                                                                  +
                                                                                  +<p><em>Thank you for using nginx.</em></p>
                                                                                  +</body>
                                                                                  +</html>
                                                                                  +

                                                                                diff --git a/docs/cce/umn/cce_10_0285.html b/docs/cce/umn/cce_10_0285.html index 2dca2f3f..28c69a83 100644 --- a/docs/cce/umn/cce_10_0285.html +++ b/docs/cce/umn/cce_10_0285.html @@ -7,18 +7,18 @@
                                                                                • Group them in different clusters for different environments.

                                                                                  Resources cannot be shared among different clusters. In addition, services in different environments can access each other only through load balancing.

                                                                                • Group them in different namespaces for different environments.

Workloads in the same namespace can access each other by using the Service name. For cross-namespace access, use the Service name together with the namespace name.

                                                                                  The following figure shows namespaces created for the development, joint debugging, and testing environments, respectively.

                                                                                  -
                                                                                  Figure 1 One namespace for one environment
                                                                                  +
                                                                                  Figure 1 One namespace for one environment
                                                                              • Isolating namespaces by application

You are advised to use this method if a large number of workloads are deployed in the same environment. For example, in the following figure, different namespaces (APP1 and APP2) are created to logically manage workloads as different groups. Workloads in the same namespace access each other using the Service name, and workloads in different namespaces access each other using the Service name together with the namespace name, as shown in the example after this list.

                                                                                -
                                                                                Figure 2 Grouping workloads into different namespaces
                                                                                +
                                                                                Figure 2 Grouping workloads into different namespaces
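The following is a brief sketch of the two access paths from inside a pod. The Service name nginx, the namespace APP2, and port 80 are placeholders, and the default cluster domain cluster.local is assumed:

# Same namespace: the Service name alone resolves.
curl http://nginx:80
# Different namespace: append the namespace to the Service name.
curl http://nginx.APP2.svc.cluster.local:80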

                                                                              Managing Namespace Labels

                                                                              1. Log in to the CCE console and click the cluster name to access the cluster console. In the navigation pane, choose Namespaces.
                                                                              2. Locate the row containing the target namespace and choose More > Manage Label in the Operation column.
                                                                              3. In the dialog box that is displayed, the existing labels of the namespace are displayed. Modify the labels as needed.

                                                                                • Adding a label: Click the add icon, enter the key and value of the label to be added, and click OK.

                                                                                  For example, the key is project and the value is cicd, indicating that the namespace is used to deploy CICD.

                                                                                  -
                                                                                • Deleting a label: Click next the label to be deleted and then OK.
                                                                                +
4. Deleting a label: Click the deletion icon next to the label to be deleted and then click OK.

• Switch to the Manage Label dialog box again and check the modified labels. You can also manage namespace labels using kubectl, as shown in the following example.
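The sketch below uses the project=cicd label from the example above; the namespace name test is a placeholder:

# Add or update the label
kubectl label namespace test project=cicd --overwrite
# Check the labels of the namespace
kubectl get namespace test --show-labels
# Delete the label
kubectl label namespace test project-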

                                                                          Enabling Node Affinity in a Namespace

                                                                          After node affinity is enabled in a namespace, the workloads newly created in the namespace can be scheduled only to nodes with specific labels. For details, see PodNodeSelector.

                                                                          -
                                                                          1. Log in to the CCE console and click the cluster name to access the cluster console. In the navigation pane, choose Namespaces.
                                                                          2. Locate the target namespace and click in the Node Affinity column.
                                                                          3. In the displayed dialog box, select Enable and click OK.

                                                                            After node affinity is enabled, new workloads in the current namespace will be scheduled only to nodes with specified labels. For example, in namespace test, the workloads in the namespace can be scheduled only to the node whose label key is kubelet.kubernetes.io/namespace and label value is test.

                                                                            +
                                                                            1. Log in to the CCE console and click the cluster name to access the cluster console. In the navigation pane, choose Namespaces.
2. Locate the target namespace and click the icon in the Node Affinity column.
                                                                            3. In the displayed dialog box, select Enable and click OK.

                                                                              After node affinity is enabled, new workloads in the current namespace will be scheduled only to nodes with specified labels. For example, in namespace test, the workloads in the namespace can be scheduled only to the node whose label key is kubelet.kubernetes.io/namespace and label value is test.

4. You can add the specified labels to a node in Labels and Taints on the Nodes page (for details, see Managing Node Labels) or add them using kubectl, as shown in the following example.
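The following is a brief sketch; replace <node-name> with the name of the target node:

# Add the label required by the test namespace in the example above
kubectl label node <node-name> kubelet.kubernetes.io/namespace=test
# Check the labels of the node
kubectl get node <node-name> --show-labels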

                                                                          Deleting a Namespace

                                                                          If a namespace is deleted, all resources (such as workloads, jobs, and ConfigMaps) in this namespace will also be deleted. Exercise caution when deleting a namespace.

                                                                          diff --git a/docs/cce/umn/cce_10_0287.html b/docs/cce/umn/cce_10_0287.html index fb3e8a8e..3dc2af4a 100644 --- a/docs/cce/umn/cce_10_0287.html +++ b/docs/cce/umn/cce_10_0287.html @@ -108,7 +108,7 @@
                                                                          -

                                                                          Constraints

                                                                          Kubernetes provides optimistic concurrency control (OCC), also known as optimistic locking, for frequent data updates. You can use optimistic locking by defining the resourceVersion field. This field is in the object metadata. This field identifies the internal version number of the object. When the object is modified, this field is modified accordingly. You can use kube-apiserver to check whether an object has been modified. When the API server receives an update request containing the resourceVersion field, the server compares the requested data with the resource version number of the server. If they are different, the object on the server has been modified when the update is submitted. In this case, the API server returns a conflict error (409). Obtain the server data, modify the data, and submit the data to the server again. The resource quota limits the total resource consumption of each namespace and records the resource information in the cluster. Therefore, after the enable-resource-quota option is enabled, the probability of resource creation conflicts increases in large-scale concurrency scenarios, affecting the performance of batch resource creation.

                                                                          +

                                                                          Notes and Constraints

                                                                          Kubernetes provides optimistic concurrency control (OCC), also known as optimistic locking, for frequent data updates. You can use optimistic locking by defining the resourceVersion field. This field is in the object metadata. This field identifies the internal version number of the object. When the object is modified, this field is modified accordingly. You can use kube-apiserver to check whether an object has been modified. When the API server receives an update request containing the resourceVersion field, the server compares the requested data with the resource version number of the server. If they are different, the object on the server has been modified when the update is submitted. In this case, the API server returns a conflict error (409). Obtain the server data, modify the data, and submit the data to the server again. The resource quota limits the total resource consumption of each namespace and records the resource information in the cluster. Therefore, after the enable-resource-quota option is enabled, the probability of resource creation conflicts increases in large-scale concurrency scenarios, affecting the performance of batch resource creation.
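The retry flow described above can be reproduced with kubectl. The following is a brief sketch; the Deployment name example is a placeholder:

# Fetch the current object, including metadata.resourceVersion
kubectl get deployment example -o yaml > example.yaml
# Edit example.yaml, then submit it with the recorded resourceVersion
kubectl replace -f example.yaml
# If another client modified the object in the meantime, the API server rejects the request
# with a 409 conflict ("the object has been modified; please apply your changes to the latest
# version and try again"). Fetch the object again, reapply your change, and resubmit.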

                                                                          Procedure

                                                                          1. Log in to the CCE console and click the cluster name to access the cluster console.
                                                                          2. In the navigation pane, click Namespaces.
                                                                          3. Click Quota Management next to the target namespace.

                                                                            This operation cannot be performed on system namespaces kube-system and kube-public.

                                                                          4. Set the resource quotas and click OK.

                                                                            • After setting CPU and memory quotas for a namespace, you must specify the request and limit values of CPU and memory resources when creating a workload. Otherwise, the workload cannot be created. If the quota of a resource is set to 0, the resource usage is not limited.
• Accumulated quota usage includes the resources used by CCE to create default components, such as the Kubernetes Services (which can be viewed using kubectl) created in the default namespace. Therefore, you are advised to set a resource quota greater than expected to reserve resources for creating default components.
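For reference, the quota settings above correspond to the standard Kubernetes ResourceQuota API. A minimal sketch of such an object, with illustrative names and values, is shown below:

    apiVersion: v1
    kind: ResourceQuota
    metadata:
      name: compute-quota          # example name
      namespace: dev               # example namespace
    spec:
      hard:
        requests.cpu: "2"          # total CPU requests allowed in the namespace
        requests.memory: 4Gi
        limits.cpu: "4"            # total CPU limits allowed in the namespace
        limits.memory: 8Gi
        pods: "20"                 # maximum number of pods

Running kubectl describe resourcequota compute-quota -n dev compares the current usage in the namespace with each hard limit.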
                                                                            diff --git a/docs/cce/umn/cce_10_0288.html b/docs/cce/umn/cce_10_0288.html index c9f1e86e..454fd1d8 100644 --- a/docs/cce/umn/cce_10_0288.html +++ b/docs/cce/umn/cce_10_0288.html @@ -1,10 +1,10 @@ -

                                                                            Binding a Custom Security Group to a Workload

                                                                            +

                                                                            Binding a Security Group to a Workload Using a Security Group Policy

                                                                            In Cloud Native Network 2.0, pods use VPC ENIs or sub-ENIs for networking. You can directly bind security groups and EIPs to pods. To bind CCE pods with security groups, CCE provides a custom resource object named SecurityGroup. Using this resource object, you can customize security isolation for workloads.

                                                                            The priority of the security group bound to pods using the security group policy is higher than that of the security group in the NetworkAttachmentDefinition.

                                                                            -

                                                                            Constraints

                                                                            • This function is supported for CCE Turbo clusters of v1.19 and later. Upgrade your CCE Turbo clusters if their versions are earlier than v1.19.
                                                                            • A workload can be bound to a maximum of five security groups.
                                                                            +

                                                                            Notes and Constraints

                                                                            • This function is supported for CCE Turbo clusters of v1.19 and later. Upgrade your CCE Turbo clusters if their versions are earlier than v1.19.
                                                                            • A workload can be bound to a maximum of five security groups.

                                                                            Using the Console

                                                                            1. Log in to the CCE console and click the cluster name to access the cluster console.
                                                                            2. In the navigation pane, choose Workloads. On the displayed page, click the desired workload name.
                                                                            3. Switch to the SecurityGroups tab and click Create.

                                                                            4. Set the parameters as described in Table 1.

                                                                              @@ -28,7 +28,7 @@

The selected security groups will be bound to the ENI or supplementary ENI of the selected workload. A maximum of five security groups can be selected from the drop-down list, and you must select at least one security group to create a SecurityGroup.

If no security group has been created yet, click Create Security Group. After the security group is created, click the refresh button.

                                                                          -
                                                                          NOTICE:
                                                                          • A maximum of five security groups can be selected.
                                                                          • Hover the cursor on next to the security group name, and you can view details about the security group.
                                                                          +
                                                                          NOTICE:
                                                                          • A maximum of five security groups can be selected.
• Hover the cursor over the security group name to view details about the security group.

                                                                          64566556-bd6f-48fb-b2c6-df8f44617953

                                                                          @@ -41,7 +41,7 @@

                                                                        17. After setting the parameters, click OK.

                                                                          After the security group is created, the system automatically returns to the security group list page where you can see the new security group.

                                                                        18. -

                                                                          Using kubectl

                                                                          1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
                                                                          2. Create a description file named securitygroup-demo.yaml.

                                                                            vi securitygroup-demo.yaml

                                                                            +

                                                                            Using kubectl

                                                                            1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
                                                                            2. Create a description file named securitygroup-demo.yaml.

                                                                              vi securitygroup-demo.yaml

For example, the following SecurityGroup binds all nginx workloads to two security groups, 64566556-bd6f-48fb-b2c6-df8f44617953 and 5451f1b0-bd6f-48fb-b2c6-df8f44617953, that have been created in advance (a complete sketch of this manifest, with assumed field names, is provided after this procedure):

                                                                              apiVersion: crd.yangtse.cni/v1
                                                                               kind: SecurityGroup
                                                                              @@ -127,7 +127,7 @@ spec:
                                                                               

                                                                            3. Run the following command to create the SecurityGroup:

                                                                              kubectl create -f securitygroup-demo.yaml

                                                                              If the following information is displayed, the SecurityGroup is being created.

                                                                              securitygroup.crd.yangtse.cni/demo created
                                                                              -

                                                                            4. Run the following command to view the SecurityGroup:

                                                                              kubectl get sg

                                                                              +

                                                                            5. Run the following command to check the SecurityGroup:

                                                                              kubectl get sg

If demo, the name of the created SecurityGroup, is displayed in the command output, the SecurityGroup has been created successfully.

                                                                              NAME                       POD-SELECTOR                      AGE
                                                                               all-no                     map[matchLabels:map[app:nginx]]   4h1m
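For reference, a complete securitygroup-demo.yaml for the example in step 2 might look like the following sketch. The podSelector and securityGroups fields are assumed field names and should be verified against the SecurityGroup CRD in your cluster:

    apiVersion: crd.yangtse.cni/v1
    kind: SecurityGroup
    metadata:
      name: demo
      namespace: default
    spec:
      podSelector:                 # selects the pods to bind; matches the nginx workloads
        matchLabels:
          app: nginx
      securityGroups:              # up to five security group IDs (field name assumed)
      - id: 64566556-bd6f-48fb-b2c6-df8f44617953
      - id: 5451f1b0-bd6f-48fb-b2c6-df8f44617953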
                                                                              diff --git a/docs/cce/umn/cce_10_0290.html b/docs/cce/umn/cce_10_0290.html
                                                                              index 938a6842..4d5079b2 100644
                                                                              --- a/docs/cce/umn/cce_10_0290.html
                                                                              +++ b/docs/cce/umn/cce_10_0290.html
                                                                              @@ -2,12 +2,14 @@
                                                                               
                                                                               

                                                                              Workload Scaling Rules

                                                                              How HPA Works

HPA is a controller that implements horizontal pod scaling. It periodically checks pod metrics, calculates the number of replicas required to meet the target values configured for HPA resources, and then adjusts the replicas field of the target resource object (such as a Deployment).

                                                                              -

                                                                              A prerequisite for auto scaling is that your container running data can be collected, such as number of cluster nodes/pods, and CPU and memory usage of containers. Kubernetes does not provide such monitoring capabilities itself. You can use extensions to monitor and collect your data. CCE integrates Metrics Server to realize such capabilities:

                                                                              -
                                                                              • Metrics Server is a cluster-wide aggregator of resource utilization data. Metrics Server collects metrics from the Summary API exposed by kubelet. These metrics are set for core Kubernetes resources, such as pods, nodes, containers, and Services. Metrics Server provides a set of standard APIs for external systems to collect these metrics.
                                                                              +

A prerequisite for auto scaling is that metrics about your running containers can be collected, such as the number of cluster nodes and pods and the CPU and memory usage of containers. Kubernetes does not have built-in monitoring capabilities, but you can use extensions such as Prometheus and Metrics Server to monitor and collect this data.

                                                                              +
• Prometheus is an open-source monitoring and alerting framework that can collect multiple types of metrics. It has become a de facto standard monitoring solution for Kubernetes.
                                                                              • Metrics Server is a cluster-wide aggregator of resource utilization data. Metrics Server collects metrics from the Summary API exposed by kubelet. These metrics are set for core Kubernetes resources, such as pods, nodes, containers, and Services. Metrics Server provides a set of standard APIs for external systems to collect these metrics.

                                                                              HPA can work with Metrics Server to implement auto scaling based on the CPU and memory usage.
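A quick way to verify that Metrics Server data is available in a cluster (assuming the add-on is installed) is to run the standard kubectl top commands, which read from the metrics.k8s.io API:

    kubectl top nodes               # CPU and memory usage of each node
    kubectl top pods -n default     # CPU and memory usage of each pod in a namespace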

                                                                              +

                                                                              Figure 1 shows how HPA works.

                                                                              +
                                                                              Figure 1 HPA working process

                                                                              Two core modules of HPA:

                                                                              -
                                                                              • Data Source Monitoring

                                                                                The community provided only CPU- and memory-based HPA at the early stage. With the population of Kubernetes, developers need more custom metrics or monitoring information at the access layer for their own applications, for example, the QPS of the load balancer and the number of online users of the website. In response, the community defines a set of standard metric APIs to provide services externally through these aggregated APIs.

                                                                                +
                                                                                • Data Source Monitoring

In the early stage, the community provided only CPU- and memory-based HPA. With the popularization of Kubernetes and Prometheus, developers needed more custom metrics or monitoring information at the access layer for their applications, for example, the QPS of a load balancer or the number of online users of a website. In response, the community defined a set of standard metric APIs that are served externally through API aggregation.

                                                                                  • metrics.k8s.io provides monitoring metrics related to the CPU and memory of pods and nodes.
                                                                                  • custom.metrics.k8s.io provides custom monitoring metrics related to Kubernetes objects.
                                                                                  • external.metrics.k8s.io provides metrics that come from external systems and are irrelevant to any Kubernetes resource metrics.
                                                                                • Scaling Decision-Making Algorithms

                                                                                  The HPA controller calculates the scaling ratio based on the current metric values and desired metric values using the following formula:

                                                                                  desiredReplicas = ceil[currentReplicas x (currentMetricValue/desiredMetricValue)]
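For example, if a workload currently runs 2 replicas at 80% average CPU usage and the desired value is 50%, then desiredReplicas = ceil[2 x (80/50)] = ceil[3.2] = 4, so HPA scales the workload out to 4 pods (always clamped to the configured minimum and maximum replica counts). The desired metric values are declared in an HPA object; the following is a minimal sketch using the standard autoscaling/v2 API (names and values are illustrative, and older cluster versions may require autoscaling/v2beta2):

    apiVersion: autoscaling/v2
    kind: HorizontalPodAutoscaler
    metadata:
      name: nginx-hpa                  # example name
    spec:
      scaleTargetRef:                  # the object whose replicas field HPA adjusts
        apiVersion: apps/v1
        kind: Deployment
        name: nginx
      minReplicas: 2
      maxReplicas: 10
      metrics:
      - type: Resource
        resource:
          name: cpu
          target:
            type: Utilization
            averageUtilization: 50     # the desiredMetricValue in the formula above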

                                                                                  diff --git a/docs/cce/umn/cce_10_0291.html b/docs/cce/umn/cce_10_0291.html index 1507770a..48e412c9 100644 --- a/docs/cce/umn/cce_10_0291.html +++ b/docs/cce/umn/cce_10_0291.html @@ -7,6 +7,8 @@