diff --git a/docs/css/umn/ALL_META.TXT.json b/docs/css/umn/ALL_META.TXT.json
index e6db8bd0..8cb64b92 100644
--- a/docs/css/umn/ALL_META.TXT.json
+++ b/docs/css/umn/ALL_META.TXT.json
@@ -3,34 +3,34 @@
"dockw":"101010001,User Guide"
},
{
- "uri":"css_01_0001.html",
- "node_id":"css_01_0001.xml",
+ "uri":"en-us_topic_0000001921967557.html",
+ "node_id":"en-us_topic_0000001921967557.xml",
"product_code":"css",
"code":"1",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
- "kw":"Overview",
+ "kw":"Product Overview",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
- "title":"Overview",
+ "title":"Product Overview",
"githuburl":""
},
{
- "uri":"css_04_0001.html",
- "node_id":"css_04_0001.xml",
+ "uri":"en-us_topic_0000001667545170.html",
+ "node_id":"en-us_topic_0000001667545170.xml",
"product_code":"css",
"code":"2",
"des":"Cloud Search Service (CSS) is a fully hosted distributed search service based on Elasticsearch. You can use it for structured and unstructured data search, and use AI vec",
"doc_type":"usermanual",
- "kw":"What Is Cloud Search Service?,Overview,User Guide",
+ "kw":"What Is Cloud Search Service?,Product Overview,User Guide",
"search_title":"",
"metedata":[
{
@@ -44,13 +44,13 @@
"githuburl":""
},
{
- "uri":"css_04_0010.html",
- "node_id":"css_04_0010.xml",
+ "uri":"en-us_topic_0000001667704890.html",
+ "node_id":"en-us_topic_0000001667704890.xml",
"product_code":"css",
"code":"3",
"des":"CSS has the following features and advantages.You can get insights from terabyte-scale data in milliseconds. In addition, you can use the visualized platform for data dis",
"doc_type":"usermanual",
- "kw":"Advantages,Overview,User Guide",
+ "kw":"Advantages,Product Overview,User Guide",
"search_title":"",
"metedata":[
{
@@ -64,13 +64,13 @@
"githuburl":""
},
{
- "uri":"css_04_0007.html",
- "node_id":"css_04_0007.xml",
+ "uri":"en-us_topic_0000001667704882.html",
+ "node_id":"en-us_topic_0000001667704882.xml",
"product_code":"css",
"code":"4",
"des":"CSS supports Kibana and Cerebro.Kibana is an open-source data analytics and visualization platform that works with Elasticsearch. You can use Kibana to search for and vie",
"doc_type":"usermanual",
- "kw":"Product Components,Overview,User Guide",
+ "kw":"Product Components,Product Overview,User Guide",
"search_title":"",
"metedata":[
{
@@ -84,47 +84,53 @@
"githuburl":""
},
{
- "uri":"css_04_0002.html",
- "node_id":"css_04_0002.xml",
- "product_code":"",
+ "uri":"en-us_topic_0000001715704493.html",
+ "node_id":"en-us_topic_0000001715704493.xml",
+ "product_code":"css",
"code":"5",
"des":"CSS can be used to build search boxes for websites and apps to improve user experience. You can also build a log analysis platform with it, facilitating data-driven O&M a",
- "doc_type":"",
- "kw":"Scenarios,Overview,User Guide",
+ "doc_type":"usermanual",
+ "kw":"Scenarios,Product Overview,User Guide",
"search_title":"",
"metedata":[
{
-
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsBot":"Yes",
+ "IsMulti":"Yes"
}
],
"title":"Scenarios",
"githuburl":""
},
{
- "uri":"css_04_0014.html",
- "node_id":"css_04_0014.xml",
- "product_code":"",
+ "uri":"en-us_topic_0000001715624665.html",
+ "node_id":"en-us_topic_0000001715624665.xml",
+ "product_code":"css",
"code":"6",
"des":"If you need to assign different permissions to employees in your organization to access your CSS resources, IAM is a good choice for fine-grained permissions management. ",
- "doc_type":"",
- "kw":"Permissions Management,Overview,User Guide",
+ "doc_type":"usermanual",
+ "kw":"Permissions Management,Product Overview,User Guide",
"search_title":"",
"metedata":[
{
-
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsBot":"Yes",
+ "IsMulti":"Yes"
}
],
"title":"Permissions Management",
"githuburl":""
},
{
- "uri":"css_04_0005.html",
- "node_id":"css_04_0005.xml",
+ "uri":"en-us_topic_0000001715624677.html",
+ "node_id":"en-us_topic_0000001715624677.xml",
"product_code":"css",
"code":"7",
"des":"The following table describes restrictions on clusters and nodes in CSS.You are advised to use the following browsers to access the CSS management console:Google Chrome 3",
"doc_type":"usermanual",
- "kw":"Constraints,Overview,User Guide",
+ "kw":"Constraints,Product Overview,User Guide",
"search_title":"",
"metedata":[
{
@@ -138,13 +144,13 @@
"githuburl":""
},
{
- "uri":"css_04_0019.html",
- "node_id":"css_04_0019.xml",
+ "uri":"en-us_topic_0000001667545182.html",
+ "node_id":"en-us_topic_0000001667545182.xml",
"product_code":"css",
"code":"8",
"des":"CSS uses the following resources:InstanceCPUMemory (GB)Disk quantityDisk size (GB)",
"doc_type":"usermanual",
- "kw":"Quotas,Overview,User Guide",
+ "kw":"Quotas,Product Overview,User Guide",
"search_title":"",
"metedata":[
{
@@ -158,13 +164,13 @@
"githuburl":""
},
{
- "uri":"css_04_0004.html",
- "node_id":"css_04_0004.xml",
+ "uri":"en-us_topic_0000001715624661.html",
+ "node_id":"en-us_topic_0000001715624661.xml",
"product_code":"css",
"code":"9",
"des":"Figure 1 shows the relationships between CSS and other services.",
"doc_type":"usermanual",
- "kw":"Related Services,Overview,User Guide",
+ "kw":"Related Services,Product Overview,User Guide",
"search_title":"",
"metedata":[
{
@@ -178,13 +184,13 @@
"githuburl":""
},
{
- "uri":"css_04_0012.html",
- "node_id":"css_04_0012.xml",
+ "uri":"en-us_topic_0000001715624649.html",
+ "node_id":"en-us_topic_0000001715624649.xml",
"product_code":"css",
"code":"10",
"des":"CSS provides functions on a per cluster basis. A cluster represents an independent search service that consists of multiple nodes.An index stores Elasticsearch data. It i",
"doc_type":"usermanual",
- "kw":"Basic Concepts,Overview,User Guide",
+ "kw":"Basic Concepts,Product Overview,User Guide",
"search_title":"",
"metedata":[
{
@@ -198,8 +204,8 @@
"githuburl":""
},
{
- "uri":"css_01_0006.html",
- "node_id":"css_01_0006.xml",
+ "uri":"en-us_topic_0000001477739396.html",
+ "node_id":"en-us_topic_0000001477739396.xml",
"product_code":"css",
"code":"11",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
@@ -209,18 +215,18 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Getting Started",
"githuburl":""
},
{
- "uri":"css_01_0007.html",
- "node_id":"css_01_0007.xml",
+ "uri":"en-us_topic_0000001528379317.html",
+ "node_id":"en-us_topic_0000001528379317.xml",
"product_code":"css",
"code":"12",
"des":"This section describes how to use Elasticsearch for product search. You can use the Elasticsearch search engine of CSS to search for data based on the scenario example. T",
@@ -230,18 +236,18 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Getting Started with Elasticsearch",
"githuburl":""
},
{
- "uri":"css_01_0070.html",
- "node_id":"css_01_0070.xml",
+ "uri":"en-us_topic_0000001477419768.html",
+ "node_id":"en-us_topic_0000001477419768.xml",
"product_code":"css",
"code":"13",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
@@ -251,18 +257,18 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Permissions Management",
"githuburl":""
},
{
- "uri":"css_01_0072.html",
- "node_id":"css_01_0072.xml",
+ "uri":"en-us_topic_0000001477419752.html",
+ "node_id":"en-us_topic_0000001477419752.xml",
"product_code":"css",
"code":"14",
"des":"This section describes how to use a group to grant permissions to a user. Figure 1 shows the process for granting permissions.CSS has two types of user permissions: CSS a",
@@ -272,18 +278,18 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Creating a User and Granting Permissions",
"githuburl":""
},
{
- "uri":"css_01_0086.html",
- "node_id":"css_01_0086.xml",
+ "uri":"en-us_topic_0000001477899148.html",
+ "node_id":"en-us_topic_0000001477899148.xml",
"product_code":"css",
"code":"15",
"des":"Custom policies can be created to supplement the system-defined policies of CSS. For the actions supported for custom policies, see section \"Permissions Policies and Supp",
@@ -293,83 +299,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"CSS Custom Policies",
"githuburl":""
},
{
- "uri":"css_01_0053.html",
- "node_id":"css_01_0053.xml",
+ "uri":"en-us_topic_0000001504911882.html",
+ "node_id":"en-us_topic_0000001504911882.xml",
"product_code":"css",
"code":"16",
- "des":"On the Dashboard page of the CSS management console, you can view information about the status and storage capacity of existing clusters.",
- "doc_type":"usermanual",
- "kw":"Viewing the Cluster Runtime Status and Storage Capacity Status,User Guide",
- "search_title":"",
- "metedata":[
- {
- "prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
- "documenttype":"usermanual",
- "IsBot":"Yes"
- }
- ],
- "title":"Viewing the Cluster Runtime Status and Storage Capacity Status",
- "githuburl":""
- },
- {
- "uri":"css_01_0056.html",
- "node_id":"css_01_0056.xml",
- "product_code":"css",
- "code":"17",
- "des":"The cluster list displays all CSS clusters. If there are a large number of clusters, these clusters will be displayed on multiple pages. You can view clusters of all stat",
- "doc_type":"usermanual",
- "kw":"Cluster List Overview,User Guide",
- "search_title":"",
- "metedata":[
- {
- "prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
- "documenttype":"usermanual",
- "IsBot":"Yes"
- }
- ],
- "title":"Cluster List Overview",
- "githuburl":""
- },
- {
- "uri":"css_01_0188.html",
- "node_id":"css_01_0188.xml",
- "product_code":"css",
- "code":"18",
- "des":"To prevent data loss and minimize the cluster downtime in case of service interruption, CSS supports cross-AZ cluster deployment. When creating a cluster, you can select ",
- "doc_type":"usermanual",
- "kw":"Deploying a Cross-AZ Cluster,User Guide",
- "search_title":"",
- "metedata":[
- {
- "prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
- "documenttype":"usermanual",
- "IsBot":"Yes"
- }
- ],
- "title":"Deploying a Cross-AZ Cluster",
- "githuburl":""
- },
- {
- "uri":"css_01_0207.html",
- "node_id":"css_01_0207.xml",
- "product_code":"css",
- "code":"19",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Elasticsearch",
@@ -377,251 +320,188 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Elasticsearch",
"githuburl":""
},
{
- "uri":"css_01_0009.html",
- "node_id":"css_01_0009.xml",
+ "uri":"en-us_topic_0000001477899180.html",
+ "node_id":"en-us_topic_0000001477899180.xml",
+ "product_code":"css",
+ "code":"17",
+ "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "doc_type":"usermanual",
+ "kw":"Creating a Cluster",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Creating a Cluster",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001477739344.html",
+ "node_id":"en-us_topic_0000001477739344.xml",
+ "product_code":"css",
+ "code":"18",
+ "des":"This section describes how to create an Elasticsearch cluster in security mode.Public IP address access and Kibana public access can be used only after security mode is e",
+ "doc_type":"usermanual",
+ "kw":"Creating a Cluster in Security Mode,Creating a Cluster,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Creating a Cluster in Security Mode",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001477899172.html",
+ "node_id":"en-us_topic_0000001477899172.xml",
+ "product_code":"css",
+ "code":"19",
+ "des":"This section describes how to create an Elasticsearch cluster in non-security mode.Log in to the CSS management console.On the Dashboard page, click Create Cluster in the",
+ "doc_type":"usermanual",
+ "kw":"Creating a Cluster in Non-Security Mode,Creating a Cluster,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Creating a Cluster in Non-Security Mode",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001528659093.html",
+ "node_id":"en-us_topic_0000001528659093.xml",
"product_code":"css",
"code":"20",
- "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "doc_type":"usermanual",
- "kw":"Managing Elasticsearch Clusters",
- "search_title":"",
- "metedata":[
- {
- "prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
- "documenttype":"usermanual",
- "IsBot":"Yes"
- }
- ],
- "title":"Managing Elasticsearch Clusters",
- "githuburl":""
- },
- {
- "uri":"css_01_0008.html",
- "node_id":"css_01_0008.xml",
- "product_code":"css",
- "code":"21",
- "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "doc_type":"usermanual",
- "kw":"Creating an Elasticsearch Cluster",
- "search_title":"",
- "metedata":[
- {
- "prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
- "documenttype":"usermanual",
- "IsBot":"Yes"
- }
- ],
- "title":"Creating an Elasticsearch Cluster",
- "githuburl":""
- },
- {
- "uri":"css_01_0189.html",
- "node_id":"css_01_0189.xml",
- "product_code":"css",
- "code":"22",
"des":"When creating an Elasticsearch cluster, you can enable the security mode for it. Identity authentication is required when users access a security cluster. You can also au",
"doc_type":"usermanual",
- "kw":"Clusters in Security Mode,Creating an Elasticsearch Cluster,User Guide",
+ "kw":"Clusters in Security Mode,Creating a Cluster,User Guide",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Clusters in Security Mode",
"githuburl":""
},
{
- "uri":"css_01_0011.html",
- "node_id":"css_01_0011.xml",
+ "uri":"en-us_topic_0000001477419724.html",
+ "node_id":"en-us_topic_0000001477419724.xml",
+ "product_code":"css",
+ "code":"21",
+ "des":"To prevent data loss and minimize the cluster downtime in case of service interruption, CSS supports cross-AZ cluster deployment. When creating a cluster, you can select ",
+ "doc_type":"usermanual",
+ "kw":"Deploying a Cross-AZ Cluster,Creating a Cluster,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Deploying a Cross-AZ Cluster",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001528379277.html",
+ "node_id":"en-us_topic_0000001528379277.xml",
+ "product_code":"css",
+ "code":"22",
+ "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "doc_type":"usermanual",
+ "kw":"Importing Data",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Importing Data",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001528499137.html",
+ "node_id":"en-us_topic_0000001528499137.xml",
"product_code":"css",
"code":"23",
- "des":"This section describes how to create an Elasticsearch cluster in security mode.Public IP address access and Kibana public access can be used only after security mode is e",
+ "des":"You can use Logstash to collect data and migrate collected data to Elasticsearch in CSS. This method helps you effectively obtain and manage data through Elasticsearch. D",
"doc_type":"usermanual",
- "kw":"Creating an Elasticsearch Cluster in Security Mode,Creating an Elasticsearch Cluster,User Guide",
+ "kw":"Using Logstash to Import Data to Elasticsearch,Importing Data,User Guide",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
- "title":"Creating an Elasticsearch Cluster in Security Mode",
+ "title":"Using Logstash to Import Data to Elasticsearch",
"githuburl":""
},
{
- "uri":"css_01_0094.html",
- "node_id":"css_01_0094.xml",
+ "uri":"en-us_topic_0000001477899200.html",
+ "node_id":"en-us_topic_0000001477899200.xml",
"product_code":"css",
"code":"24",
- "des":"This section describes how to create an Elasticsearch cluster in non-security mode.Log in to the CSS management console.On the Dashboard page, click Create Cluster in the",
+ "des":"You can import data in various formats, such as JSON, to Elasticsearch in CSS by using Kibana or APIs.Before importing data, ensure that you can use Kibana to access the ",
"doc_type":"usermanual",
- "kw":"Creating an Elasticsearch Cluster in Non-Security Mode,Creating an Elasticsearch Cluster,User Guide",
+ "kw":"Using Kibana or APIs to Import Data to Elasticsearch,Importing Data,User Guide",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
- "title":"Creating an Elasticsearch Cluster in Non-Security Mode",
+ "title":"Using Kibana or APIs to Import Data to Elasticsearch",
"githuburl":""
},
{
- "uri":"css_01_0185.html",
- "node_id":"css_01_0185.xml",
+ "uri":"en-us_topic_0000001555591537.html",
+ "node_id":"en-us_topic_0000001555591537.xml",
"product_code":"css",
"code":"25",
- "des":"On the Cluster Information page, you can view the information about a cluster, including the private network address, public IP address, version, and node.Log in to the C",
- "doc_type":"usermanual",
- "kw":"Viewing Basic Information About an Elasticsearch Cluster,Managing Elasticsearch Clusters,User Guide",
- "search_title":"",
- "metedata":[
- {
- "prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
- "documenttype":"usermanual",
- "IsBot":"Yes"
- }
- ],
- "title":"Viewing Basic Information About an Elasticsearch Cluster",
- "githuburl":""
- },
- {
- "uri":"css_01_0075.html",
- "node_id":"css_01_0075.xml",
- "product_code":"css",
- "code":"26",
- "des":"Tags are cluster identifiers. Adding tags to clusters can help you identify and manage your cluster resources.You can add tags to a cluster when creating the cluster or a",
- "doc_type":"usermanual",
- "kw":"Managing Tags,Managing Elasticsearch Clusters,User Guide",
- "search_title":"",
- "metedata":[
- {
- "prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
- "documenttype":"usermanual",
- "IsBot":"Yes"
- }
- ],
- "title":"Managing Tags",
- "githuburl":""
- },
- {
- "uri":"css_01_0058.html",
- "node_id":"css_01_0058.xml",
- "product_code":"css",
- "code":"27",
- "des":"You can create enterprise projects based on your organizational structure. Then you can manage resources across different regions by enterprise project, add users and use",
- "doc_type":"usermanual",
- "kw":"Binding an Enterprise Project,Managing Elasticsearch Clusters,User Guide",
- "search_title":"",
- "metedata":[
- {
- "prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
- "documenttype":"usermanual",
- "IsBot":"Yes"
- }
- ],
- "title":"Binding an Enterprise Project",
- "githuburl":""
- },
- {
- "uri":"css_01_0014.html",
- "node_id":"css_01_0014.xml",
- "product_code":"css",
- "code":"28",
- "des":"If a cluster becomes faulty, you can restart it to check if it can run normally.The target cluster is not frozen and has no task in progress.If a cluster is available, en",
- "doc_type":"usermanual",
- "kw":"Restarting a Cluster,Managing Elasticsearch Clusters,User Guide",
- "search_title":"",
- "metedata":[
- {
- "prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
- "documenttype":"usermanual",
- "IsBot":"Yes"
- }
- ],
- "title":"Restarting a Cluster",
- "githuburl":""
- },
- {
- "uri":"css_01_0015.html",
- "node_id":"css_01_0015.xml",
- "product_code":"css",
- "code":"29",
- "des":"You can delete clusters that you no longer need.If you delete a cluster, the cluster service data will be cleared. Exercise caution when performing this operation.The sna",
- "doc_type":"usermanual",
- "kw":"Deleting a Cluster,Managing Elasticsearch Clusters,User Guide",
- "search_title":"",
- "metedata":[
- {
- "prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
- "documenttype":"usermanual",
- "IsBot":"Yes"
- }
- ],
- "title":"Deleting a Cluster",
- "githuburl":""
- },
- {
- "uri":"css_01_0060.html",
- "node_id":"css_01_0060.xml",
- "product_code":"css",
- "code":"30",
- "des":"In the Failed Tasks dialog box, you can view the failed tasks related to a cluster, such as failing to create, restart, scale out, back up, or restore a cluster. In addit",
- "doc_type":"usermanual",
- "kw":"Managing Failed Tasks,Managing Elasticsearch Clusters,User Guide",
- "search_title":"",
- "metedata":[
- {
- "prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
- "documenttype":"usermanual",
- "IsBot":"Yes"
- }
- ],
- "title":"Managing Failed Tasks",
- "githuburl":""
- },
- {
- "uri":"css_01_0210.html",
- "node_id":"css_01_0210.xml",
- "product_code":"css",
- "code":"31",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Accessing an Elasticsearch Cluster",
@@ -629,20 +509,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Accessing an Elasticsearch Cluster",
"githuburl":""
},
{
- "uri":"css_01_0190.html",
- "node_id":"css_01_0190.xml",
+ "uri":"en-us_topic_0000001528379305.html",
+ "node_id":"en-us_topic_0000001528379305.xml",
"product_code":"css",
- "code":"32",
+ "code":"26",
"des":"Elasticsearch clusters have built-in Kibana and Cerebro components. You can quickly access an Elasticsearch cluster through Kibana and Cerebro.Log in to the CSS managemen",
"doc_type":"usermanual",
"kw":"Accessing an Elasticsearch Cluster,Accessing an Elasticsearch Cluster,User Guide",
@@ -650,20 +530,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Accessing an Elasticsearch Cluster",
"githuburl":""
},
{
- "uri":"css_01_0076.html",
- "node_id":"css_01_0076.xml",
+ "uri":"en-us_topic_0000001528299629.html",
+ "node_id":"en-us_topic_0000001528299629.xml",
"product_code":"css",
- "code":"33",
+ "code":"27",
"des":"You can access a security cluster (Elasticsearch clusters in version 6.5.4 or later support the security mode) that has the HTTPS access enabled through the public IP add",
"doc_type":"usermanual",
"kw":"Accessing a Cluster from a Public Network,Accessing an Elasticsearch Cluster,User Guide",
@@ -671,20 +551,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Accessing a Cluster from a Public Network",
"githuburl":""
},
{
- "uri":"css_01_0082.html",
- "node_id":"css_01_0082.xml",
+ "uri":"en-us_topic_0000001477579412.html",
+ "node_id":"en-us_topic_0000001477579412.xml",
"product_code":"css",
- "code":"34",
+ "code":"28",
"des":"If the VPC endpoint service is enabled, you can use a private domain name or node IP address generated by the endpoint to access the cluster. When the VPC endpoint servic",
"doc_type":"usermanual",
"kw":"Accessing a Cluster Using a VPC Endpoint,Accessing an Elasticsearch Cluster,User Guide",
@@ -692,20 +572,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Accessing a Cluster Using a VPC Endpoint",
"githuburl":""
},
{
- "uri":"css_01_0184.html",
- "node_id":"css_01_0184.xml",
+ "uri":"en-us_topic_0000001528379297.html",
+ "node_id":"en-us_topic_0000001528379297.xml",
"product_code":"css",
- "code":"35",
+ "code":"29",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"(Optional) Interconnecting with a Dedicated Load Balancer",
@@ -713,20 +593,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"(Optional) Interconnecting with a Dedicated Load Balancer",
"githuburl":""
},
{
- "uri":"css_01_0181.html",
- "node_id":"css_01_0181.xml",
+ "uri":"en-us_topic_0000001477739400.html",
+ "node_id":"en-us_topic_0000001477739400.xml",
"product_code":"css",
- "code":"36",
+ "code":"30",
"des":"CSS integrates shared load balancers and allows you to bind public network access and enable the VPC Endpoint service. Dedicated load balancers provide more functions and",
"doc_type":"usermanual",
"kw":"Scenario,(Optional) Interconnecting with a Dedicated Load Balancer,User Guide",
@@ -734,20 +614,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Scenario",
"githuburl":""
},
{
- "uri":"css_01_0182.html",
- "node_id":"css_01_0182.xml",
+ "uri":"en-us_topic_0000001477739348.html",
+ "node_id":"en-us_topic_0000001477739348.xml",
"product_code":"css",
- "code":"37",
+ "code":"31",
"des":"This section describes how to connect a CSS cluster to a dedicated load balancer.If the target ELB listener uses the HTTP protocol, skip this step.Prepare and upload a se",
"doc_type":"usermanual",
"kw":"Connecting to a Dedicated Load Balancer,(Optional) Interconnecting with a Dedicated Load Balancer,Us",
@@ -755,20 +635,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Connecting to a Dedicated Load Balancer",
"githuburl":""
},
{
- "uri":"css_01_0183.html",
- "node_id":"css_01_0183.xml",
+ "uri":"en-us_topic_0000001477419788.html",
+ "node_id":"en-us_topic_0000001477419788.xml",
"product_code":"css",
- "code":"38",
+ "code":"32",
"des":"This section provides the sample code for two-way authentication during the access to a cluster from a Java client.",
"doc_type":"usermanual",
"kw":"Sample Code for Two-Way Authentication During the Access to a Cluster,(Optional) Interconnecting wit",
@@ -776,20 +656,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Sample Code for Two-Way Authentication During the Access to a Cluster",
"githuburl":""
},
{
- "uri":"css_01_0269.html",
- "node_id":"css_01_0269.xml",
+ "uri":"en-us_topic_0000001633221741.html",
+ "node_id":"en-us_topic_0000001633221741.xml",
"product_code":"css",
- "code":"39",
+ "code":"33",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Index Backup and Restoration",
@@ -797,20 +677,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Index Backup and Restoration",
"githuburl":""
},
{
- "uri":"css_01_0033.html",
- "node_id":"css_01_0033.xml",
+ "uri":"en-us_topic_0000001633303977.html",
+ "node_id":"en-us_topic_0000001633303977.xml",
"product_code":"css",
- "code":"40",
+ "code":"34",
"des":"You can back up index data in clusters. If data loss occurs or you want to retrieve data of a specified duration, you can restore the index data. Index backup is implemen",
"doc_type":"usermanual",
"kw":"Backup and Restoration Overview,Index Backup and Restoration,User Guide",
@@ -818,41 +698,41 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Backup and Restoration Overview",
"githuburl":""
},
{
- "uri":"css_01_0267.html",
- "node_id":"css_01_0267.xml",
+ "uri":"en-us_topic_0000001583460750.html",
+ "node_id":"en-us_topic_0000001583460750.xml",
"product_code":"css",
- "code":"41",
- "des":"Snapshots are automatically created at a specified time each day according to the rules you create. You can enable or disable the automatic snapshot creation function and",
+ "code":"35",
+ "des":"Snapshots are automatically created at a specified time according to the rules you create. You can enable or disable the automatic snapshot creation function and set the ",
"doc_type":"usermanual",
"kw":"Managing Automatic Snapshot Creation,Index Backup and Restoration,User Guide",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Managing Automatic Snapshot Creation",
"githuburl":""
},
{
- "uri":"css_01_0268.html",
- "node_id":"css_01_0268.xml",
+ "uri":"en-us_topic_0000001633220693.html",
+ "node_id":"en-us_topic_0000001633220693.xml",
"product_code":"css",
- "code":"42",
+ "code":"36",
"des":"You can manually create a snapshot at any time to back up all data or data of specified indexes.To use the function of creating or restoring snapshots, the account or IAM",
"doc_type":"usermanual",
"kw":"Manually Creating a Snapshot,Index Backup and Restoration,User Guide",
@@ -860,20 +740,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Manually Creating a Snapshot",
"githuburl":""
},
{
- "uri":"css_01_0266.html",
- "node_id":"css_01_0266.xml",
+ "uri":"en-us_topic_0000001583300810.html",
+ "node_id":"en-us_topic_0000001583300810.xml",
"product_code":"css",
- "code":"43",
+ "code":"37",
"des":"You can use existing snapshots to restore the backup index data to a specified cluster.To use the function of creating or restoring snapshots, the account or IAM user log",
"doc_type":"usermanual",
"kw":"Restoring Data,Index Backup and Restoration,User Guide",
@@ -881,20 +761,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Restoring Data",
"githuburl":""
},
{
- "uri":"css_01_0271.html",
- "node_id":"css_01_0271.xml",
+ "uri":"en-us_topic_0000001583146906.html",
+ "node_id":"en-us_topic_0000001583146906.xml",
"product_code":"css",
- "code":"44",
+ "code":"38",
"des":"If you no longer need a snapshot, delete it to release storage resources. If the automatic snapshot creation function is enabled, snapshots that are automatically created",
"doc_type":"usermanual",
"kw":"Deleting a Snapshot,Index Backup and Restoration,User Guide",
@@ -902,293 +782,587 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Deleting a Snapshot",
"githuburl":""
},
{
- "uri":"css_01_0149.html",
- "node_id":"css_01_0149.xml",
+ "uri":"en-us_topic_0000001477739404.html",
+ "node_id":"en-us_topic_0000001477739404.xml",
"product_code":"css",
- "code":"45",
+ "code":"39",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
- "kw":"Changing the Elasticsearch Cluster Form",
+ "kw":"Cluster Specification Modification",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
- "title":"Changing the Elasticsearch Cluster Form",
+ "title":"Cluster Specification Modification",
"githuburl":""
},
{
- "uri":"css_01_0150.html",
- "node_id":"css_01_0150.xml",
+ "uri":"en-us_topic_0000001528379253.html",
+ "node_id":"en-us_topic_0000001528379253.xml",
"product_code":"css",
- "code":"46",
+ "code":"40",
"des":"You can scale in or out a cluster and change cluster specifications. In this way, you can improve cluster efficiency and reduce O&M costs.Scaling Out a ClusterIf a data n",
"doc_type":"usermanual",
- "kw":"Overview,Changing the Elasticsearch Cluster Form,User Guide",
+ "kw":"Overview,Cluster Specification Modification,User Guide",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Overview",
"githuburl":""
},
{
- "uri":"css_01_0151.html",
- "node_id":"css_01_0151.xml",
+ "uri":"en-us_topic_0000001477899164.html",
+ "node_id":"en-us_topic_0000001477899164.xml",
"product_code":"css",
- "code":"47",
+ "code":"41",
"des":"If the workloads on the data plane of a cluster change, you can scale out the cluster by increasing the number or capacity of its nodes. Services are not interrupted duri",
"doc_type":"usermanual",
- "kw":"Scaling Out a Cluster,Changing the Elasticsearch Cluster Form,User Guide",
+ "kw":"Scaling Out a Cluster,Cluster Specification Modification,User Guide",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Scaling Out a Cluster",
"githuburl":""
},
{
- "uri":"css_01_0152.html",
- "node_id":"css_01_0152.xml",
+ "uri":"en-us_topic_0000001477739368.html",
+ "node_id":"en-us_topic_0000001477739368.xml",
"product_code":"css",
- "code":"48",
+ "code":"42",
"des":"If the workloads on the data plane of a cluster change, you can change its node specifications as needed.The target cluster is available and has no tasks in progress.The ",
"doc_type":"usermanual",
- "kw":"Changing Specifications,Changing the Elasticsearch Cluster Form,User Guide",
+ "kw":"Changing Specifications,Cluster Specification Modification,User Guide",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Changing Specifications",
"githuburl":""
},
{
- "uri":"css_01_0153.html",
- "node_id":"css_01_0153.xml",
+ "uri":"en-us_topic_0000001528299597.html",
+ "node_id":"en-us_topic_0000001528299597.xml",
"product_code":"css",
- "code":"49",
- "des":"If a cluster can process existing data without fully using its resources, you can scale in the cluster to reduce costs. Services are not interrupted during cluster scale-",
+ "code":"43",
+ "des":"If a cluster can process existing data without fully using its resources, you can scale in the cluster to reduce costs. You are advised to scale in clusters during off-pe",
"doc_type":"usermanual",
- "kw":"Scaling in a Cluster,Changing the Elasticsearch Cluster Form,User Guide",
+ "kw":"Scaling in a Cluster,Cluster Specification Modification,User Guide",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Scaling in a Cluster",
"githuburl":""
},
{
- "uri":"css_01_0154.html",
- "node_id":"css_01_0154.xml",
+ "uri":"en-us_topic_0000001477899184.html",
+ "node_id":"en-us_topic_0000001477899184.xml",
"product_code":"css",
- "code":"50",
+ "code":"44",
"des":"If a cluster can process existing data without fully using its nodes, you can remove one or more specified nodes from the cluster to reduce costs. Services will not be in",
"doc_type":"usermanual",
- "kw":"Removing Specified Nodes,Changing the Elasticsearch Cluster Form,User Guide",
+ "kw":"Removing Specified Nodes,Cluster Specification Modification,User Guide",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Removing Specified Nodes",
"githuburl":""
},
{
- "uri":"css_01_0156.html",
- "node_id":"css_01_0156.xml",
+ "uri":"en-us_topic_0000001477579404.html",
+ "node_id":"en-us_topic_0000001477579404.xml",
"product_code":"css",
- "code":"51",
- "des":"If a node in the cluster is faulty, you can create a new node with the same specifications to replace it.The target cluster is available and has no tasks in progress.Only",
+ "code":"45",
+ "des":"If a node in the cluster is faulty, you can create a new node with the same specifications to replace it. During the replacement of a specified node, data of that node wi",
"doc_type":"usermanual",
- "kw":"Replacing a Specified Node,Changing the Elasticsearch Cluster Form,User Guide",
+ "kw":"Replacing a Specified Node,Cluster Specification Modification,User Guide",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Replacing a Specified Node",
"githuburl":""
},
{
- "uri":"css_01_0157.html",
- "node_id":"css_01_0157.xml",
+ "uri":"en-us_topic_0000001477899188.html",
+ "node_id":"en-us_topic_0000001477899188.xml",
"product_code":"css",
- "code":"52",
+ "code":"46",
"des":"If workloads on the data plane of a cluster increase, you can add master or client nodes as needed. Services are not interrupted while they are added.The target cluster i",
"doc_type":"usermanual",
- "kw":"Adding Master/Client Nodes,Changing the Elasticsearch Cluster Form,User Guide",
+ "kw":"Adding Master/Client Nodes,Cluster Specification Modification,User Guide",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Adding Master/Client Nodes",
"githuburl":""
},
{
- "uri":"css_01_0158.html",
- "node_id":"css_01_0158.xml",
+ "uri":"en-us_topic_0000001528379285.html",
+ "node_id":"en-us_topic_0000001528379285.xml",
"product_code":"css",
- "code":"53",
+ "code":"47",
"des":"After a cluster is created, its security mode can be changed using the following methods:Switching from the Non-Security Mode to Security ModeSwitching from the Security ",
"doc_type":"usermanual",
- "kw":"Changing the Security Mode,Changing the Elasticsearch Cluster Form,User Guide",
+ "kw":"Changing the Security Mode,Cluster Specification Modification,User Guide",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Changing the Security Mode",
"githuburl":""
},
{
- "uri":"css_01_0201.html",
- "node_id":"css_01_0201.xml",
+ "uri":"en-us_topic_0000001528299585.html",
+ "node_id":"en-us_topic_0000001528299585.xml",
"product_code":"css",
- "code":"54",
+ "code":"48",
"des":"CSS supports cross-AZ deployment. You can add an AZ to obtain more resources or improve cluster availability, and can migrate your current AZ to one with higher specifica",
"doc_type":"usermanual",
- "kw":"Changing AZs,Changing the Elasticsearch Cluster Form,User Guide",
+ "kw":"Changing AZs,Cluster Specification Modification,User Guide",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Changing AZs",
"githuburl":""
},
{
- "uri":"css_01_0211.html",
- "node_id":"css_01_0211.xml",
+ "uri":"en-us_topic_0000001528659153.html",
+ "node_id":"en-us_topic_0000001528659153.xml",
"product_code":"css",
- "code":"55",
- "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "code":"49",
+ "des":"Same-version upgrade, cross-engine upgrade, and cross-version upgrade are supported. Same-version upgrade is to upgrade the kernel patch of a cluster to fix problems or o",
"doc_type":"usermanual",
- "kw":"Configuring an Elasticsearch Cluster",
+ "kw":"Upgrading the Cluster Version,Elasticsearch,User Guide",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
- "title":"Configuring an Elasticsearch Cluster",
+ "title":"Upgrading the Cluster Version",
"githuburl":""
},
{
- "uri":"css_01_0080.html",
- "node_id":"css_01_0080.xml",
+ "uri":"en-us_topic_0000001477579340.html",
+ "node_id":"en-us_topic_0000001477579340.xml",
"product_code":"css",
- "code":"56",
- "des":"You can modify the elasticsearch.yml file.Log in to the CSS management console.Choose Clusters in the navigation pane. On the Clusters page, click the name of the target ",
+ "code":"50",
+ "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
- "kw":"Configuring YML Parameters,Configuring an Elasticsearch Cluster,User Guide",
+ "kw":"Cluster Management",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Cluster Management",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001528299613.html",
+ "node_id":"en-us_topic_0000001528299613.xml",
+ "product_code":"css",
+ "code":"51",
+ "des":"The cluster list displays all CSS clusters. If there are a large number of clusters, these clusters will be displayed on multiple pages. You can view clusters of all stat",
+ "doc_type":"usermanual",
+ "kw":"Cluster List Overview,Cluster Management,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Cluster List Overview",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001528499201.html",
+ "node_id":"en-us_topic_0000001528499201.xml",
+ "product_code":"css",
+ "code":"52",
+ "des":"On the Cluster Information page, you can view the information about a cluster, including the private network address, public IP address, version, and node.Log in to the C",
+ "doc_type":"usermanual",
+ "kw":"Viewing Basic Cluster Information,Cluster Management,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Viewing Basic Cluster Information",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001528659137.html",
+ "node_id":"en-us_topic_0000001528659137.xml",
+ "product_code":"css",
+ "code":"53",
+ "des":"Tags are cluster identifiers. Adding tags to clusters can help you identify and manage your cluster resources.You can add tags to a cluster when creating the cluster or a",
+ "doc_type":"usermanual",
+ "kw":"Managing Tags,Cluster Management,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Managing Tags",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001477739336.html",
+ "node_id":"en-us_topic_0000001477739336.xml",
+ "product_code":"css",
+ "code":"54",
+ "des":"CSS provides log backup and search functions to help you locate faults. You can back up cluster logs to OBS buckets and download required log files to analyze and locate ",
+ "doc_type":"usermanual",
+ "kw":"Managing Logs,Cluster Management,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Managing Logs",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001528299601.html",
+ "node_id":"en-us_topic_0000001528299601.xml",
+ "product_code":"css",
+ "code":"55",
+ "des":"You can modify the elasticsearch.yml file.Log in to the CSS management console.Choose Clusters in the navigation pane. On the Clusters page, click the name of the target ",
+ "doc_type":"usermanual",
+ "kw":"Configuring YML Parameters,Cluster Management,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Configuring YML Parameters",
"githuburl":""
},
{
- "uri":"css_01_0079.html",
- "node_id":"css_01_0079.xml",
+ "uri":"en-us_topic_0000001477739388.html",
+ "node_id":"en-us_topic_0000001477739388.xml",
"product_code":"css",
- "code":"57",
- "des":"CSS provides you with cold data nodes. You can store data that requires query response in seconds on high-performance nodes and store data that requires query response in",
+ "code":"56",
+ "des":"CSS clusters have default plugins. You can view the default plugin information on the console or Kibana.Log in to the CSS management console.In the navigation pane, choos",
"doc_type":"usermanual",
- "kw":"Hot and Cold Data Node Switchover,Configuring an Elasticsearch Cluster,User Guide",
+ "kw":"Viewing the Default Plugin List,Cluster Management,User Guide",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
- "title":"Hot and Cold Data Node Switchover",
+ "title":"Viewing the Default Plugin List",
"githuburl":""
},
{
- "uri":"css_01_0091.html",
- "node_id":"css_01_0091.xml",
+ "uri":"en-us_topic_0000001528299621.html",
+ "node_id":"en-us_topic_0000001528299621.xml",
+ "product_code":"css",
+ "code":"57",
+ "des":"You can create enterprise projects based on your organizational structure. Then you can manage resources across different regions by enterprise project, add users and use",
+ "doc_type":"usermanual",
+ "kw":"Binding an Enterprise Project,Cluster Management,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Binding an Enterprise Project",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001528499141.html",
+ "node_id":"en-us_topic_0000001528499141.xml",
"product_code":"css",
"code":"58",
+ "des":"If a cluster becomes faulty, you can restart it to check if it can run normally.The target cluster is not frozen and has no task in progress.If a cluster is available, en",
+ "doc_type":"usermanual",
+ "kw":"Restarting a Cluster,Cluster Management,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Restarting a Cluster",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001477579396.html",
+ "node_id":"en-us_topic_0000001477579396.xml",
+ "product_code":"css",
+ "code":"59",
+ "des":"You can delete clusters that you no longer need.If you delete a cluster, the cluster service data will be cleared. Exercise caution when performing this operation.The sna",
+ "doc_type":"usermanual",
+ "kw":"Deleting a Cluster,Cluster Management,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Deleting a Cluster",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001528299581.html",
+ "node_id":"en-us_topic_0000001528299581.xml",
+ "product_code":"css",
+ "code":"60",
+ "des":"In the Failed Tasks dialog box, you can view the failed tasks related to a cluster, such as failing to create, restart, scale out, back up, or restore a cluster. In addit",
+ "doc_type":"usermanual",
+ "kw":"Managing Failed Tasks,Cluster Management,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Managing Failed Tasks",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001477899212.html",
+ "node_id":"en-us_topic_0000001477899212.xml",
+ "product_code":"css",
+ "code":"61",
+ "des":"For Elasticsearch 6.5.4 and later versions, Open Distro for Elasticsearch SQL lets you write queries in SQL rather than in the Elasticsearch query domain-specific languag",
+ "doc_type":"usermanual",
+ "kw":"Using the Open Distro SQL Plug-in to Compile Queries,Elasticsearch,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Using the Open Distro SQL Plug-in to Compile Queries",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001866261281.html",
+ "node_id":"en-us_topic_0000001866261281.xml",
+ "product_code":"css",
+ "code":"62",
+ "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "doc_type":"usermanual",
+ "kw":"Using the Open Distro Alarm Plug-in to Configure SMN Alarms",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Using the Open Distro Alarm Plug-in to Configure SMN Alarms",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001564706853.html",
+ "node_id":"en-us_topic_0000001564706853.xml",
+ "product_code":"css",
+ "code":"63",
+ "des":"To use the OpenDistro alarm plugin (opendistro_alerting), authorize your Elasticsearch cluster to use SMN to send notifications. For details about how to use the OpenDist",
+ "doc_type":"usermanual",
+ "kw":"(Optional) Authorizing CSS to Use SMN,Using the Open Distro Alarm Plug-in to Configure SMN Alarms,Us",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"(Optional) Authorizing CSS to Use SMN",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001564906577.html",
+ "node_id":"en-us_topic_0000001564906577.xml",
+ "product_code":"css",
+ "code":"64",
+ "des":"By default, the open-source OpenDistro alarm plugin (opendistro_alerting) is integrated into CSS to send notifications when data meets specific conditions. This plugin co",
+ "doc_type":"usermanual",
+ "kw":"Configuring SMN Alarms,Using the Open Distro Alarm Plug-in to Configure SMN Alarms,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Configuring SMN Alarms",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001528659081.html",
+ "node_id":"en-us_topic_0000001528659081.xml",
+ "product_code":"css",
+ "code":"65",
+ "des":"CSS provides you with cold data nodes. You can store data that requires query response in seconds on high-performance nodes and store data that requires query response in",
+ "doc_type":"usermanual",
+ "kw":"Switching Hot and Cold Data,Elasticsearch,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Switching Hot and Cold Data",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001477579380.html",
+ "node_id":"en-us_topic_0000001477579380.xml",
+ "product_code":"css",
+ "code":"66",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Managing Indexes",
@@ -1196,20 +1370,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Managing Indexes",
"githuburl":""
},
{
- "uri":"css_01_0093.html",
- "node_id":"css_01_0093.xml",
+ "uri":"en-us_topic_0000001477739392.html",
+ "node_id":"en-us_topic_0000001477739392.xml",
"product_code":"css",
- "code":"59",
+ "code":"67",
"des":"Clusters of version 7.6.2 or later support index status management. ISM is a plugin that allows you to automate periodic and administrative operations based on changes on",
"doc_type":"usermanual",
"kw":"Creating and Managing Indexes,Managing Indexes,User Guide",
@@ -1217,20 +1391,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Creating and Managing Indexes",
"githuburl":""
},
{
- "uri":"css_01_0092.html",
- "node_id":"css_01_0092.xml",
+ "uri":"en-us_topic_0000001528659085.html",
+ "node_id":"en-us_topic_0000001528659085.xml",
"product_code":"css",
- "code":"60",
+ "code":"68",
"des":"You can change any managed index policy. ISM has constraints to ensure that policy changes do not break indexes.If an index is stuck in its current status, never proceedi",
"doc_type":"usermanual",
"kw":"Changing Policies,Managing Indexes,User Guide",
@@ -1238,104 +1412,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Changing Policies",
"githuburl":""
},
{
- "uri":"css_01_0077.html",
- "node_id":"css_01_0077.xml",
+ "uri":"en-us_topic_0000001528299625.html",
+ "node_id":"en-us_topic_0000001528299625.xml",
"product_code":"css",
- "code":"61",
- "des":"CSS provides log backup and search functions to help you locate faults. You can back up cluster logs to OBS buckets and download required log files to analyze and locate ",
- "doc_type":"usermanual",
- "kw":"Managing Logs,Elasticsearch,User Guide",
- "search_title":"",
- "metedata":[
- {
- "prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
- "documenttype":"usermanual",
- "IsBot":"Yes"
- }
- ],
- "title":"Managing Logs",
- "githuburl":""
- },
- {
- "uri":"css_01_0212.html",
- "node_id":"css_01_0212.xml",
- "product_code":"css",
- "code":"62",
- "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "doc_type":"usermanual",
- "kw":"Managing Plugins",
- "search_title":"",
- "metedata":[
- {
- "prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
- "documenttype":"usermanual",
- "IsBot":"Yes"
- }
- ],
- "title":"Managing Plugins",
- "githuburl":""
- },
- {
- "uri":"css_01_0078.html",
- "node_id":"css_01_0078.xml",
- "product_code":"css",
- "code":"63",
- "des":"CSS clusters have default plugins. You can view the default plugin information on the console or Kibana.Log in to the CSS management console.In the navigation pane, choos",
- "doc_type":"usermanual",
- "kw":"Viewing the Default Plugin List,Managing Plugins,User Guide",
- "search_title":"",
- "metedata":[
- {
- "prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
- "documenttype":"usermanual",
- "IsBot":"Yes"
- }
- ],
- "title":"Viewing the Default Plugin List",
- "githuburl":""
- },
- {
- "uri":"css_01_0061.html",
- "node_id":"css_01_0061.xml",
- "product_code":"css",
- "code":"64",
- "des":"For Elasticsearch 6.5.4 and later versions, Open Distro for Elasticsearch SQL lets you write queries in SQL rather than in the Elasticsearch query domain-specific languag",
- "doc_type":"usermanual",
- "kw":"Using the Open Distro SQL Plugin,Managing Plugins,User Guide",
- "search_title":"",
- "metedata":[
- {
- "prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
- "documenttype":"usermanual",
- "IsBot":"Yes"
- }
- ],
- "title":"Using the Open Distro SQL Plugin",
- "githuburl":""
- },
- {
- "uri":"css_01_0107.html",
- "node_id":"css_01_0107.xml",
- "product_code":"css",
- "code":"65",
+ "code":"69",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Kibana Platform",
@@ -1343,62 +1433,41 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Kibana Platform",
"githuburl":""
},
{
- "uri":"css_01_0125.html",
- "node_id":"css_01_0125.xml",
+ "uri":"en-us_topic_0000001477419728.html",
+ "node_id":"en-us_topic_0000001477419728.xml",
"product_code":"css",
- "code":"66",
- "des":"You can customize the username, role name, and tenant name in Kibana.",
- "doc_type":"usermanual",
- "kw":"Kibana Usage Restrictions,Kibana Platform,User Guide",
- "search_title":"",
- "metedata":[
- {
- "prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
- "documenttype":"usermanual",
- "IsBot":"Yes"
- }
- ],
- "title":"Kibana Usage Restrictions",
- "githuburl":""
- },
- {
- "uri":"css_01_0108.html",
- "node_id":"css_01_0108.xml",
- "product_code":"css",
- "code":"67",
- "des":"After creating a CSS cluster, you can log in to Kibana through the console or public network.Logging in to the consoleLog in to the CSS management console.On the Clusters",
+ "code":"70",
+ "des":"After creating a CSS cluster, you can log in to Kibana through the console or public network.You can customize the username, role name, and tenant name in Kibana.Logging ",
"doc_type":"usermanual",
"kw":"Logging In to Kibana,Kibana Platform,User Guide",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Logging In to Kibana",
"githuburl":""
},
{
- "uri":"css_01_0088.html",
- "node_id":"css_01_0088.xml",
+ "uri":"en-us_topic_0000001477419764.html",
+ "node_id":"en-us_topic_0000001477419764.xml",
"product_code":"css",
- "code":"68",
+ "code":"71",
"des":"For CSS clusters that have security mode enabled, you can enable Kibana public access. After the configuration is complete, an IP address will be provided to access Kiban",
"doc_type":"usermanual",
"kw":"Accessing a Cluster from a Kibana Public Network,Kibana Platform,User Guide",
@@ -1406,20 +1475,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Accessing a Cluster from a Kibana Public Network",
"githuburl":""
},
{
- "uri":"css_01_0109.html",
- "node_id":"css_01_0109.xml",
+ "uri":"en-us_topic_0000001528379273.html",
+ "node_id":"en-us_topic_0000001528379273.xml",
"product_code":"css",
- "code":"69",
+ "code":"72",
"des":"CSS uses the opendistro_security plug-in to provide security cluster capabilities. The opendistro_security plug-in is built based on the RBAC model. RBAC involves three c",
"doc_type":"usermanual",
"kw":"Creating a User and Granting Permissions by Using Kibana,Kibana Platform,User Guide",
@@ -1427,20 +1496,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Creating a User and Granting Permissions by Using Kibana",
"githuburl":""
},
{
- "uri":"css_02_0097.html",
- "node_id":"css_02_0097.xml",
+ "uri":"en-us_topic_0000001477419776.html",
+ "node_id":"en-us_topic_0000001477419776.xml",
"product_code":"css",
- "code":"70",
+ "code":"73",
"des":"To interconnect user-built Kibana with CSS Elasticsearch clusters, the following conditions must be met:The local environment must support access from external networks.K",
"doc_type":"usermanual",
"kw":"Connecting User-Built Kibana to an Elasticsearch Cluster,Kibana Platform,User Guide",
@@ -1448,20 +1517,944 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Connecting User-Built Kibana to an Elasticsearch Cluster",
"githuburl":""
},
{
- "uri":"css_01_0111.html",
- "node_id":"css_01_0111.xml",
+ "uri":"en-us_topic_0000001633949601.html",
+ "node_id":"en-us_topic_0000001633949601.xml",
"product_code":"css",
- "code":"71",
+ "code":"74",
+ "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "doc_type":"usermanual",
+ "kw":"OpenSearch",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"OpenSearch",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001584149004.html",
+ "node_id":"en-us_topic_0000001584149004.xml",
+ "product_code":"css",
+ "code":"75",
+ "des":"This section describes how to create an OpenSearch cluster.Public IP address access and Kibana public access can be used only after security mode is enabled.When creating",
+ "doc_type":"usermanual",
+ "kw":"Creating a Cluster,OpenSearch,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "IsMulti":"No",
+ "opensource":"true",
+ "documenttype":"usermanual",
+ "IsBot":"Yes"
+ }
+ ],
+ "title":"Creating a Cluster",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001583669892.html",
+ "node_id":"en-us_topic_0000001583669892.xml",
+ "product_code":"css",
+ "code":"76",
+ "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "doc_type":"usermanual",
+ "kw":"Accessing a Cluster",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Accessing a Cluster",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001641003025.html",
+ "node_id":"en-us_topic_0000001641003025.xml",
+ "product_code":"css",
+ "code":"77",
+ "des":"OpenSearch clusters have built-in Kibana and Cerebro components. You can quickly access an OpenSearch cluster through Kibana and Cerebro.Log in to the CSS management cons",
+ "doc_type":"usermanual",
+ "kw":"Quickly Accessing an OpenSearch Cluster,Accessing a Cluster,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Quickly Accessing an OpenSearch Cluster",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001590963076.html",
+ "node_id":"en-us_topic_0000001590963076.xml",
+ "product_code":"css",
+ "code":"78",
+ "des":"You can access a security cluster that has the HTTPS access enabled through the public IP address provided by the system.By default, CSS uses a shared load balancer for p",
+ "doc_type":"usermanual",
+ "kw":"Accessing a Cluster from a Public Network,Accessing a Cluster,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Accessing a Cluster from a Public Network",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001590323656.html",
+ "node_id":"en-us_topic_0000001590323656.xml",
+ "product_code":"css",
+ "code":"79",
+ "des":"If the VPC endpoint service is enabled, you can use a private domain name or node IP address generated by the endpoint to access the cluster. When the VPC endpoint servic",
+ "doc_type":"usermanual",
+ "kw":"Accessing a Cluster Using a VPC Endpoint,Accessing a Cluster,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Accessing a Cluster Using a VPC Endpoint",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001591285452.html",
+ "node_id":"en-us_topic_0000001591285452.xml",
+ "product_code":"css",
+ "code":"80",
+ "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "doc_type":"usermanual",
+ "kw":"(Optional) Interconnecting with a Dedicated Load Balancer",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"(Optional) Interconnecting with a Dedicated Load Balancer",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001640645481.html",
+ "node_id":"en-us_topic_0000001640645481.xml",
+ "product_code":"css",
+ "code":"81",
+ "des":"CSS integrates shared load balancers and allows you to bind public network access and enable the VPC Endpoint service. Dedicated load balancers provide more functions and",
+ "doc_type":"usermanual",
+ "kw":"Scenario Description,(Optional) Interconnecting with a Dedicated Load Balancer,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Scenario Description",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001640764229.html",
+ "node_id":"en-us_topic_0000001640764229.xml",
+ "product_code":"css",
+ "code":"82",
+ "des":"This section describes how to connect a CSS cluster to a dedicated load balancer.If the target ELB listener uses the HTTP protocol, skip this step.Prepare and upload a se",
+ "doc_type":"usermanual",
+ "kw":"Connecting to a Dedicated Load Balancer,(Optional) Interconnecting with a Dedicated Load Balancer,Us",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Connecting to a Dedicated Load Balancer",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001590603388.html",
+ "node_id":"en-us_topic_0000001590603388.xml",
+ "product_code":"css",
+ "code":"83",
+ "des":"This section provides the sample code for two-way authentication during the access to a cluster from a Java client.",
+ "doc_type":"usermanual",
+ "kw":"Sample Code for Two-Way Authentication During the Access to a Cluster,(Optional) Interconnecting wit",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Sample Code for Two-Way Authentication During the Access to a Cluster",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001640883633.html",
+ "node_id":"en-us_topic_0000001640883633.xml",
+ "product_code":"css",
+ "code":"84",
+ "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "doc_type":"usermanual",
+ "kw":"Index Backup and Restoration",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Index Backup and Restoration",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001641003029.html",
+ "node_id":"en-us_topic_0000001641003029.xml",
+ "product_code":"css",
+ "code":"85",
+ "des":"You can back up index data in clusters. If data loss occurs or you want to retrieve data of a specified duration, you can restore the index data. Index backup is implemen",
+ "doc_type":"usermanual",
+ "kw":"Backup and Restoration Overview,Index Backup and Restoration,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Backup and Restoration Overview",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001590963080.html",
+ "node_id":"en-us_topic_0000001590963080.xml",
+ "product_code":"css",
+ "code":"86",
+ "des":"Snapshots are automatically created at a specified time each day according to the rules you create. You can enable or disable the automatic snapshot creation function and",
+ "doc_type":"usermanual",
+ "kw":"Managing Automatic Snapshot Creation,Index Backup and Restoration,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Managing Automatic Snapshot Creation",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001590323664.html",
+ "node_id":"en-us_topic_0000001590323664.xml",
+ "product_code":"css",
+ "code":"87",
+ "des":"You can manually create a snapshot at any time to back up all data or data of specified indexes.To use the function of creating or restoring snapshots, the account or IAM",
+ "doc_type":"usermanual",
+ "kw":"Manually Creating a Snapshot,Index Backup and Restoration,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Manually Creating a Snapshot",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001591285456.html",
+ "node_id":"en-us_topic_0000001591285456.xml",
+ "product_code":"css",
+ "code":"88",
+ "des":"You can use existing snapshots to restore the backup index data to a specified cluster.To use the function of creating or restoring snapshots, the account or IAM user log",
+ "doc_type":"usermanual",
+ "kw":"Restoring Data,Index Backup and Restoration,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Restoring Data",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001640645485.html",
+ "node_id":"en-us_topic_0000001640645485.xml",
+ "product_code":"css",
+ "code":"89",
+ "des":"If you no longer need a snapshot, delete it to release storage resources. If the automatic snapshot creation function is enabled, snapshots that are automatically created",
+ "doc_type":"usermanual",
+ "kw":"Deleting a Snapshot,Index Backup and Restoration,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Deleting a Snapshot",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001637436445.html",
+ "node_id":"en-us_topic_0000001637436445.xml",
+ "product_code":"css",
+ "code":"90",
+ "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "doc_type":"usermanual",
+ "kw":"Scaling In/Out a Cluster",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Scaling In/Out a Cluster",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001640773493.html",
+ "node_id":"en-us_topic_0000001640773493.xml",
+ "product_code":"css",
+ "code":"91",
+ "des":"You can scale in or out a cluster and change cluster specifications. In this way, you can improve cluster efficiency and reduce O&M costs.Scaling Out a ClusterIf a data n",
+ "doc_type":"usermanual",
+ "kw":"Overview,Scaling In/Out a Cluster,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Overview",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001590332948.html",
+ "node_id":"en-us_topic_0000001590332948.xml",
+ "product_code":"css",
+ "code":"92",
+ "des":"If the workloads on the data plane of a cluster change, you can scale out the cluster by increasing the number or capacity of its nodes. Services are not interrupted duri",
+ "doc_type":"usermanual",
+ "kw":"Scaling Out a Cluster,Scaling In/Out a Cluster,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Scaling Out a Cluster",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001641012329.html",
+ "node_id":"en-us_topic_0000001641012329.xml",
+ "product_code":"css",
+ "code":"93",
+ "des":"If the workloads on the data plane of a cluster change, you can change its node specifications as needed.The target cluster is available and has no tasks in progress.The ",
+ "doc_type":"usermanual",
+ "kw":"Changing Specifications,Scaling In/Out a Cluster,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Changing Specifications",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001590972372.html",
+ "node_id":"en-us_topic_0000001590972372.xml",
+ "product_code":"css",
+ "code":"94",
+ "des":"If a cluster can process existing data without fully using its resources, you can scale in the cluster to reduce costs. You are advised to scale in clusters during off-pe",
+ "doc_type":"usermanual",
+ "kw":"Scaling in a Cluster,Scaling In/Out a Cluster,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Scaling in a Cluster",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001590612676.html",
+ "node_id":"en-us_topic_0000001590612676.xml",
+ "product_code":"css",
+ "code":"95",
+ "des":"If a cluster can process existing data without fully using its nodes, you can remove one or more specified nodes from the cluster to reduce costs. Services will not be in",
+ "doc_type":"usermanual",
+ "kw":"Removing Specified Nodes,Scaling In/Out a Cluster,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Removing Specified Nodes",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001640892937.html",
+ "node_id":"en-us_topic_0000001640892937.xml",
+ "product_code":"css",
+ "code":"96",
+ "des":"If a node in the cluster is faulty, you can create a new node with the same specifications to replace it.The target cluster is available and has no tasks in progress.Only",
+ "doc_type":"usermanual",
+ "kw":"Replacing a Specified Node,Scaling In/Out a Cluster,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Replacing a Specified Node",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001640654793.html",
+ "node_id":"en-us_topic_0000001640654793.xml",
+ "product_code":"css",
+ "code":"97",
+ "des":"If workloads on the data plane of a cluster increase, you can add master or client nodes as needed. Services are not interrupted while they are added.The cluster is in th",
+ "doc_type":"usermanual",
+ "kw":"Adding Master/Client Nodes,Scaling In/Out a Cluster,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Adding Master/Client Nodes",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001591294758.html",
+ "node_id":"en-us_topic_0000001591294758.xml",
+ "product_code":"css",
+ "code":"98",
+ "des":"After a cluster is created, its security mode can be changed in the following methods:Switching from the Non-Security Mode to Security ModeSwitching from the Security to ",
+ "doc_type":"usermanual",
+ "kw":"Changing the Security Mode,Scaling In/Out a Cluster,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Changing the Security Mode",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001640773505.html",
+ "node_id":"en-us_topic_0000001640773505.xml",
+ "product_code":"css",
+ "code":"99",
+ "des":"CSS supports cross-AZ deployment. You can add an AZ to obtain more resources or improve cluster availability, and can migrate your current AZ to one with higher specifica",
+ "doc_type":"usermanual",
+ "kw":"Changing AZs,Scaling In/Out a Cluster,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Changing AZs",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001583989096.html",
+ "node_id":"en-us_topic_0000001583989096.xml",
+ "product_code":"css",
+ "code":"100",
+ "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "doc_type":"usermanual",
+ "kw":"Managing Clusters",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Managing Clusters",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001583669884.html",
+ "node_id":"en-us_topic_0000001583669884.xml",
+ "product_code":"css",
+ "code":"101",
+ "des":"On the basic information page of an Opensearch cluster, you can view the private network address, public network address, version, and node of the cluster.Log in to the C",
+ "doc_type":"usermanual",
+ "kw":"Viewing Basic Information About an Opensearch Cluster,Managing Clusters,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Viewing Basic Information About an Opensearch Cluster",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001647464345.html",
+ "node_id":"en-us_topic_0000001647464345.xml",
+ "product_code":"css",
+ "code":"102",
+ "des":"Tags are cluster identifiers. Adding tags to clusters can help you identify and manage your cluster resources.You can add tags to a cluster when creating the cluster or a",
+ "doc_type":"usermanual",
+ "kw":"Managing Tags,Managing Clusters,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Managing Tags",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001591456866.html",
+ "node_id":"en-us_topic_0000001591456866.xml",
+ "product_code":"css",
+ "code":"103",
+ "des":"CSS provides log backup and search functions to help you locate faults. You can back up cluster logs to OBS buckets and download required log files to analyze and locate ",
+ "doc_type":"usermanual",
+ "kw":"Managing Logs,Managing Clusters,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Managing Logs",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001640777441.html",
+ "node_id":"en-us_topic_0000001640777441.xml",
+ "product_code":"css",
+ "code":"104",
+ "des":"You can modify the elasticsearch.yml file.Log in to the CSS management console.Choose Clusters in the navigation pane. On the Clusters page, click the name of the target ",
+ "doc_type":"usermanual",
+ "kw":"Configuring YML Parameters,Managing Clusters,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Configuring YML Parameters",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001591616594.html",
+ "node_id":"en-us_topic_0000001591616594.xml",
+ "product_code":"css",
+ "code":"105",
+ "des":"CSS clusters have default plug-ins. You can view the default plugin information on the console or Kibana.Log in to the CSS management console.In the navigation pane, choo",
+ "doc_type":"usermanual",
+ "kw":"Viewing the Default Plugin List,Managing Clusters,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Viewing the Default Plugin List",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001640879293.html",
+ "node_id":"en-us_topic_0000001640879293.xml",
+ "product_code":"css",
+ "code":"106",
+ "des":"If a cluster becomes faulty, you can restart it to check if it can run normally.The target cluster is not frozen and has no task in progress.If a cluster is available, en",
+ "doc_type":"usermanual",
+ "kw":"Restarting a Cluster,Managing Clusters,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Restarting a Cluster",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001640998693.html",
+ "node_id":"en-us_topic_0000001640998693.xml",
+ "product_code":"css",
+ "code":"107",
+ "des":"You can delete clusters that you no longer need.If you delete a cluster, the cluster service data will be cleared. Exercise caution when performing this operation.The sna",
+ "doc_type":"usermanual",
+ "kw":"Deleting a Cluster,Managing Clusters,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Deleting a Cluster",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001815107861.html",
+ "node_id":"en-us_topic_0000001815107861.xml",
+ "product_code":"css",
+ "code":"108",
+ "des":"By default, CSS has installed the open-source alert plugin opensearch-alerting for OpenSearch clusters to send notifications when data meets specific conditions. This plu",
+ "doc_type":"usermanual",
+ "kw":"Configuring SMN Alarms,OpenSearch,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Configuring SMN Alarms",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001591776270.html",
+ "node_id":"en-us_topic_0000001591776270.xml",
+ "product_code":"css",
+ "code":"109",
+ "des":"CSS provides you with cold data nodes. You can store data that requires query response in seconds on hot data nodes with high performance and store historical data that r",
+ "doc_type":"usermanual",
+ "kw":"Switching Hot and Cold Data,OpenSearch,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Switching Hot and Cold Data",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001640658697.html",
+ "node_id":"en-us_topic_0000001640658697.xml",
+ "product_code":"css",
+ "code":"110",
+ "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "doc_type":"usermanual",
+ "kw":"Managing Indexes",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Managing Indexes",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001591298678.html",
+ "node_id":"en-us_topic_0000001591298678.xml",
+ "product_code":"css",
+ "code":"111",
+ "des":"You can manage the indexes of OpenSearch clusters. ISM is a plugin that allows you to automate periodic and administrative operations based on changes on the index age, i",
+ "doc_type":"usermanual",
+ "kw":"Creating and Managing Index Policies,Managing Indexes,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Creating and Managing Index Policies",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001641016221.html",
+ "node_id":"en-us_topic_0000001641016221.xml",
+ "product_code":"css",
+ "code":"112",
+ "des":"You can change any managed index policy. ISM has constraints to ensure that policy changes do not break indexes.If an index is stuck in its current status and you want to",
+ "doc_type":"usermanual",
+ "kw":"Changing an Index Policy,Managing Indexes,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Changing an Index Policy",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001587956758.html",
+ "node_id":"en-us_topic_0000001587956758.xml",
+ "product_code":"css",
+ "code":"113",
+ "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "doc_type":"usermanual",
+ "kw":"OpenSearch Dashboards",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"OpenSearch Dashboards",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001591776274.html",
+ "node_id":"en-us_topic_0000001591776274.xml",
+ "product_code":"css",
+ "code":"114",
+ "des":"An OpenSearch cluster has been created.Logging in to the consoleLog in to the CSS management console.In the navigation pane, choose Clusters > OpenSearch.On the Clusters ",
+ "doc_type":"usermanual",
+ "kw":"Logging In to the OpenSearch Dashboards,OpenSearch Dashboards,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Logging In to the OpenSearch Dashboards",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001640658701.html",
+ "node_id":"en-us_topic_0000001640658701.xml",
+ "product_code":"css",
+ "code":"115",
+ "des":"For CSS clusters that have security mode enabled, you can enable Kibana public access. After the configuration is complete, an IP address will be provided to access Kiban",
+ "doc_type":"usermanual",
+ "kw":"Accessing a Cluster from a Kibana Public Network,OpenSearch Dashboards,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Accessing a Cluster from a Kibana Public Network",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001591298682.html",
+ "node_id":"en-us_topic_0000001591298682.xml",
+ "product_code":"css",
+ "code":"116",
+ "des":"The security mode has been enabled for the OpenSearch cluster.You can customize the username, role name, and tenant name in the OpenSearch Dashboards.Log in to the CSS ma",
+ "doc_type":"usermanual",
+ "kw":"Creating and Authorizing a User on the OpenSearch Dashboards,OpenSearch Dashboards,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Creating and Authorizing a User on the OpenSearch Dashboards",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001477579368.html",
+ "node_id":"en-us_topic_0000001477579368.xml",
+ "product_code":"css",
+ "code":"117",
+ "des":"On the Dashboard page of the CSS management console, you can view information about the status and storage capacity of existing clusters.",
+ "doc_type":"usermanual",
+ "kw":"Viewing the Cluster Runtime Status and Storage Capacity Status,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Viewing the Cluster Runtime Status and Storage Capacity Status",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001528659089.html",
+ "node_id":"en-us_topic_0000001528659089.xml",
+ "product_code":"css",
+ "code":"118",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Enhanced Cluster Features",
@@ -1469,20 +2462,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Enhanced Cluster Features",
"githuburl":""
},
{
- "uri":"css_01_0117.html",
- "node_id":"css_01_0117.xml",
+ "uri":"en-us_topic_0000001477899160.html",
+ "node_id":"en-us_topic_0000001477899160.xml",
"product_code":"css",
- "code":"72",
+ "code":"119",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Vector Retrieval",
@@ -1490,20 +2483,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Vector Retrieval",
"githuburl":""
},
{
- "uri":"css_01_0118.html",
- "node_id":"css_01_0118.xml",
+ "uri":"en-us_topic_0000001528299617.html",
+ "node_id":"en-us_topic_0000001528299617.xml",
"product_code":"css",
- "code":"73",
+ "code":"120",
"des":"Image recognition and retrieval, video search, and personalized recommendation impose high requirements on the latency and accuracy of high-dimensional space vector retri",
"doc_type":"usermanual",
"kw":"Description,Vector Retrieval,User Guide",
@@ -1511,20 +2504,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Description",
"githuburl":""
},
{
- "uri":"css_01_0122.html",
- "node_id":"css_01_0122.xml",
+ "uri":"en-us_topic_0000001477419716.html",
+ "node_id":"en-us_topic_0000001477419716.xml",
"product_code":"css",
- "code":"74",
+ "code":"121",
"des":"Off-heap memory is used for index construction and query in vector retrieval. Therefore, the required cluster capacity is related to the index type and off-heap memory si",
"doc_type":"usermanual",
"kw":"Cluster Planning for Vector Retrieval,Vector Retrieval,User Guide",
@@ -1532,41 +2525,41 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Cluster Planning for Vector Retrieval",
"githuburl":""
},
{
- "uri":"css_01_0121.html",
- "node_id":"css_01_0121.xml",
+ "uri":"en-us_topic_0000001528299557.html",
+ "node_id":"en-us_topic_0000001528299557.xml",
"product_code":"css",
- "code":"75",
- "des":"A cluster of version 7.6.2 or 7.10.2 has been created by referring to Cluster Planning for Vector Retrieval.Cluster advanced settings have been configured as required by ",
+ "code":"122",
+ "des":"You have created a cluster by referring to Cluster Planning for Vector Retrieval. The cluster must be an Elasticsearch cluster of version 7.6.2 or 7.10.2, or an OpenSearc",
"doc_type":"usermanual",
"kw":"Creating a Vector Index,Vector Retrieval,User Guide",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Creating a Vector Index",
"githuburl":""
},
{
- "uri":"css_01_0123.html",
- "node_id":"css_01_0123.xml",
+ "uri":"en-us_topic_0000001477899192.html",
+ "node_id":"en-us_topic_0000001477899192.xml",
"product_code":"css",
- "code":"76",
+ "code":"123",
"des":"Standard vector query syntax is provided for vector fields with vector indexes. The following command will return n (specified by size/topk) data records that are most cl",
"doc_type":"usermanual",
"kw":"Querying Vectors,Vector Retrieval,User Guide",
@@ -1574,20 +2567,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Querying Vectors",
"githuburl":""
},
{
- "uri":"css_01_0126.html",
- "node_id":"css_01_0126.xml",
+ "uri":"en-us_topic_0000001528659117.html",
+ "node_id":"en-us_topic_0000001528659117.xml",
"product_code":"css",
- "code":"77",
+ "code":"124",
"des":"To reduce the cost of backup, disable the backup function before data import and enable it afterwards.Set refresh_interval to 120s or a larger value. Larger segments can ",
"doc_type":"usermanual",
"kw":"Optimizing the Performance of Vector Retrieval,Vector Retrieval,User Guide",
@@ -1595,20 +2588,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Optimizing the Performance of Vector Retrieval",
"githuburl":""
},
{
- "uri":"css_01_0124.html",
- "node_id":"css_01_0124.xml",
+ "uri":"en-us_topic_0000001528299609.html",
+ "node_id":"en-us_topic_0000001528299609.xml",
"product_code":"css",
- "code":"78",
+ "code":"125",
"des":"When you perform operations in Creating a Vector Index, if IVF_GRAPH and IVF_GRAPH_PQ index algorithms are selected, you need to pre-build and register the center point v",
"doc_type":"usermanual",
"kw":"(Optional) Pre-Building and Registering a Center Point Vector,Vector Retrieval,User Guide",
@@ -1616,20 +2609,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"(Optional) Pre-Building and Registering a Center Point Vector",
"githuburl":""
},
{
- "uri":"css_01_0130.html",
- "node_id":"css_01_0130.xml",
+ "uri":"en-us_topic_0000001477899208.html",
+ "node_id":"en-us_topic_0000001477899208.xml",
"product_code":"css",
- "code":"79",
+ "code":"126",
"des":"The vector retrieval engine is developed in C++ and uses off-heap memory. You can use the following APIs to manage the index cache.View cache statistics.GET /_vector/stat",
"doc_type":"usermanual",
"kw":"Managing the Vector Index Cache,Vector Retrieval,User Guide",
@@ -1637,20 +2630,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Managing the Vector Index Cache",
"githuburl":""
},
{
- "uri":"css_01_0129.html",
- "node_id":"css_01_0129.xml",
+ "uri":"en-us_topic_0000001528499121.html",
+ "node_id":"en-us_topic_0000001528499121.xml",
"product_code":"css",
- "code":"80",
+ "code":"127",
"des":"Elasticsearch provides standard REST APIs and clients developed using Java, Python, and Go.Based on the open-source dataset SIFT1M (http://corpus-texmex.irisa.fr/) and Py",
"doc_type":"usermanual",
"kw":"Sample Code for Vector Search on a Client,Vector Retrieval,User Guide",
@@ -1658,20 +2651,41 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Sample Code for Vector Search on a Client",
"githuburl":""
},
{
- "uri":"css_01_0112.html",
- "node_id":"css_01_0112.xml",
+ "uri":"en-us_topic_0000001814230837.html",
+ "node_id":"en-us_topic_0000001814230837.xml",
"product_code":"css",
- "code":"81",
+ "code":"128",
+ "des":"PV_GRAPH deeply optimizes the HNSW algorithm and supports the vector and scalar joint filtering. When the vector and scalar joint filtering is used, the result filling ra",
+ "doc_type":"usermanual",
+ "kw":"Using PV_GRAPH to Search for Vector Indexes,Vector Retrieval,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Using PV_GRAPH to Search for Vector Indexes",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001528499153.html",
+ "node_id":"en-us_topic_0000001528499153.xml",
+ "product_code":"css",
+ "code":"129",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Storage-Compute Decoupling",
@@ -1679,20 +2693,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Storage-Compute Decoupling",
"githuburl":""
},
{
- "uri":"css_01_0113.html",
- "node_id":"css_01_0113.xml",
+ "uri":"en-us_topic_0000001477739384.html",
+ "node_id":"en-us_topic_0000001477739384.xml",
"product_code":"css",
- "code":"82",
+ "code":"130",
"des":"You can store hot data on SSD to achieve the optimal query performance, and store historical data in OBS to reduce data storage costs.A large volume of data is written to",
"doc_type":"usermanual",
"kw":"Context,Storage-Compute Decoupling,User Guide",
@@ -1700,20 +2714,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Context",
"githuburl":""
},
{
- "uri":"css_01_0114.html",
- "node_id":"css_01_0114.xml",
+ "uri":"en-us_topic_0000001528299569.html",
+ "node_id":"en-us_topic_0000001528299569.xml",
"product_code":"css",
- "code":"83",
+ "code":"131",
"des":"Before freezing an index, ensure no data is being written to it. The index will be set to read only before being frozen, and data write will fail.After an index is frozen",
"doc_type":"usermanual",
"kw":"Freezing an Index,Storage-Compute Decoupling,User Guide",
@@ -1721,20 +2735,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Freezing an Index",
"githuburl":""
},
{
- "uri":"css_01_0116.html",
- "node_id":"css_01_0116.xml",
+ "uri":"en-us_topic_0000001528379309.html",
+ "node_id":"en-us_topic_0000001528379309.xml",
"product_code":"css",
- "code":"84",
+ "code":"132",
"des":"After data is dumped to OBS, some data is cached to reduce access to OBS and improve Elasticsearch query performance. Data that is requested for the first time is obtaine",
"doc_type":"usermanual",
"kw":"Configuring Cache,Storage-Compute Decoupling,User Guide",
@@ -1742,20 +2756,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Configuring Cache",
"githuburl":""
},
{
- "uri":"css_01_0187.html",
- "node_id":"css_01_0187.xml",
+ "uri":"en-us_topic_0000001477739360.html",
+ "node_id":"en-us_topic_0000001477739360.xml",
"product_code":"css",
- "code":"85",
+ "code":"133",
"des":"When you query data on the Discover page of Kibana for the first time, all data needs to be obtained from OBS because there is no cache. If a large number of documents ar",
"doc_type":"usermanual",
"kw":"Enhanced Cold Data Query Performance,Storage-Compute Decoupling,User Guide",
@@ -1763,20 +2777,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Enhanced Cold Data Query Performance",
"githuburl":""
},
{
- "uri":"css_01_0202.html",
- "node_id":"css_01_0202.xml",
+ "uri":"en-us_topic_0000001528379301.html",
+ "node_id":"en-us_topic_0000001528379301.xml",
"product_code":"css",
- "code":"86",
+ "code":"134",
"des":"To clearly display the operations of the storage and compute decoupling plugin in OBS, the real-time OBS rate metric is added to CSS and recorded in the system index.This",
"doc_type":"usermanual",
"kw":"Monitoring OBS Operations,Storage-Compute Decoupling,User Guide",
@@ -1784,20 +2798,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Monitoring OBS Operations",
"githuburl":""
},
{
- "uri":"css_01_0227.html",
- "node_id":"css_01_0227.xml",
+ "uri":"en-us_topic_0000001533988876.html",
+ "node_id":"en-us_topic_0000001533988876.xml",
"product_code":"css",
- "code":"87",
+ "code":"135",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Enhanced Import Performance",
@@ -1805,20 +2819,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Enhanced Import Performance",
"githuburl":""
},
{
- "uri":"css_01_0228.html",
- "node_id":"css_01_0228.xml",
+ "uri":"en-us_topic_0000001533829376.html",
+ "node_id":"en-us_topic_0000001533829376.xml",
"product_code":"css",
- "code":"88",
+ "code":"136",
"des":"CSS provides enhanced data import function. It optimizes bulk route, and speeds up processing through indexes and word segmentation, improving import performance and redu",
"doc_type":"usermanual",
"kw":"Context,Enhanced Import Performance,User Guide",
@@ -1826,20 +2840,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Context",
"githuburl":""
},
{
- "uri":"css_01_0229.html",
- "node_id":"css_01_0229.xml",
+ "uri":"en-us_topic_0000001585148465.html",
+ "node_id":"en-us_topic_0000001585148465.xml",
"product_code":"css",
- "code":"89",
+ "code":"137",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Instructions",
@@ -1847,20 +2861,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Instructions",
"githuburl":""
},
{
- "uri":"css_01_0230.html",
- "node_id":"css_01_0230.xml",
+ "uri":"en-us_topic_0000001534148568.html",
+ "node_id":"en-us_topic_0000001534148568.xml",
"product_code":"css",
- "code":"90",
+ "code":"138",
"des":"According to the default routing rule of Elasticsearch, data in a bulk request is routed to different shards. When massive data is written and a large number of index sha",
"doc_type":"usermanual",
"kw":"Bulk Route Optimization,Instructions,User Guide",
@@ -1868,20 +2882,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Bulk Route Optimization",
"githuburl":""
},
{
- "uri":"css_01_0231.html",
- "node_id":"css_01_0231.xml",
+ "uri":"en-us_topic_0000001534308508.html",
+ "node_id":"en-us_topic_0000001534308508.xml",
"product_code":"css",
- "code":"91",
+ "code":"139",
"des":"You can specify the index.aggr_perf_batch_size configuration item to enable or disable batch import optimization. After the batch import function is enabled, documents in",
"doc_type":"usermanual",
"kw":"Bulk Aggregation Optimization,Instructions,User Guide",
@@ -1889,20 +2903,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Bulk Aggregation Optimization",
"githuburl":""
},
{
- "uri":"css_01_0232.html",
- "node_id":"css_01_0232.xml",
+ "uri":"en-us_topic_0000001584828717.html",
+ "node_id":"en-us_topic_0000001584828717.xml",
"product_code":"css",
- "code":"92",
+ "code":"140",
"des":"You can configure index.native_speed_up to enable or disable text index acceleration. This function optimizes the index process and memory usage to accelerate index build",
"doc_type":"usermanual",
"kw":"Text Index Acceleration,Instructions,User Guide",
@@ -1910,20 +2924,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Text Index Acceleration",
"githuburl":""
},
{
- "uri":"css_01_0233_0.html",
- "node_id":"css_01_0233_0.xml",
+ "uri":"en-us_topic_0000001584988497.html",
+ "node_id":"en-us_topic_0000001584988497.xml",
"product_code":"css",
- "code":"93",
+ "code":"141",
"des":"After the import performance is enhanced, the number of index merge tasks increases accordingly. You can adjust the following configuration to reduce the impact of merge ",
"doc_type":"usermanual",
"kw":"Optimization of Other Parameters,Instructions,User Guide",
@@ -1931,62 +2945,41 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Optimization of Other Parameters",
"githuburl":""
},
{
- "uri":"css_01_0234.html",
- "node_id":"css_01_0234.xml",
+ "uri":"en-us_topic_0000001584708761.html",
+ "node_id":"en-us_topic_0000001584708761.xml",
"product_code":"css",
- "code":"94",
- "des":"Test environmentCluster: 3 M6 ECSs (8 vCPUs | 64 GB memory)Data: open-source web server access logs and internal service dataset (dns_logs)Configuration: 120 shards, no r",
+ "code":"142",
+ "des":"Test environmentCluster: 3 Cloud M6 ECSs (8 vCPUs | 64 GB memory)Data: open-source web server access logs and internal service dataset (dns_logs)Configuration: 120 shards",
"doc_type":"usermanual",
"kw":"Performance Data,Enhanced Import Performance,User Guide",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Performance Data",
"githuburl":""
},
{
- "uri":"css_01_0200.html",
- "node_id":"css_01_0200.xml",
+ "uri":"en-us_topic_0000001477899176.html",
+ "node_id":"en-us_topic_0000001477899176.xml",
"product_code":"css",
- "code":"95",
- "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "doc_type":"usermanual",
- "kw":"Flow Control",
- "search_title":"",
- "metedata":[
- {
- "prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
- "documenttype":"usermanual",
- "IsBot":"Yes"
- }
- ],
- "title":"Flow Control",
- "githuburl":""
- },
- {
- "uri":"css_01_0191.html",
- "node_id":"css_01_0191.xml",
- "product_code":"css",
- "code":"96",
+ "code":"143",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Flow Control 2.0",
@@ -1994,20 +2987,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Flow Control 2.0",
"githuburl":""
},
{
- "uri":"css_01_0192.html",
- "node_id":"css_01_0192.xml",
+ "uri":"en-us_topic_0000001528379257.html",
+ "node_id":"en-us_topic_0000001528379257.xml",
"product_code":"css",
- "code":"97",
+ "code":"144",
"des":"CSS can control traffic at the node level. You can configure the blacklist and whitelist, the maximum concurrent HTTP connections, and the maximum HTTP connections for a ",
"doc_type":"usermanual",
"kw":"Context,Flow Control 2.0,User Guide",
@@ -2015,41 +3008,41 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Context",
"githuburl":""
},
{
- "uri":"css_01_0193.html",
- "node_id":"css_01_0193.xml",
+ "uri":"en-us_topic_0000001477579372.html",
+ "node_id":"en-us_topic_0000001477579372.xml",
"product_code":"css",
- "code":"98",
- "des":"You can run commands in Kibana to enable or disable HTTP/HTTPS flow control for your cluster. The command parameters are as follows.Log in to the CSS management console.C",
+ "code":"145",
+ "des":"You can run commands in Kibana to enable or disable HTTP/HTTPS flow control for your cluster.Log in to the CSS management console.Choose Clusters in the navigation pane. ",
"doc_type":"usermanual",
"kw":"HTTP/HTTPS Flow Control,Flow Control 2.0,User Guide",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"HTTP/HTTPS Flow Control",
"githuburl":""
},
{
- "uri":"css_01_0194.html",
- "node_id":"css_01_0194.xml",
+ "uri":"en-us_topic_0000001477419736.html",
+ "node_id":"en-us_topic_0000001477419736.xml",
"product_code":"css",
- "code":"99",
+ "code":"146",
"des":"Elasticsearch provides a circuit breaker, which will terminate requests or return the error code 429 if the memory usage exceeds its threshold. However, the circuit break",
"doc_type":"usermanual",
"kw":"Memory Flow Control,Flow Control 2.0,User Guide",
@@ -2057,20 +3050,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Memory Flow Control",
"githuburl":""
},
{
- "uri":"css_01_0195.html",
- "node_id":"css_01_0195.xml",
+ "uri":"en-us_topic_0000001477739364.html",
+ "node_id":"en-us_topic_0000001477739364.xml",
"product_code":"css",
- "code":"100",
+ "code":"147",
"des":"Request sampling can record the access of client IP addresses and the type of requests from the client. Based on the statistics, you can identify the access traffic of cl",
"doc_type":"usermanual",
"kw":"Request Sampling,Flow Control 2.0,User Guide",
@@ -2078,20 +3071,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Request Sampling",
"githuburl":""
},
{
- "uri":"css_01_0196.html",
- "node_id":"css_01_0196.xml",
+ "uri":"en-us_topic_0000001477579364.html",
+ "node_id":"en-us_topic_0000001477579364.xml",
"product_code":"css",
- "code":"101",
+ "code":"148",
"des":"You can block all connections in one click, except the connections that passes through O&M APIs, to handle unexpected traffic burst and quickly recover your cluster.Log i",
"doc_type":"usermanual",
"kw":"One-click Traffic Blocking,Flow Control 2.0,User Guide",
@@ -2099,20 +3092,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"One-click Traffic Blocking",
"githuburl":""
},
{
- "uri":"css_01_0198.html",
- "node_id":"css_01_0198.xml",
+ "uri":"en-us_topic_0000001477419720.html",
+ "node_id":"en-us_topic_0000001477419720.xml",
"product_code":"css",
- "code":"102",
+ "code":"149",
"des":"Flow control can be implemented via an independent API.Log in to the CSS management console.Choose Clusters in the navigation pane. On the Clusters page, locate the targe",
"doc_type":"usermanual",
"kw":"Access Statistics and Traffic Control Information Query,Flow Control 2.0,User Guide",
@@ -2120,20 +3113,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Access Statistics and Traffic Control Information Query",
"githuburl":""
},
{
- "uri":"css_01_0199.html",
- "node_id":"css_01_0199.xml",
+ "uri":"en-us_topic_0000001528659149.html",
+ "node_id":"en-us_topic_0000001528659149.xml",
"product_code":"css",
- "code":"103",
+ "code":"150",
"des":"You can check access logs in either of the following ways:Enable and check access logs via an independent API. Configure the API parameters to record the access log time ",
"doc_type":"usermanual",
"kw":"Temporary Access Statistics Logs,Flow Control 2.0,User Guide",
@@ -2141,20 +3134,41 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Temporary Access Statistics Logs",
"githuburl":""
},
{
- "uri":"css_01_0139.html",
- "node_id":"css_01_0139.xml",
+ "uri":"en-us_topic_0000001832788405.html",
+ "node_id":"en-us_topic_0000001832788405.xml",
"product_code":"css",
- "code":"104",
+ "code":"151",
+ "des":"The traffic control function can record cluster access logs and write the logs to background log files. You can back up the logs to OBS for viewing. You can run the follo",
+ "doc_type":"usermanual",
+ "kw":"Recording Access Logs in Files,Flow Control 2.0,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"css",
+ "documenttype":"usermanual",
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
+ }
+ ],
+ "title":"Recording Access Logs in Files",
+ "githuburl":""
+ },
+ {
+ "uri":"en-us_topic_0000001477899152.html",
+ "node_id":"en-us_topic_0000001477899152.xml",
+ "product_code":"css",
+ "code":"152",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Flow Control 1.0",
@@ -2162,20 +3176,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Flow Control 1.0",
"githuburl":""
},
{
- "uri":"css_01_0140.html",
- "node_id":"css_01_0140.xml",
+ "uri":"en-us_topic_0000001477739408.html",
+ "node_id":"en-us_topic_0000001477739408.xml",
"product_code":"css",
- "code":"105",
+ "code":"153",
"des":"CSS can control traffic at the node level. You can configure the blacklist and whitelist, the maximum concurrent HTTP connections, and the maximum HTTP connections for a ",
"doc_type":"usermanual",
"kw":"Context,Flow Control 1.0,User Guide",
@@ -2183,20 +3197,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Context",
"githuburl":""
},
{
- "uri":"css_01_0141.html",
- "node_id":"css_01_0141.xml",
+ "uri":"en-us_topic_0000001477579352.html",
+ "node_id":"en-us_topic_0000001477579352.xml",
"product_code":"css",
- "code":"106",
+ "code":"154",
"des":"You can run commands in Kibana to enable or disable HTTP/HTTPS flow control for your cluster. The command parameters are as follows.Log in to the CSS management console.C",
"doc_type":"usermanual",
"kw":"HTTP/HTTPS Flow Control,Flow Control 1.0,User Guide",
@@ -2204,20 +3218,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"HTTP/HTTPS Flow Control",
"githuburl":""
},
{
- "uri":"css_01_0142.html",
- "node_id":"css_01_0142.xml",
+ "uri":"en-us_topic_0000001528499157.html",
+ "node_id":"en-us_topic_0000001528499157.xml",
"product_code":"css",
- "code":"107",
+ "code":"155",
"des":"Elasticsearch provides a circuit breaker, which will terminate requests if the memory usage exceeds its threshold. However, Elasticsearch does not check the heap memory u",
"doc_type":"usermanual",
"kw":"Memory Flow Control,Flow Control 1.0,User Guide",
@@ -2225,20 +3239,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Memory Flow Control",
"githuburl":""
},
{
- "uri":"css_01_0143.html",
- "node_id":"css_01_0143.xml",
+ "uri":"en-us_topic_0000001528499165.html",
+ "node_id":"en-us_topic_0000001528499165.xml",
"product_code":"css",
- "code":"108",
+ "code":"156",
"des":"The following table describes the global path whitelist parameters for flow control.Log in to the CSS management console.Choose Clusters in the navigation pane. On the Cl",
"doc_type":"usermanual",
"kw":"Global Path Whitelist for Flow Control,Flow Control 1.0,User Guide",
@@ -2246,20 +3260,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Global Path Whitelist for Flow Control",
"githuburl":""
},
{
- "uri":"css_01_0144.html",
- "node_id":"css_01_0144.xml",
+ "uri":"en-us_topic_0000001477579356.html",
+ "node_id":"en-us_topic_0000001477579356.xml",
"product_code":"css",
- "code":"109",
+ "code":"157",
"des":"Request sampling can record the access IP addresses, the number of accessed nodes, request paths, request URLs, and request bodies, which can be used to obtain the IP add",
"doc_type":"usermanual",
"kw":"Request Sampling,Flow Control 1.0,User Guide",
@@ -2267,20 +3281,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Request Sampling",
"githuburl":""
},
{
- "uri":"css_01_0145.html",
- "node_id":"css_01_0145.xml",
+ "uri":"en-us_topic_0000001528299577.html",
+ "node_id":"en-us_topic_0000001528299577.xml",
"product_code":"css",
- "code":"110",
+ "code":"158",
"des":"Flow control can be implemented via an independent API.Log in to the CSS management console.Choose Clusters in the navigation pane. On the Clusters page, locate the targe",
"doc_type":"usermanual",
"kw":"Flow Control,Flow Control 1.0,User Guide",
@@ -2288,20 +3302,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Flow Control",
"githuburl":""
},
{
- "uri":"css_01_0146.html",
- "node_id":"css_01_0146.xml",
+ "uri":"en-us_topic_0000001528659101.html",
+ "node_id":"en-us_topic_0000001528659101.xml",
"product_code":"css",
- "code":"111",
+ "code":"159",
"des":"You can check access logs in either of the following ways:Enable and check access logs via an independent API. Configure the API parameters to record the access log time ",
"doc_type":"usermanual",
"kw":"Access Logs,Flow Control 1.0,User Guide",
@@ -2309,20 +3323,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Access Logs",
"githuburl":""
},
{
- "uri":"css_01_0147.html",
- "node_id":"css_01_0147.xml",
+ "uri":"en-us_topic_0000001477419744.html",
+ "node_id":"en-us_topic_0000001477419744.xml",
"product_code":"css",
- "code":"112",
+ "code":"160",
"des":"CPU flow control can be implemented based on the CPU usage of a node.You can configure the CPU usage threshold of a node to prevent the node from breaking down due to hea",
"doc_type":"usermanual",
"kw":"CPU Flow Control,Flow Control 1.0,User Guide",
@@ -2330,20 +3344,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"CPU Flow Control",
"githuburl":""
},
{
- "uri":"css_01_0148.html",
- "node_id":"css_01_0148.xml",
+ "uri":"en-us_topic_0000001477739380.html",
+ "node_id":"en-us_topic_0000001477739380.xml",
"product_code":"css",
- "code":"113",
+ "code":"161",
"des":"You can block all traffic in one click, except the traffic that passes through O&M APIs, to handle unexpected traffic burst and quickly recover your cluster.Log in to the",
"doc_type":"usermanual",
"kw":"One-click Traffic Blocking,Flow Control 1.0,User Guide",
@@ -2351,20 +3365,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"One-click Traffic Blocking",
"githuburl":""
},
{
- "uri":"css_01_0131.html",
- "node_id":"css_01_0131.xml",
+ "uri":"en-us_topic_0000001477899220.html",
+ "node_id":"en-us_topic_0000001477899220.xml",
"product_code":"css",
- "code":"114",
+ "code":"162",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Large Query Isolation",
@@ -2372,20 +3386,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Large Query Isolation",
"githuburl":""
},
{
- "uri":"css_01_0132.html",
- "node_id":"css_01_0132.xml",
+ "uri":"en-us_topic_0000001477739376.html",
+ "node_id":"en-us_topic_0000001477739376.xml",
"product_code":"css",
- "code":"115",
+ "code":"163",
"des":"The large query isolation feature allows you to separately manage large queries. You can isolate query requests that consume a large amount of memory or take a long perio",
"doc_type":"usermanual",
"kw":"Context,Large Query Isolation,User Guide",
@@ -2393,20 +3407,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Context",
"githuburl":""
},
{
- "uri":"css_01_0133.html",
- "node_id":"css_01_0133.xml",
+ "uri":"en-us_topic_0000001477579388.html",
+ "node_id":"en-us_topic_0000001477579388.xml",
"product_code":"css",
- "code":"116",
+ "code":"164",
"des":"The large query isolation and global timeout features are disabled by default. If you enable them, the configuration will take effect immediately. Perform the following s",
"doc_type":"usermanual",
"kw":"Procedure,Large Query Isolation,User Guide",
@@ -2414,20 +3428,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Procedure",
"githuburl":""
},
{
- "uri":"css_01_0134.html",
- "node_id":"css_01_0134.xml",
+ "uri":"en-us_topic_0000001528499133.html",
+ "node_id":"en-us_topic_0000001528499133.xml",
"product_code":"css",
- "code":"117",
+ "code":"165",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Index Monitoring",
@@ -2435,20 +3449,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Index Monitoring",
"githuburl":""
},
{
- "uri":"css_01_0135.html",
- "node_id":"css_01_0135.xml",
+ "uri":"en-us_topic_0000001528499125.html",
+ "node_id":"en-us_topic_0000001528499125.xml",
"product_code":"css",
- "code":"118",
+ "code":"166",
"des":"CSS monitors various metrics of the running status and change trend of cluster indexes to measure service usage and handle potential risks in a timely manner, ensuring th",
"doc_type":"usermanual",
"kw":"Context,Index Monitoring,User Guide",
@@ -2456,20 +3470,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Context",
"githuburl":""
},
{
- "uri":"css_01_0136.html",
- "node_id":"css_01_0136.xml",
+ "uri":"en-us_topic_0000001477579408.html",
+ "node_id":"en-us_topic_0000001477579408.xml",
"product_code":"css",
- "code":"119",
+ "code":"167",
"des":"Log in to the CSS management console.Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation colu",
"doc_type":"usermanual",
"kw":"Enabling Index Monitoring,Index Monitoring,User Guide",
@@ -2477,20 +3491,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Enabling Index Monitoring",
"githuburl":""
},
{
- "uri":"css_01_0137.html",
- "node_id":"css_01_0137.xml",
+ "uri":"en-us_topic_0000001528659105.html",
+ "node_id":"en-us_topic_0000001528659105.xml",
"product_code":"css",
- "code":"120",
+ "code":"168",
"des":"You can call an API to query the index read and write traffic within a period of time.A cluster has been created and index monitoring has been enabled.Log in to the CSS m",
"doc_type":"usermanual",
"kw":"Checking the Index Read and Write Traffic,Index Monitoring,User Guide",
@@ -2498,20 +3512,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Checking the Index Read and Write Traffic",
"githuburl":""
},
{
- "uri":"css_01_0138.html",
- "node_id":"css_01_0138.xml",
+ "uri":"en-us_topic_0000001528499197.html",
+ "node_id":"en-us_topic_0000001528499197.xml",
"product_code":"css",
- "code":"121",
+ "code":"169",
"des":"You can check preconfigured index monitoring visualizations on the Dashboard and Visualizations pages of Kibana. You can also customize tables and charts.A cluster has be",
"doc_type":"usermanual",
"kw":"Checking Index Monitoring Information,Index Monitoring,User Guide",
@@ -2519,20 +3533,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Checking Index Monitoring Information",
"githuburl":""
},
{
- "uri":"css_01_0197.html",
- "node_id":"css_01_0197.xml",
+ "uri":"en-us_topic_0000001528499129.html",
+ "node_id":"en-us_topic_0000001528499129.xml",
"product_code":"css",
- "code":"122",
+ "code":"170",
"des":"The configuration file content of kibana-monitor is as follows. You are advised to save the file as monitoring-kibana.ndjson.",
"doc_type":"usermanual",
"kw":"kibana-monitor,Index Monitoring,User Guide",
@@ -2540,20 +3554,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"kibana-monitor",
"githuburl":""
},
{
- "uri":"css_01_0177.html",
- "node_id":"css_01_0177.xml",
+ "uri":"en-us_topic_0000001477419712.html",
+ "node_id":"en-us_topic_0000001477419712.xml",
"product_code":"css",
- "code":"123",
+ "code":"171",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Enhanced Cluster Monitoring",
@@ -2561,20 +3575,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Enhanced Cluster Monitoring",
"githuburl":""
},
{
- "uri":"css_01_0178.html",
- "node_id":"css_01_0178.xml",
+ "uri":"en-us_topic_0000001477419748.html",
+ "node_id":"en-us_topic_0000001477419748.xml",
"product_code":"css",
- "code":"124",
+ "code":"172",
"des":"The Elasticsearch community only discusses how to monitor the average latency of search requests, which cannot reflect the actual search performance of a cluster. To enha",
"doc_type":"usermanual",
"kw":"P99 Latency Monitoring,Enhanced Cluster Monitoring,User Guide",
@@ -2582,20 +3596,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"P99 Latency Monitoring",
"githuburl":""
},
{
- "uri":"css_01_0179.html",
- "node_id":"css_01_0179.xml",
+ "uri":"en-us_topic_0000001477419772.html",
+ "node_id":"en-us_topic_0000001477419772.xml",
"product_code":"css",
- "code":"125",
+ "code":"173",
"des":"When an external system accesses Elasticsearch through the HTTP protocol, a response and the corresponding status code are returned. The open-source Elasticsearch server ",
"doc_type":"usermanual",
"kw":"HTTP Status Code Monitoring,Enhanced Cluster Monitoring,User Guide",
@@ -2603,20 +3617,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"HTTP Status Code Monitoring",
"githuburl":""
},
{
- "uri":"css_01_0172.html",
- "node_id":"css_01_0172.xml",
+ "uri":"en-us_topic_0000001528659121.html",
+ "node_id":"en-us_topic_0000001528659121.xml",
"product_code":"css",
- "code":"126",
+ "code":"174",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Enhanced Aggregation",
@@ -2624,20 +3638,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Enhanced Aggregation",
"githuburl":""
},
{
- "uri":"css_01_0173.html",
- "node_id":"css_01_0173.xml",
+ "uri":"en-us_topic_0000001477419760.html",
+ "node_id":"en-us_topic_0000001477419760.xml",
"product_code":"css",
- "code":"127",
+ "code":"175",
"des":"The enhanced aggregation is an optimization feature for service awareness. With this feature, you can optimize the aggregation analysis capability of observable services.",
"doc_type":"usermanual",
"kw":"Features,Enhanced Aggregation,User Guide",
@@ -2645,20 +3659,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Features",
"githuburl":""
},
{
- "uri":"css_01_0174.html",
- "node_id":"css_01_0174.xml",
+ "uri":"en-us_topic_0000001528659125.html",
+ "node_id":"en-us_topic_0000001528659125.xml",
"product_code":"css",
- "code":"128",
+ "code":"176",
"des":"Low-cardinality fields have high data clustering performance when being sorted, which facilitates vectorized optimization. Assume that the following query statement exist",
"doc_type":"usermanual",
"kw":"Grouping and Aggregation of Low-cardinality Fields,Enhanced Aggregation,User Guide",
@@ -2666,20 +3680,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Grouping and Aggregation of Low-cardinality Fields",
"githuburl":""
},
{
- "uri":"css_01_0175.html",
- "node_id":"css_01_0175.xml",
+ "uri":"en-us_topic_0000001528499145.html",
+ "node_id":"en-us_topic_0000001528499145.xml",
"product_code":"css",
- "code":"129",
+ "code":"177",
"des":"High-cardinality fields are usually used for histogram grouping and aggregation instead of single-point grouping and aggregation. For example, collecting the statistics o",
"doc_type":"usermanual",
"kw":"High-cardinality Field Histogram Aggregation,Enhanced Aggregation,User Guide",
@@ -2687,20 +3701,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"High-cardinality Field Histogram Aggregation",
"githuburl":""
},
{
- "uri":"css_01_0176.html",
- "node_id":"css_01_0176.xml",
+ "uri":"en-us_topic_0000001528659141.html",
+ "node_id":"en-us_topic_0000001528659141.xml",
"product_code":"css",
- "code":"130",
+ "code":"178",
"des":"In the scenario where low-cardinality and high-cardinality fields are mixed, assume that the following query statement exists:Group the low-cardinality fields and create ",
"doc_type":"usermanual",
"kw":"Low-cardinality and High-cardinality Field Mixing,Enhanced Aggregation,User Guide",
@@ -2708,20 +3722,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Low-cardinality and High-cardinality Field Mixing",
"githuburl":""
},
{
- "uri":"css_01_0161.html",
- "node_id":"css_01_0161.xml",
+ "uri":"en-us_topic_0000001528379249.html",
+ "node_id":"en-us_topic_0000001528379249.xml",
"product_code":"css",
- "code":"131",
+ "code":"179",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Read/Write Splitting",
@@ -2729,20 +3743,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Read/Write Splitting",
"githuburl":""
},
{
- "uri":"css_01_0162.html",
- "node_id":"css_01_0162.xml",
+ "uri":"en-us_topic_0000001528379321.html",
+ "node_id":"en-us_topic_0000001528379321.xml",
"product_code":"css",
- "code":"132",
+ "code":"180",
"des":"CSS supports read/write splitting. Data written to the primary cluster (Leader) can be automatically synchronized to the secondary cluster (Follower). In this way, data i",
"doc_type":"usermanual",
"kw":"Features,Read/Write Splitting,User Guide",
@@ -2750,20 +3764,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Features",
"githuburl":""
},
{
- "uri":"css_01_0164.html",
- "node_id":"css_01_0164.xml",
+ "uri":"en-us_topic_0000001477579376.html",
+ "node_id":"en-us_topic_0000001477579376.xml",
"product_code":"css",
- "code":"133",
+ "code":"181",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Instructions",
@@ -2771,20 +3785,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Instructions",
"githuburl":""
},
{
- "uri":"css_01_0165.html",
- "node_id":"css_01_0165.xml",
+ "uri":"en-us_topic_0000001528379313.html",
+ "node_id":"en-us_topic_0000001528379313.xml",
"product_code":"css",
- "code":"134",
+ "code":"182",
"des":"Log in to the CSS management console.Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster, and click Access Kibana in the Operation col",
"doc_type":"usermanual",
"kw":"Basic Settings,Instructions,User Guide",
@@ -2792,20 +3806,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Basic Settings",
"githuburl":""
},
{
- "uri":"css_01_0166.html",
- "node_id":"css_01_0166.xml",
+ "uri":"en-us_topic_0000001528659157.html",
+ "node_id":"en-us_topic_0000001528659157.xml",
"product_code":"css",
- "code":"135",
+ "code":"183",
"des":"Synchronize a single index.The request URL and request body parameters are as follows:After the synchronization function is enabled, indexes in the secondary cluster beco",
"doc_type":"usermanual",
"kw":"Synchronizing Specified Indexes,Instructions,User Guide",
@@ -2813,20 +3827,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Synchronizing Specified Indexes",
"githuburl":""
},
{
- "uri":"css_01_0167.html",
- "node_id":"css_01_0167.xml",
+ "uri":"en-us_topic_0000001528379265.html",
+ "node_id":"en-us_topic_0000001528379265.xml",
"product_code":"css",
- "code":"136",
+ "code":"184",
"des":"The request URL and request body parameters are as follows:The following are two examples:1. Synchronize a single index from the primary cluster to the secondary cluster.",
"doc_type":"usermanual",
"kw":"Matching Index Synchronization,Instructions,User Guide",
@@ -2834,20 +3848,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Matching Index Synchronization",
"githuburl":""
},
{
- "uri":"css_01_0168.html",
- "node_id":"css_01_0168.xml",
+ "uri":"en-us_topic_0000001477899224.html",
+ "node_id":"en-us_topic_0000001477899224.xml",
"product_code":"css",
- "code":"137",
+ "code":"185",
"des":"You can specify multiple indexes or use wildcard to match the target indexes and terminate their synchronization tasks. Subsequent modifications to the indexes in the pri",
"doc_type":"usermanual",
"kw":"Stopping Index Synchronization,Instructions,User Guide",
@@ -2855,20 +3869,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Stopping Index Synchronization",
"githuburl":""
},
{
- "uri":"css_01_0169.html",
- "node_id":"css_01_0169.xml",
+ "uri":"en-us_topic_0000001528499169.html",
+ "node_id":"en-us_topic_0000001528499169.xml",
"product_code":"css",
- "code":"138",
+ "code":"186",
"des":"Querying the created patterns.This API is used to query the pattern list and query a specified pattern by name.An example request is as follows:GET auto_sync/pattern\nGET ",
"doc_type":"usermanual",
"kw":"Other Management APIs,Instructions,User Guide",
@@ -2876,20 +3890,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Other Management APIs",
"githuburl":""
},
{
- "uri":"css_01_0170.html",
- "node_id":"css_01_0170.xml",
+ "uri":"en-us_topic_0000001477739356.html",
+ "node_id":"en-us_topic_0000001477739356.xml",
"product_code":"css",
- "code":"139",
+ "code":"187",
"des":"This section describes how to switch from the primary cluster to the secondary cluster when the primary cluster is faulty.1. If the synchronization of specified indexes h",
"doc_type":"usermanual",
"kw":"Best Practices,Read/Write Splitting,User Guide",
@@ -2897,83 +3911,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Best Practices",
"githuburl":""
},
{
- "uri":"css_01_0045.html",
- "node_id":"css_01_0045.xml",
+ "uri":"en-us_topic_0000001477419740.html",
+ "node_id":"en-us_topic_0000001477419740.xml",
"product_code":"css",
- "code":"140",
- "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "doc_type":"usermanual",
- "kw":"Importing Data to Elasticsearch",
- "search_title":"",
- "metedata":[
- {
- "prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
- "documenttype":"usermanual",
- "IsBot":"Yes"
- }
- ],
- "title":"Importing Data to Elasticsearch",
- "githuburl":""
- },
- {
- "uri":"css_01_0048.html",
- "node_id":"css_01_0048.xml",
- "product_code":"css",
- "code":"141",
- "des":"You can use Logstash to collect data and migrate collected data to Elasticsearch in CSS. This method helps you effectively obtain and manage data through Elasticsearch. D",
- "doc_type":"usermanual",
- "kw":"Using Logstash to Import Data to Elasticsearch,Importing Data to Elasticsearch,User Guide",
- "search_title":"",
- "metedata":[
- {
- "prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
- "documenttype":"usermanual",
- "IsBot":"Yes"
- }
- ],
- "title":"Using Logstash to Import Data to Elasticsearch",
- "githuburl":""
- },
- {
- "uri":"css_01_0024.html",
- "node_id":"css_01_0024.xml",
- "product_code":"css",
- "code":"142",
- "des":"You can import data in various formats, such as JSON, to Elasticsearch in CSS by using Kibana or APIs.Before importing data, ensure that you can use Kibana to access the ",
- "doc_type":"usermanual",
- "kw":"Using Kibana or APIs to Import Data to Elasticsearch,Importing Data to Elasticsearch,User Guide",
- "search_title":"",
- "metedata":[
- {
- "prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
- "documenttype":"usermanual",
- "IsBot":"Yes"
- }
- ],
- "title":"Using Kibana or APIs to Import Data to Elasticsearch",
- "githuburl":""
- },
- {
- "uri":"css_01_0041.html",
- "node_id":"css_01_0041.xml",
- "product_code":"css",
- "code":"143",
+ "code":"188",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Monitoring",
@@ -2981,41 +3932,41 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Monitoring",
"githuburl":""
},
{
- "uri":"css_01_0042.html",
- "node_id":"css_01_0042.xml",
+ "uri":"en-us_topic_0000001528659133.html",
+ "node_id":"en-us_topic_0000001528659133.xml",
"product_code":"css",
- "code":"144",
+ "code":"189",
"des":"You can use Cloud Eye to monitor cluster metrics of CSS in real time and quickly handle exceptions. For details about Cloud Eye, see the Cloud Eye User Guide.Table 1 list",
"doc_type":"usermanual",
- "kw":"Monitoring Metrics of Elasticsearch Clusters,Monitoring,User Guide",
+ "kw":"Monitoring Metrics of Clusters,Monitoring,User Guide",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
- "title":"Monitoring Metrics of Elasticsearch Clusters",
+ "title":"Monitoring Metrics of Clusters",
"githuburl":""
},
{
- "uri":"css_01_0246.html",
- "node_id":"css_01_0246.xml",
+ "uri":"en-us_topic_0000001599872681.html",
+ "node_id":"en-us_topic_0000001599872681.xml",
"product_code":"css",
- "code":"145",
+ "code":"190",
"des":"This topic describes CSS metrics that can be monitored by Cloud Eye as well as their namespaces and dimensions. You can search for the monitoring metrics and alarms gener",
"doc_type":"usermanual",
"kw":"Monitoring Metrics,Monitoring,User Guide",
@@ -3033,10 +3984,10 @@
"githuburl":""
},
{
- "uri":"css_01_0155.html",
- "node_id":"css_01_0155.xml",
+ "uri":"en-us_topic_0000001477579336.html",
+ "node_id":"en-us_topic_0000001477579336.xml",
"product_code":"css",
- "code":"146",
+ "code":"191",
"des":"You can use Cloud Eye to monitor the created clusters. After configuring the cluster monitoring, you can log in to the Cloud Eye management console to view cluster metric",
"doc_type":"usermanual",
"kw":"Configuring Cluster Monitoring,Monitoring,User Guide",
@@ -3044,20 +3995,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Configuring Cluster Monitoring",
"githuburl":""
},
{
- "uri":"css_01_0049.html",
- "node_id":"css_01_0049.xml",
+ "uri":"en-us_topic_0000001528499181.html",
+ "node_id":"en-us_topic_0000001528499181.xml",
"product_code":"css",
- "code":"147",
+ "code":"192",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Auditing",
@@ -3065,20 +4016,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Auditing",
"githuburl":""
},
{
- "uri":"css_01_0050.html",
- "node_id":"css_01_0050.xml",
+ "uri":"en-us_topic_0000001528299573.html",
+ "node_id":"en-us_topic_0000001528299573.xml",
"product_code":"css",
- "code":"148",
+ "code":"193",
"des":"Cloud Trace Service (CTS) is available on the public cloud platform. With CTS, you can record operations associated with CSS for later query, audit, and backtrack operati",
"doc_type":"usermanual",
"kw":"Key Operations Recorded by CTS,Auditing,User Guide",
@@ -3086,41 +4037,41 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Key Operations Recorded by CTS",
"githuburl":""
},
{
- "uri":"css_01_0051.html",
- "node_id":"css_01_0051.xml",
+ "uri":"en-us_topic_0000001720964408.html",
+ "node_id":"en-us_topic_0000001720964408.xml",
"product_code":"css",
- "code":"149",
- "des":"After you enable CTS, it starts recording operations related to CSS. The CTS management console stores the last seven days of operation records. This section describes ho",
+ "code":"194",
+ "des":"After you enable CTS and the management tracker is created, CTS starts recording operations on cloud resources. CTS stores operation records generated in the last seven d",
"doc_type":"usermanual",
- "kw":"Viewing Audit Logs,Auditing,User Guide",
+ "kw":"Querying Real-Time Traces,Auditing,User Guide",
"search_title":"",
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
- "title":"Viewing Audit Logs",
+ "title":"Querying Real-Time Traces",
"githuburl":""
},
{
- "uri":"css_02_0001.html",
- "node_id":"css_02_0001.xml",
+ "uri":"en-us_topic_0000001876048066.html",
+ "node_id":"en-us_topic_0000001876048066.xml",
"product_code":"css",
- "code":"150",
+ "code":"195",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"FAQs",
@@ -3128,20 +4079,20 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"FAQs",
"githuburl":""
},
{
- "uri":"css_02_0051.html",
- "node_id":"css_02_0051.xml",
+ "uri":"en-us_topic_0000001477137534.html",
+ "node_id":"en-us_topic_0000001477137534.xml",
"product_code":"css",
- "code":"151",
+ "code":"196",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"General Consulting",
@@ -3157,10 +4108,10 @@
"githuburl":""
},
{
- "uri":"css_02_0034.html",
- "node_id":"css_02_0034.xml",
+ "uri":"en-us_topic_0000001528097305.html",
+ "node_id":"en-us_topic_0000001528097305.xml",
"product_code":"css",
- "code":"152",
+ "code":"197",
"des":"A region and availability zone (AZ) identify the location of a data center. You can create resources in a specific region and AZ.A region is a physical data center. Each ",
"doc_type":"usermanual",
"kw":"What Are Regions and AZs?,General Consulting,User Guide",
@@ -3176,10 +4127,10 @@
"githuburl":""
},
{
- "uri":"css_02_0006.html",
- "node_id":"css_02_0006.xml",
+ "uri":"en-us_topic_0000001476977546.html",
+ "node_id":"en-us_topic_0000001476977546.xml",
"product_code":"css",
- "code":"153",
+ "code":"198",
"des":"CSS uses network isolation, in addition to various host and data security measures.Network isolationThe entire network is divided into two planes: service plane and manag",
"doc_type":"usermanual",
"kw":"How Does CSS Ensure Data and Service Security?,General Consulting,User Guide",
@@ -3195,10 +4146,10 @@
"githuburl":""
},
{
- "uri":"css_02_0007.html",
- "node_id":"css_02_0007.xml",
+ "uri":"en-us_topic_0000001528097297.html",
+ "node_id":"en-us_topic_0000001528097297.xml",
"product_code":"css",
- "code":"154",
+ "code":"199",
"des":"Disk usage and cluster health status are two key metrics that you can focus on. You can log in to Cloud Eye and configure alarm rules for these metrics. If alarms are rep",
"doc_type":"usermanual",
"kw":"Which CSS Metrics Should I Focus On?,General Consulting,User Guide",
@@ -3214,10 +4165,10 @@
"githuburl":""
},
{
- "uri":"css_02_0008.html",
- "node_id":"css_02_0008.xml",
+ "uri":"en-us_topic_0000001528097293.html",
+ "node_id":"en-us_topic_0000001528097293.xml",
"product_code":"css",
- "code":"155",
+ "code":"200",
"des":"CSS uses EVS and local disks to store your indices. During cluster creation, you can specify the EVS disk type and specifications (the EVS disk size).Supported EVS disk t",
"doc_type":"usermanual",
"kw":"What Storage Options Does CSS Provide?,General Consulting,User Guide",
@@ -3233,10 +4184,10 @@
"githuburl":""
},
{
- "uri":"css_02_0009.html",
- "node_id":"css_02_0009.xml",
+ "uri":"en-us_topic_0000001527937337.html",
+ "node_id":"en-us_topic_0000001527937337.xml",
"product_code":"css",
- "code":"156",
+ "code":"201",
"des":"You can configure up to 200 nodes for a cluster (each node corresponds to an ECS). The maximum storage capacity of an ECS is the total capacity of EVS disks attached to t",
"doc_type":"usermanual",
"kw":"What Is the Maximum Storage Capacity of CSS?,General Consulting,User Guide",
@@ -3252,10 +4203,10 @@
"githuburl":""
},
{
- "uri":"css_02_0017.html",
- "node_id":"css_02_0017.xml",
+ "uri":"en-us_topic_0000001477137546.html",
+ "node_id":"en-us_topic_0000001477137546.xml",
"product_code":"css",
- "code":"157",
+ "code":"202",
"des":"You can use any of the following three methods to manage CSS or to use search engine APIs. You can initiate requests based on constructed request messages.curlcurl is a c",
"doc_type":"usermanual",
"kw":"How Can I Manage CSS?,General Consulting,User Guide",
@@ -3271,10 +4222,10 @@
"githuburl":""
},
{
- "uri":"css_02_0010.html",
- "node_id":"css_02_0010.xml",
+ "uri":"en-us_topic_0000001477297350.html",
+ "node_id":"en-us_topic_0000001477297350.xml",
"product_code":"css",
- "code":"158",
+ "code":"203",
"des":"You can store the following logs and files:Log files: Elasticsearch logsData files: Elasticsearch index filesOther files: cluster configuration filesOS: 5% storage space ",
"doc_type":"usermanual",
"kw":"What Can the Disk Space of a CSS Cluster Be Used For?,General Consulting,User Guide",
@@ -3290,10 +4241,10 @@
"githuburl":""
},
{
- "uri":"css_02_0093.html",
- "node_id":"css_02_0093.xml",
+ "uri":"en-us_topic_0000001527697797.html",
+ "node_id":"en-us_topic_0000001527697797.xml",
"product_code":"css",
- "code":"159",
+ "code":"204",
"des":"Log in to the console.On the Clusters page, click Access Kibana in the Operation column of a cluster.Log in to Kibana and choose Dev Tools.On the Console page, run the GE",
"doc_type":"usermanual",
"kw":"How Do I Check the Numbers of Shards and Replicas in a Cluster on the CSS Console?,General Consultin",
@@ -3309,10 +4260,10 @@
"githuburl":""
},
{
- "uri":"css_02_0041.html",
- "node_id":"css_02_0041.xml",
+ "uri":"en-us_topic_0000001477297354.html",
+ "node_id":"en-us_topic_0000001477297354.xml",
"product_code":"css",
- "code":"160",
+ "code":"205",
"des":"CSS supports two data compression algorithms: LZ4 (by default) and best_compression.LZ4 algorithmLZ4 is the default compression algorithm of Elasticsearch. This algorithm",
"doc_type":"usermanual",
"kw":"What Data Compression Algorithms Does CSS Use?,General Consulting,User Guide",
@@ -3328,10 +4279,10 @@
"githuburl":""
},
{
- "uri":"css_02_0055.html",
- "node_id":"css_02_0055.xml",
+ "uri":"en-us_topic_0000001477137530.html",
+ "node_id":"en-us_topic_0000001477137530.xml",
"product_code":"css",
- "code":"161",
+ "code":"206",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Functions",
@@ -3347,10 +4298,10 @@
"githuburl":""
},
{
- "uri":"css_02_0058.html",
- "node_id":"css_02_0058.xml",
+ "uri":"en-us_topic_0000001527937341.html",
+ "node_id":"en-us_topic_0000001527937341.xml",
"product_code":"css",
- "code":"162",
+ "code":"207",
"des":"Elasticsearch does not support direct data migration between different VPCs. You can use either of the following methods to migrate data.Use the backup and restoration fu",
"doc_type":"usermanual",
"kw":"Can Elasticsearch Data Be Migrated Between VPCs?,Functions,User Guide",
@@ -3366,10 +4317,10 @@
"githuburl":""
},
{
- "uri":"css_02_0094.html",
- "node_id":"css_02_0094.xml",
+ "uri":"en-us_topic_0000001528097309.html",
+ "node_id":"en-us_topic_0000001528097309.xml",
"product_code":"css",
- "code":"163",
+ "code":"208",
"des":"CSS clusters cannot be directly migrated. You can back up a cluster to an OBS bucket and restore it to a new region.If the OBS bucket is in the same region as your CSS cl",
"doc_type":"usermanual",
"kw":"How Do I Migrate a CSS Cluster Across Regions?,Functions,User Guide",
@@ -3385,10 +4336,10 @@
"githuburl":""
},
{
- "uri":"css_02_0096.html",
- "node_id":"css_02_0096.xml",
+ "uri":"en-us_topic_0000001476977558.html",
+ "node_id":"en-us_topic_0000001476977558.xml",
"product_code":"css",
- "code":"164",
+ "code":"209",
"des":"The slow query log settings of CSS are the same as those of Elasticsearch. You can configure slow query logs via the _settings API. For example, you can run the following",
"doc_type":"usermanual",
"kw":"How Do I Configure the Threshold for CSS Slow Query Logs?,Functions,User Guide",
@@ -3404,10 +4355,10 @@
"githuburl":""
},
{
- "uri":"css_02_0119.html",
- "node_id":"css_02_0119.xml",
+ "uri":"en-us_topic_0000001477297334.html",
+ "node_id":"en-us_topic_0000001477297334.xml",
"product_code":"css",
- "code":"165",
+ "code":"210",
"des":"The CSS lifecycle is implemented using the Index State Management (ISM) of Open Distro. For details about how to configure policies related to the ISM template, see the O",
"doc_type":"usermanual",
"kw":"How Do I Update the CSS Lifecycle Policy?,Functions,User Guide",
@@ -3423,10 +4374,10 @@
"githuburl":""
},
{
- "uri":"css_02_0118.html",
- "node_id":"css_02_0118.xml",
+ "uri":"en-us_topic_0000001477297358.html",
+ "node_id":"en-us_topic_0000001477297358.xml",
"product_code":"css",
- "code":"166",
+ "code":"211",
"des":"Log in to the Kibana page of the cluster. In the navigation pane, choose Dev Tools.Modify and run the PUT /*/_settings{\"number_of_replicas\":0} command.Do not directly run",
"doc_type":"usermanual",
"kw":"How Do I Set the Numbers of Index Copies to 0 in Batches?,Functions,User Guide",
@@ -3442,10 +4393,10 @@
"githuburl":""
},
{
- "uri":"css_02_0042.html",
- "node_id":"css_02_0042.xml",
+ "uri":"en-us_topic_0000001527777449.html",
+ "node_id":"en-us_topic_0000001527777449.xml",
"product_code":"css",
- "code":"167",
+ "code":"212",
"des":"The possible causes are as follows:Shards were unevenly distributed in previous index allocations, and the predominate parameter in the latest indexed shard allocation wa",
"doc_type":"usermanual",
"kw":"Why All New Index Shards Are Allocated to the Same Node?,Functions,User Guide",
@@ -3461,10 +4412,10 @@
"githuburl":""
},
{
- "uri":"css_02_0043.html",
- "node_id":"css_02_0043.xml",
+ "uri":"en-us_topic_0000001527697777.html",
+ "node_id":"en-us_topic_0000001527697777.xml",
"product_code":"css",
- "code":"168",
+ "code":"213",
"des":"The snapshot function has been enabled for the cluster and snapshot information has been configured.Log in to the CSS management console, and click Clusters in the naviga",
"doc_type":"usermanual",
"kw":"How Do I Query Snapshot Information?,Functions,User Guide",
@@ -3480,10 +4431,10 @@
"githuburl":""
},
{
- "uri":"css_02_0052.html",
- "node_id":"css_02_0052.xml",
+ "uri":"en-us_topic_0000001477137542.html",
+ "node_id":"en-us_topic_0000001477137542.xml",
"product_code":"css",
- "code":"169",
+ "code":"214",
"des":"A cluster cannot be directly upgraded. You can purchase a cluster of a later version and migrate your data to it.Creating a Cluster: Create a cluster of a later version i",
"doc_type":"usermanual",
"kw":"Can I Upgrade a Cluster from an Earlier Version to a Later Version?,Functions,User Guide",
@@ -3499,10 +4450,10 @@
"githuburl":""
},
{
- "uri":"css_02_0120.html",
- "node_id":"css_02_0120.xml",
+ "uri":"en-us_topic_0000001476817914.html",
+ "node_id":"en-us_topic_0000001476817914.xml",
"product_code":"css",
- "code":"170",
+ "code":"215",
"des":"Yes. You can use a snapshot stored in OBS to restore a cluster. A deleted cluster that has no snapshots in OBS cannot be restored. Exercise caution when deleting a cluste",
"doc_type":"usermanual",
"kw":"Can I Restore a Deleted Cluster?,Functions,User Guide",
@@ -3518,10 +4469,10 @@
"githuburl":""
},
{
- "uri":"css_02_0101.html",
- "node_id":"css_02_0101.xml",
+ "uri":"en-us_topic_0000001527777437.html",
+ "node_id":"en-us_topic_0000001527777437.xml",
"product_code":"css",
- "code":"171",
+ "code":"216",
"des":"You can modify TLS algorithms in CSS 7.6.2 and later versions.Log in to the CSS management console.In the navigation pane, choose Clusters. The cluster list is displayed.",
"doc_type":"usermanual",
"kw":"Can I Modify the TLS Algorithm of an Elasticsearch Cluster?,Functions,User Guide",
@@ -3537,10 +4488,10 @@
"githuburl":""
},
{
- "uri":"css_02_0102.html",
- "node_id":"css_02_0102.xml",
+ "uri":"en-us_topic_0000001476817910.html",
+ "node_id":"en-us_topic_0000001476817910.xml",
"product_code":"css",
- "code":"172",
+ "code":"217",
"des":"If the query results on shards exceed the upper limit of records that can be returned (default value: 10000), you need to increase the limit by changing the value of sear",
"doc_type":"usermanual",
"kw":"How Do I Set the search.max_buckets Parameter for an ES Cluster?,Functions,User Guide",
@@ -3556,10 +4507,10 @@
"githuburl":""
},
{
- "uri":"css_02_0127.html",
- "node_id":"css_02_0127.xml",
+ "uri":"en-us_topic_0000001477137526.html",
+ "node_id":"en-us_topic_0000001477137526.xml",
"product_code":"css",
- "code":"173",
+ "code":"218",
"des":"If the value of node.roles of a client node is i, then is this client node an injest node?Are there coordinating only nodes in clusters? Are the client requests distribut",
"doc_type":"usermanual",
"kw":"Does the Value i of node.roles Indicate an Injest Node?,Functions,User Guide",
@@ -3575,10 +4526,10 @@
"githuburl":""
},
{
- "uri":"css_02_0132.html",
- "node_id":"css_02_0132.xml",
+ "uri":"en-us_topic_0000001528097313.html",
+ "node_id":"en-us_topic_0000001528097313.xml",
"product_code":"css",
- "code":"174",
+ "code":"219",
"des":"In Elasticsearch 7.x and later versions, types cannot be created for indexes.If you need to use types, add include_type_name=true to the command. For example:After the co",
"doc_type":"usermanual",
"kw":"How Do I Create a Type Under an Index in an Elasticsearch 7.x Cluster?,Functions,User Guide",
@@ -3594,10 +4545,10 @@
"githuburl":""
},
{
- "uri":"css_02_0063.html",
- "node_id":"css_02_0063.xml",
+ "uri":"en-us_topic_0000001527777433.html",
+ "node_id":"en-us_topic_0000001527777433.xml",
"product_code":"css",
- "code":"175",
+ "code":"220",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Clusters in Security Mode",
@@ -3613,10 +4564,10 @@
"githuburl":""
},
{
- "uri":"css_02_0064.html",
- "node_id":"css_02_0064.xml",
+ "uri":"en-us_topic_0000001477137538.html",
+ "node_id":"en-us_topic_0000001477137538.xml",
"product_code":"css",
- "code":"176",
+ "code":"221",
"des":"Non-security mode: no restrictions.Cluster in security mode: The Filebeat OSS version must match the cluster version. For details on how to download the Filebeat OSS vers",
"doc_type":"usermanual",
"kw":"What Is the Relationship Between the Filebeat Version and Cluster Version?,Clusters in Security Mode",
@@ -3632,10 +4583,10 @@
"githuburl":""
},
{
- "uri":"css_02_0106.html",
- "node_id":"css_02_0106.xml",
+ "uri":"en-us_topic_0000001476817894.html",
+ "node_id":"en-us_topic_0000001476817894.xml",
"product_code":"css",
- "code":"177",
+ "code":"222",
"des":"The security certificate (CloudSearchService.cer) can be downloaded only for security clusters that have enabled HTTPS access.Log in to the CSS management console.In the ",
"doc_type":"usermanual",
"kw":"How Do I Obtain the Security Certificate of CSS?,Clusters in Security Mode,User Guide",
@@ -3651,10 +4602,10 @@
"githuburl":""
},
{
- "uri":"css_02_0128.html",
- "node_id":"css_02_0128.xml",
+ "uri":"en-us_topic_0000001476817906.html",
+ "node_id":"en-us_topic_0000001476817906.xml",
"product_code":"css",
- "code":"178",
+ "code":"223",
"des":"The security certificate (CloudSearchService.cer) can be downloaded only for security clusters that have enabled HTTPS access. Most software supports certificates in the ",
"doc_type":"usermanual",
"kw":"How Do I Convert the Format of a CER Security Certificate?,Clusters in Security Mode,User Guide",
@@ -3670,10 +4621,10 @@
"githuburl":""
},
{
- "uri":"css_02_0066.html",
- "node_id":"css_02_0066.xml",
+ "uri":"en-us_topic_0000001527697785.html",
+ "node_id":"en-us_topic_0000001527697785.xml",
"product_code":"css",
- "code":"179",
+ "code":"224",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Resource Usage and Change",
@@ -3689,10 +4640,10 @@
"githuburl":""
},
{
- "uri":"css_02_0067.html",
- "node_id":"css_02_0067.xml",
+ "uri":"en-us_topic_0000001528097289.html",
+ "node_id":"en-us_topic_0000001528097289.xml",
"product_code":"css",
- "code":"180",
+ "code":"225",
"des":"Run the following command to delete a single index data record.curl -XDELETE http://IP:9200/Index_nameIP: the IP address of any node in the clustercurl -XDELETE http://",
"doc_type":"usermanual",
"kw":"How Do I Clear Expired Data to Release Storage Space?,Resource Usage and Change,User Guide",
@@ -3708,10 +4659,10 @@
"githuburl":""
},
{
- "uri":"css_02_0068.html",
- "node_id":"css_02_0068.xml",
+ "uri":"en-us_topic_0000001527937333.html",
+ "node_id":"en-us_topic_0000001527937333.xml",
"product_code":"css",
- "code":"181",
+ "code":"226",
"des":"Run GET _cat/indices?v in Kibana to check the number of cluster replicas. If the value of rep is 1, the cluster has two replicas.If the value of rep is not 1, run the fol",
"doc_type":"usermanual",
"kw":"How Do I Configure a Two-Replica CSS Cluster?,Resource Usage and Change,User Guide",
@@ -3727,10 +4678,10 @@
"githuburl":""
},
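For the two-replica FAQ above, this illustrative sketch shows the equivalent of running GET _cat/indices?v and then adjusting an index's replica setting over the REST API; the endpoint and index name are assumptions, not values taken from the FAQ.

```python
# Sketch: check the "rep" column of _cat/indices and update the replica count
# of one index. All addresses and index names are placeholders.
import requests

node = "http://192.168.0.10:9200"   # any node in the cluster (assumed)

# Equivalent of "GET _cat/indices?v" in Kibana; the "rep" column shows replicas.
print(requests.get(f"{node}/_cat/indices?v", timeout=10).text)

# Adjust the replica setting of a single index (value shown is illustrative).
resp = requests.put(
    f"{node}/my_index/_settings",              # "my_index" is a placeholder
    json={"index": {"number_of_replicas": 1}},
    timeout=10,
)
print(resp.json())
```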
{
- "uri":"css_02_0069.html",
- "node_id":"css_02_0069.xml",
+ "uri":"en-us_topic_0000001527777445.html",
+ "node_id":"en-us_topic_0000001527777445.xml",
"product_code":"css",
- "code":"182",
+ "code":"227",
"des":"Manually: Run the DELETE /my_index command in Kibana.Automatically: Create scheduled tasks to call the index deletion request and periodically execute the tasks. CSS supp",
"doc_type":"usermanual",
"kw":"How Do I Delete Index Data?,Resource Usage and Change,User Guide",
@@ -3746,10 +4697,10 @@
"githuburl":""
},
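The index deletion FAQ above mentions both manual deletion (DELETE /my_index) and scheduled deletion. The sketch below is one hedged way the scheduled variant could look: it assumes date-suffixed index names such as log-2023.01.01, a seven-day retention window, and a placeholder node address, none of which come from the FAQ itself.

```python
# Sketch of a periodic cleanup job: drop date-suffixed indices older than a
# retention window. Index pattern, retention period, and endpoint are assumed.
import datetime
import requests

node = "http://192.168.0.10:9200"   # any node in the cluster (assumed)
retention_days = 7                  # keep one week of data (assumed)

cutoff = datetime.date.today() - datetime.timedelta(days=retention_days)
# "h=index" limits the _cat output to index names only.
for name in requests.get(f"{node}/_cat/indices/log-*?h=index", timeout=10).text.split():
    try:
        day = datetime.datetime.strptime(name.split("-", 1)[1], "%Y.%m.%d").date()
    except (IndexError, ValueError):
        continue  # skip anything that does not match the assumed naming pattern
    if day < cutoff:
        requests.delete(f"{node}/{name}", timeout=10)  # same effect as DELETE /my_index
```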
{
- "uri":"css_02_0089.html",
- "node_id":"css_02_0089.xml",
+ "uri":"en-us_topic_0000001476977562.html",
+ "node_id":"en-us_topic_0000001476977562.xml",
"product_code":"css",
- "code":"183",
+ "code":"228",
"des":"Once an index is created, the number of primary shards cannot be changed.You can run the following command in Kibana to change the number of replicas:index specifies the ",
"doc_type":"usermanual",
"kw":"Can I Change the Number of Shards to Four with Two Replicas When There Is One Shard Set in the JSON ",
@@ -3765,10 +4716,10 @@
"githuburl":""
},
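To illustrate the point above that the primary shard count is fixed at creation time while the replica count stays adjustable, the following hedged sketch creates an index with four shards and later changes only its replicas; every name and value here is illustrative rather than taken from the FAQ.

```python
# Sketch: shards are chosen when the index is created; replicas can be changed later.
import requests

node = "http://192.168.0.10:9200"   # any node in the cluster (assumed)

# Primary shard count can only be set at creation time.
requests.put(
    f"{node}/my_index",             # "my_index" is a placeholder
    json={"settings": {"number_of_shards": 4, "number_of_replicas": 1}},
    timeout=10,
)

# Replica count can still be adjusted on the live index afterwards.
requests.put(
    f"{node}/my_index/_settings",
    json={"index": {"number_of_replicas": 2}},
    timeout=10,
)
```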
{
- "uri":"css_02_0124.html",
- "node_id":"css_02_0124.xml",
+ "uri":"en-us_topic_0000001476977554.html",
+ "node_id":"en-us_topic_0000001476977554.xml",
"product_code":"css",
- "code":"184",
+ "code":"229",
"des":"A large number of shards in a cluster slows down shard creation.If automatic index creation is enabled, slow index creation may cause a large number of write requests to ",
"doc_type":"usermanual",
"kw":"What Are the Impacts If an Elasticsearch Cluster Has Too Many Shards?,Resource Usage and Change,User",
@@ -3784,10 +4735,10 @@
"githuburl":""
},
{
- "uri":"css_02_0125.html",
- "node_id":"css_02_0125.xml",
+ "uri":"en-us_topic_0000001527697781.html",
+ "node_id":"en-us_topic_0000001527697781.xml",
"product_code":"css",
- "code":"185",
+ "code":"230",
"des":"Method 1Open Kibana and run the following commands on the Dev Tools page:PUT _all/_settings?preserve_existing=true\n{\n\"index.max_result_window\" : \"10000000\"\n}Open Kibana a",
"doc_type":"usermanual",
"kw":"How Do I Set the Default Maximum Number of Records Displayed on a Page for an Elasticsearch Cluster,",
@@ -3803,10 +4754,10 @@
"githuburl":""
},
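The FAQ above quotes the PUT _all/_settings?preserve_existing=true request with index.max_result_window. The sketch below sends the same request through the REST API instead of the Kibana Dev Tools page; the node address is a placeholder and the value mirrors the one quoted in the description.

```python
# Sketch: raise the default maximum number of records returned per page,
# matching the request body shown in the FAQ description.
import requests

node = "http://192.168.0.10:9200"   # any node in the cluster (assumed)

resp = requests.put(
    f"{node}/_all/_settings",
    params={"preserve_existing": "true"},
    json={"index.max_result_window": "10000000"},
    timeout=10,
)
print(resp.json())  # expects {"acknowledged": true} when the setting is applied
```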
{
- "uri":"css_02_0126.html",
- "node_id":"css_02_0126.xml",
+ "uri":"en-us_topic_0000001527777425.html",
+ "node_id":"en-us_topic_0000001527777425.xml",
"product_code":"css",
- "code":"186",
+ "code":"231",
"des":"Running the delete_by_query command can only add a deletion mark to the target data instead of really deleting it. When you search for data, all data is searched and the ",
"doc_type":"usermanual",
"kw":"Why Does the Disk Usage Increase After the delete_by_query Command Was Executed to Delete Data?,Reso",
@@ -3822,10 +4773,10 @@
"githuburl":""
},
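As the description above notes, delete_by_query only marks documents as deleted rather than removing them. One common way to reclaim that space is to expunge the deleted documents through a force merge; the sketch below shows that approach as an illustration and is not necessarily the exact remedy given in the linked FAQ. Endpoint and index name are assumed.

```python
# Sketch: expunge documents that delete_by_query marked as deleted so the
# segments shrink and disk space is actually released.
import requests

node = "http://192.168.0.10:9200"   # any node in the cluster (assumed)
index = "my_index"                  # index previously shrunk with delete_by_query (assumed)

resp = requests.post(
    f"{node}/{index}/_forcemerge",
    params={"only_expunge_deletes": "true"},
    timeout=600,   # merging can take a while on large indices
)
print(resp.json())
```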
{
- "uri":"css_02_0130.html",
- "node_id":"css_02_0130.xml",
+ "uri":"en-us_topic_0000001528097317.html",
+ "node_id":"en-us_topic_0000001528097317.xml",
"product_code":"css",
- "code":"187",
+ "code":"232",
"des":"Clear the fielddataDuring aggregation and sorting, data are converted to the fielddata structure, which occupies a large amount of memory.Run the following commands on Ki",
"doc_type":"usermanual",
"kw":"How Do I Clear the Cache of a CSS Cluster?,Resource Usage and Change,User Guide",
@@ -3841,10 +4792,10 @@
"githuburl":""
},
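For the cache-clearing FAQ above, this sketch issues the fielddata (and full) cache clear requests through the REST API rather than on the Kibana Dev Tools page; the endpoint is a placeholder.

```python
# Sketch: clear the fielddata cache, which grows during aggregation and sorting,
# and optionally all caches across indices.
import requests

node = "http://192.168.0.10:9200"   # any node in the cluster (assumed)

# Clear only the fielddata cache.
print(requests.post(f"{node}/_cache/clear", params={"fielddata": "true"}, timeout=30).json())

# Or clear all caches (fielddata, query, and request).
print(requests.post(f"{node}/_cache/clear", timeout=30).json())
```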
{
- "uri":"css_02_0131.html",
- "node_id":"css_02_0131.xml",
+ "uri":"en-us_topic_0000001527777429.html",
+ "node_id":"en-us_topic_0000001527777429.xml",
"product_code":"css",
- "code":"188",
+ "code":"233",
"des":"The cluster monitoring result shows that the average memory usage of a cluster is 98%. Does it affect cluster performance?In an ES cluster, 50% of the memory is occupied ",
"doc_type":"usermanual",
"kw":"The Average Memory Usage of an Elasticsearch Cluster Reaches 98%,Resource Usage and Change,User Guid",
@@ -3860,10 +4811,10 @@
"githuburl":""
},
{
- "uri":"css_02_0070.html",
- "node_id":"css_02_0070.xml",
+ "uri":"en-us_topic_0000001527697789.html",
+ "node_id":"en-us_topic_0000001527697789.xml",
"product_code":"css",
- "code":"189",
+ "code":"234",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Components",
@@ -3879,10 +4830,10 @@
"githuburl":""
},
{
- "uri":"css_02_0088.html",
- "node_id":"css_02_0088.xml",
+ "uri":"en-us_topic_0000001476977550.html",
+ "node_id":"en-us_topic_0000001476977550.xml",
"product_code":"css",
- "code":"190",
+ "code":"235",
"des":"CSS does not currently support installation of Search Guard.CSS provides clusters in security mode, which have the same functions as Search Guard.",
"doc_type":"usermanual",
"kw":"Can I Install Search Guard on CSS?,Components,User Guide",
@@ -3898,10 +4849,10 @@
"githuburl":""
},
{
- "uri":"css_02_0073.html",
- "node_id":"css_02_0073.xml",
+ "uri":"en-us_topic_0000001527937345.html",
+ "node_id":"en-us_topic_0000001527937345.xml",
"product_code":"css",
- "code":"191",
+ "code":"236",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Kibana",
@@ -3917,10 +4868,10 @@
"githuburl":""
},
{
- "uri":"css_02_0098.html",
- "node_id":"css_02_0098.xml",
+ "uri":"en-us_topic_0000001527937329.html",
+ "node_id":"en-us_topic_0000001527937329.xml",
"product_code":"css",
- "code":"192",
+ "code":"237",
"des":"Exporting data from Kibana requires the SQL Workbench plugin. Currently, you can only export data from Kibana 7.6.2 or later.In SQL Workbench of Kibana, you can enter Ela",
"doc_type":"usermanual",
"kw":"Can I Export Data from Kibana?,Kibana,User Guide",
@@ -3936,10 +4887,10 @@
"githuburl":""
},
{
- "uri":"css_02_0099.html",
- "node_id":"css_02_0099.xml",
+ "uri":"en-us_topic_0000001527777441.html",
+ "node_id":"en-us_topic_0000001527777441.xml",
"product_code":"css",
- "code":"193",
+ "code":"238",
"des":"Run the following command to query index data through an API on Kibana:The returned data is shown in the following figure.took: How many milliseconds the query cost.time_",
"doc_type":"usermanual",
"kw":"How Do I Query Index Data on Kibana in an ES Cluster?,Kibana,User Guide",
@@ -3955,10 +4906,10 @@
"githuburl":""
},
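The FAQ above queries index data through an API and explains the took and timed_out fields of the response. The sketch below runs a minimal match_all search and prints those fields; the endpoint and index name are assumptions for illustration.

```python
# Sketch: query index data directly over the REST API; "took" is the query
# time in milliseconds, as described in the FAQ.
import requests

node = "http://192.168.0.10:9200"   # any node in the cluster (assumed)
index = "my_index"                  # index to query (assumed)

resp = requests.post(
    f"{node}/{index}/_search",
    json={"query": {"match_all": {}}, "size": 10},
    timeout=10,
)
body = resp.json()
print(body["took"], body["timed_out"], body["hits"]["total"])
```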
{
- "uri":"css_02_0077.html",
- "node_id":"css_02_0077.xml",
+ "uri":"en-us_topic_0000001476817902.html",
+ "node_id":"en-us_topic_0000001476817902.xml",
"product_code":"css",
- "code":"194",
+ "code":"239",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Clusters",
@@ -3974,10 +4925,10 @@
"githuburl":""
},
{
- "uri":"css_02_0025.html",
- "node_id":"css_02_0025.xml",
+ "uri":"en-us_topic_0000001476977542.html",
+ "node_id":"en-us_topic_0000001476977542.xml",
"product_code":"css",
- "code":"195",
+ "code":"240",
"des":"Perform the following steps to troubleshoot this problem:Check whether the ECS instance and cluster are in the same VPC.If they are, go to 2.If they are not, create an EC",
"doc_type":"usermanual",
"kw":"Why Does My ECS Fail to Connect to a Cluster?,Clusters,User Guide",
@@ -3993,10 +4944,10 @@
"githuburl":""
},
{
- "uri":"css_02_0078.html",
- "node_id":"css_02_0078.xml",
+ "uri":"en-us_topic_0000001477297362.html",
+ "node_id":"en-us_topic_0000001477297362.xml",
"product_code":"css",
- "code":"196",
+ "code":"241",
"des":"No.",
"doc_type":"usermanual",
"kw":"Can a New Cluster Use the IP Address of the Old Cluster?,Clusters,User Guide",
@@ -4012,10 +4963,10 @@
"githuburl":""
},
{
- "uri":"css_02_0079.html",
- "node_id":"css_02_0079.xml",
+ "uri":"en-us_topic_0000001477297338.html",
+ "node_id":"en-us_topic_0000001477297338.xml",
"product_code":"css",
- "code":"197",
+ "code":"242",
"des":"No. To access a cluster from the Internet, see Public IP Address Access.",
"doc_type":"usermanual",
"kw":"Can I Associate My EIP If I Want to Access the Cluster from the Internet?,Clusters,User Guide",
@@ -4031,10 +4982,10 @@
"githuburl":""
},
{
- "uri":"css_02_0081.html",
- "node_id":"css_02_0081.xml",
+ "uri":"en-us_topic_0000001477297346.html",
+ "node_id":"en-us_topic_0000001477297346.xml",
"product_code":"css",
- "code":"198",
+ "code":"243",
"des":"No. Currently, CSS does not integrate the x-pack component.",
"doc_type":"usermanual",
"kw":"Can I Use x-pack-sql-jdbc to Access CSS Clusters and Query Data?,Clusters,User Guide",
@@ -4050,10 +5001,10 @@
"githuburl":""
},
{
- "uri":"css_02_0082.html",
- "node_id":"css_02_0082.xml",
+ "uri":"en-us_topic_0000001476817918.html",
+ "node_id":"en-us_topic_0000001476817918.xml",
"product_code":"css",
- "code":"199",
+ "code":"244",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Ports",
@@ -4069,10 +5020,10 @@
"githuburl":""
},
{
- "uri":"css_02_0083.html",
- "node_id":"css_02_0083.xml",
+ "uri":"en-us_topic_0000001527697793.html",
+ "node_id":"en-us_topic_0000001527697793.xml",
"product_code":"css",
- "code":"200",
+ "code":"245",
"des":"Yes. Port 9200 is used by external systems to access CSS clusters, and port 9300 is used for communication between nodes.The methods for accessing port 9300 are as follow",
"doc_type":"usermanual",
"kw":"Do Ports 9200 and 9300 Both Open?,Ports,User Guide",
@@ -4088,10 +5039,10 @@
"githuburl":""
},
{
- "uri":"css_02_0201.html",
- "node_id":"css_02_0201.xml",
+ "uri":"en-us_topic_0000001562137917.html",
+ "node_id":"en-us_topic_0000001562137917.xml",
"product_code":"css",
- "code":"201",
+ "code":"246",
"des":"Currently to access Kibana dashboard of CSS Service, a user has to login to OTC consoleand navigate to Kibana login page.To make the access convenient a user can utilize ",
"doc_type":"usermanual",
"kw":"How to access Kibana from outside cloud using ELB?,Ports,User Guide",
@@ -4107,10 +5058,10 @@
"githuburl":""
},
{
- "uri":"css_01_0055.html",
- "node_id":"css_01_0055.xml",
+ "uri":"en-us_topic_0000001477899216.html",
+ "node_id":"en-us_topic_0000001477899216.xml",
"product_code":"css",
- "code":"202",
+ "code":"247",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Change History,User Guide",
@@ -4118,10 +5069,10 @@
"metedata":[
{
"prodname":"css",
- "IsMulti":"No",
- "opensource":"true",
"documenttype":"usermanual",
- "IsBot":"Yes"
+ "IsMulti":"yes",
+ "opensource":"true",
+ "IsBot":"yes"
}
],
"title":"Change History",
diff --git a/docs/css/umn/CLASS.TXT.json b/docs/css/umn/CLASS.TXT.json
index 7a7bb5cd..42296a8a 100644
--- a/docs/css/umn/CLASS.TXT.json
+++ b/docs/css/umn/CLASS.TXT.json
@@ -2,8 +2,8 @@
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"css",
- "title":"Overview",
- "uri":"css_01_0001.html",
+ "title":"Product Overview",
+ "uri":"en-us_topic_0000001921967557.html",
"doc_type":"usermanual",
"p_code":"",
"code":"1"
@@ -12,7 +12,7 @@
"desc":"Cloud Search Service (CSS) is a fully hosted distributed search service based on Elasticsearch. You can use it for structured and unstructured data search, and use AI vec",
"product_code":"css",
"title":"What Is Cloud Search Service?",
- "uri":"css_04_0001.html",
+ "uri":"en-us_topic_0000001667545170.html",
"doc_type":"usermanual",
"p_code":"1",
"code":"2"
@@ -21,7 +21,7 @@
"desc":"CSS has the following features and advantages.You can get insights from terabyte-scale data in milliseconds. In addition, you can use the visualized platform for data dis",
"product_code":"css",
"title":"Advantages",
- "uri":"css_04_0010.html",
+ "uri":"en-us_topic_0000001667704890.html",
"doc_type":"usermanual",
"p_code":"1",
"code":"3"
@@ -30,7 +30,7 @@
"desc":"CSS supports Kibana and Cerebro.Kibana is an open-source data analytics and visualization platform that works with Elasticsearch. You can use Kibana to search for and vie",
"product_code":"css",
"title":"Product Components",
- "uri":"css_04_0007.html",
+ "uri":"en-us_topic_0000001667704882.html",
"doc_type":"usermanual",
"p_code":"1",
"code":"4"
@@ -39,7 +39,7 @@
"desc":"CSS can be used to build search boxes for websites and apps to improve user experience. You can also build a log analysis platform with it, facilitating data-driven O&M a",
"product_code":"css",
"title":"Scenarios",
- "uri":"css_04_0002.html",
+ "uri":"en-us_topic_0000001715704493.html",
"doc_type":"usermanual",
"p_code":"1",
"code":"5"
@@ -48,7 +48,7 @@
"desc":"If you need to assign different permissions to employees in your organization to access your CSS resources, IAM is a good choice for fine-grained permissions management. ",
"product_code":"css",
"title":"Permissions Management",
- "uri":"css_04_0014.html",
+ "uri":"en-us_topic_0000001715624665.html",
"doc_type":"usermanual",
"p_code":"1",
"code":"6"
@@ -57,7 +57,7 @@
"desc":"The following table describes restrictions on clusters and nodes in CSS.You are advised to use the following browsers to access the CSS management console:Google Chrome 3",
"product_code":"css",
"title":"Constraints",
- "uri":"css_04_0005.html",
+ "uri":"en-us_topic_0000001715624677.html",
"doc_type":"usermanual",
"p_code":"1",
"code":"7"
@@ -66,7 +66,7 @@
"desc":"CSS uses the following resources:InstanceCPUMemory (GB)Disk quantityDisk size (GB)",
"product_code":"css",
"title":"Quotas",
- "uri":"css_04_0019.html",
+ "uri":"en-us_topic_0000001667545182.html",
"doc_type":"usermanual",
"p_code":"1",
"code":"8"
@@ -75,7 +75,7 @@
"desc":"Figure 1 shows the relationships between CSS and other services.",
"product_code":"css",
"title":"Related Services",
- "uri":"css_04_0004.html",
+ "uri":"en-us_topic_0000001715624661.html",
"doc_type":"usermanual",
"p_code":"1",
"code":"9"
@@ -84,7 +84,7 @@
"desc":"CSS provides functions on a per cluster basis. A cluster represents an independent search service that consists of multiple nodes.An index stores Elasticsearch data. It i",
"product_code":"css",
"title":"Basic Concepts",
- "uri":"css_04_0012.html",
+ "uri":"en-us_topic_0000001715624649.html",
"doc_type":"usermanual",
"p_code":"1",
"code":"10"
@@ -93,7 +93,7 @@
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"css",
"title":"Getting Started",
- "uri":"css_01_0006.html",
+ "uri":"en-us_topic_0000001477739396.html",
"doc_type":"usermanual",
"p_code":"",
"code":"11"
@@ -102,7 +102,7 @@
"desc":"This section describes how to use Elasticsearch for product search. You can use the Elasticsearch search engine of CSS to search for data based on the scenario example. T",
"product_code":"css",
"title":"Getting Started with Elasticsearch",
- "uri":"css_01_0007.html",
+ "uri":"en-us_topic_0000001528379317.html",
"doc_type":"usermanual",
"p_code":"11",
"code":"12"
@@ -111,7 +111,7 @@
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"css",
"title":"Permissions Management",
- "uri":"css_01_0070.html",
+ "uri":"en-us_topic_0000001477419768.html",
"doc_type":"usermanual",
"p_code":"",
"code":"13"
@@ -120,7 +120,7 @@
"desc":"This section describes how to use a group to grant permissions to a user. Figure 1 shows the process for granting permissions.CSS has two types of user permissions: CSS a",
"product_code":"css",
"title":"Creating a User and Granting Permissions",
- "uri":"css_01_0072.html",
+ "uri":"en-us_topic_0000001477419752.html",
"doc_type":"usermanual",
"p_code":"13",
"code":"14"
@@ -129,1692 +129,2097 @@
"desc":"Custom policies can be created to supplement the system-defined policies of CSS. For the actions supported for custom policies, see section \"Permissions Policies and Supp",
"product_code":"css",
"title":"CSS Custom Policies",
- "uri":"css_01_0086.html",
+ "uri":"en-us_topic_0000001477899148.html",
"doc_type":"usermanual",
"p_code":"13",
"code":"15"
},
{
- "desc":"On the Dashboard page of the CSS management console, you can view information about the status and storage capacity of existing clusters.",
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"css",
- "title":"Viewing the Cluster Runtime Status and Storage Capacity Status",
- "uri":"css_01_0053.html",
+ "title":"Elasticsearch",
+ "uri":"en-us_topic_0000001504911882.html",
"doc_type":"usermanual",
"p_code":"",
"code":"16"
},
{
- "desc":"The cluster list displays all CSS clusters. If there are a large number of clusters, these clusters will be displayed on multiple pages. You can view clusters of all stat",
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"css",
- "title":"Cluster List Overview",
- "uri":"css_01_0056.html",
+ "title":"Creating a Cluster",
+ "uri":"en-us_topic_0000001477899180.html",
"doc_type":"usermanual",
- "p_code":"",
+ "p_code":"16",
"code":"17"
},
{
- "desc":"To prevent data loss and minimize the cluster downtime in case of service interruption, CSS supports cross-AZ cluster deployment. When creating a cluster, you can select ",
+ "desc":"This section describes how to create an Elasticsearch cluster in security mode.Public IP address access and Kibana public access can be used only after security mode is e",
"product_code":"css",
- "title":"Deploying a Cross-AZ Cluster",
- "uri":"css_01_0188.html",
+ "title":"Creating a Cluster in Security Mode",
+ "uri":"en-us_topic_0000001477739344.html",
"doc_type":"usermanual",
- "p_code":"",
+ "p_code":"17",
"code":"18"
},
{
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "desc":"This section describes how to create an Elasticsearch cluster in non-security mode.Log in to the CSS management console.On the Dashboard page, click Create Cluster in the",
"product_code":"css",
- "title":"Elasticsearch",
- "uri":"css_01_0207.html",
+ "title":"Creating a Cluster in Non-Security Mode",
+ "uri":"en-us_topic_0000001477899172.html",
"doc_type":"usermanual",
- "p_code":"",
+ "p_code":"17",
"code":"19"
},
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Managing Elasticsearch Clusters",
- "uri":"css_01_0009.html",
- "doc_type":"usermanual",
- "p_code":"19",
- "code":"20"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Creating an Elasticsearch Cluster",
- "uri":"css_01_0008.html",
- "doc_type":"usermanual",
- "p_code":"20",
- "code":"21"
- },
{
"desc":"When creating an Elasticsearch cluster, you can enable the security mode for it. Identity authentication is required when users access a security cluster. You can also au",
"product_code":"css",
"title":"Clusters in Security Mode",
- "uri":"css_01_0189.html",
+ "uri":"en-us_topic_0000001528659093.html",
"doc_type":"usermanual",
- "p_code":"21",
+ "p_code":"17",
+ "code":"20"
+ },
+ {
+ "desc":"To prevent data loss and minimize the cluster downtime in case of service interruption, CSS supports cross-AZ cluster deployment. When creating a cluster, you can select ",
+ "product_code":"css",
+ "title":"Deploying a Cross-AZ Cluster",
+ "uri":"en-us_topic_0000001477419724.html",
+ "doc_type":"usermanual",
+ "p_code":"17",
+ "code":"21"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Importing Data",
+ "uri":"en-us_topic_0000001528379277.html",
+ "doc_type":"usermanual",
+ "p_code":"16",
"code":"22"
},
- {
- "desc":"This section describes how to create an Elasticsearch cluster in security mode.Public IP address access and Kibana public access can be used only after security mode is e",
- "product_code":"css",
- "title":"Creating an Elasticsearch Cluster in Security Mode",
- "uri":"css_01_0011.html",
- "doc_type":"usermanual",
- "p_code":"21",
- "code":"23"
- },
- {
- "desc":"This section describes how to create an Elasticsearch cluster in non-security mode.Log in to the CSS management console.On the Dashboard page, click Create Cluster in the",
- "product_code":"css",
- "title":"Creating an Elasticsearch Cluster in Non-Security Mode",
- "uri":"css_01_0094.html",
- "doc_type":"usermanual",
- "p_code":"21",
- "code":"24"
- },
- {
- "desc":"On the Cluster Information page, you can view the information about a cluster, including the private network address, public IP address, version, and node.Log in to the C",
- "product_code":"css",
- "title":"Viewing Basic Information About an Elasticsearch Cluster",
- "uri":"css_01_0185.html",
- "doc_type":"usermanual",
- "p_code":"20",
- "code":"25"
- },
- {
- "desc":"Tags are cluster identifiers. Adding tags to clusters can help you identify and manage your cluster resources.You can add tags to a cluster when creating the cluster or a",
- "product_code":"css",
- "title":"Managing Tags",
- "uri":"css_01_0075.html",
- "doc_type":"usermanual",
- "p_code":"20",
- "code":"26"
- },
- {
- "desc":"You can create enterprise projects based on your organizational structure. Then you can manage resources across different regions by enterprise project, add users and use",
- "product_code":"css",
- "title":"Binding an Enterprise Project",
- "uri":"css_01_0058.html",
- "doc_type":"usermanual",
- "p_code":"20",
- "code":"27"
- },
- {
- "desc":"If a cluster becomes faulty, you can restart it to check if it can run normally.The target cluster is not frozen and has no task in progress.If a cluster is available, en",
- "product_code":"css",
- "title":"Restarting a Cluster",
- "uri":"css_01_0014.html",
- "doc_type":"usermanual",
- "p_code":"20",
- "code":"28"
- },
- {
- "desc":"You can delete clusters that you no longer need.If you delete a cluster, the cluster service data will be cleared. Exercise caution when performing this operation.The sna",
- "product_code":"css",
- "title":"Deleting a Cluster",
- "uri":"css_01_0015.html",
- "doc_type":"usermanual",
- "p_code":"20",
- "code":"29"
- },
- {
- "desc":"In the Failed Tasks dialog box, you can view the failed tasks related to a cluster, such as failing to create, restart, scale out, back up, or restore a cluster. In addit",
- "product_code":"css",
- "title":"Managing Failed Tasks",
- "uri":"css_01_0060.html",
- "doc_type":"usermanual",
- "p_code":"20",
- "code":"30"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Accessing an Elasticsearch Cluster",
- "uri":"css_01_0210.html",
- "doc_type":"usermanual",
- "p_code":"19",
- "code":"31"
- },
- {
- "desc":"Elasticsearch clusters have built-in Kibana and Cerebro components. You can quickly access an Elasticsearch cluster through Kibana and Cerebro.Log in to the CSS managemen",
- "product_code":"css",
- "title":"Accessing an Elasticsearch Cluster",
- "uri":"css_01_0190.html",
- "doc_type":"usermanual",
- "p_code":"31",
- "code":"32"
- },
- {
- "desc":"You can access a security cluster (Elasticsearch clusters in version 6.5.4 or later support the security mode) that has the HTTPS access enabled through the public IP add",
- "product_code":"css",
- "title":"Accessing a Cluster from a Public Network",
- "uri":"css_01_0076.html",
- "doc_type":"usermanual",
- "p_code":"31",
- "code":"33"
- },
- {
- "desc":"If the VPC endpoint service is enabled, you can use a private domain name or node IP address generated by the endpoint to access the cluster. When the VPC endpoint servic",
- "product_code":"css",
- "title":"Accessing a Cluster Using a VPC Endpoint",
- "uri":"css_01_0082.html",
- "doc_type":"usermanual",
- "p_code":"31",
- "code":"34"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"(Optional) Interconnecting with a Dedicated Load Balancer",
- "uri":"css_01_0184.html",
- "doc_type":"usermanual",
- "p_code":"31",
- "code":"35"
- },
- {
- "desc":"CSS integrates shared load balancers and allows you to bind public network access and enable the VPC Endpoint service. Dedicated load balancers provide more functions and",
- "product_code":"css",
- "title":"Scenario",
- "uri":"css_01_0181.html",
- "doc_type":"usermanual",
- "p_code":"35",
- "code":"36"
- },
- {
- "desc":"This section describes how to connect a CSS cluster to a dedicated load balancer.If the target ELB listener uses the HTTP protocol, skip this step.Prepare and upload a se",
- "product_code":"css",
- "title":"Connecting to a Dedicated Load Balancer",
- "uri":"css_01_0182.html",
- "doc_type":"usermanual",
- "p_code":"35",
- "code":"37"
- },
- {
- "desc":"This section provides the sample code for two-way authentication during the access to a cluster from a Java client.",
- "product_code":"css",
- "title":"Sample Code for Two-Way Authentication During the Access to a Cluster",
- "uri":"css_01_0183.html",
- "doc_type":"usermanual",
- "p_code":"35",
- "code":"38"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Index Backup and Restoration",
- "uri":"css_01_0269.html",
- "doc_type":"usermanual",
- "p_code":"19",
- "code":"39"
- },
- {
- "desc":"You can back up index data in clusters. If data loss occurs or you want to retrieve data of a specified duration, you can restore the index data. Index backup is implemen",
- "product_code":"css",
- "title":"Backup and Restoration Overview",
- "uri":"css_01_0033.html",
- "doc_type":"usermanual",
- "p_code":"39",
- "code":"40"
- },
- {
- "desc":"Snapshots are automatically created at a specified time each day according to the rules you create. You can enable or disable the automatic snapshot creation function and",
- "product_code":"css",
- "title":"Managing Automatic Snapshot Creation",
- "uri":"css_01_0267.html",
- "doc_type":"usermanual",
- "p_code":"39",
- "code":"41"
- },
- {
- "desc":"You can manually create a snapshot at any time to back up all data or data of specified indexes.To use the function of creating or restoring snapshots, the account or IAM",
- "product_code":"css",
- "title":"Manually Creating a Snapshot",
- "uri":"css_01_0268.html",
- "doc_type":"usermanual",
- "p_code":"39",
- "code":"42"
- },
- {
- "desc":"You can use existing snapshots to restore the backup index data to a specified cluster.To use the function of creating or restoring snapshots, the account or IAM user log",
- "product_code":"css",
- "title":"Restoring Data",
- "uri":"css_01_0266.html",
- "doc_type":"usermanual",
- "p_code":"39",
- "code":"43"
- },
- {
- "desc":"If you no longer need a snapshot, delete it to release storage resources. If the automatic snapshot creation function is enabled, snapshots that are automatically created",
- "product_code":"css",
- "title":"Deleting a Snapshot",
- "uri":"css_01_0271.html",
- "doc_type":"usermanual",
- "p_code":"39",
- "code":"44"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Changing the Elasticsearch Cluster Form",
- "uri":"css_01_0149.html",
- "doc_type":"usermanual",
- "p_code":"19",
- "code":"45"
- },
- {
- "desc":"You can scale in or out a cluster and change cluster specifications. In this way, you can improve cluster efficiency and reduce O&M costs.Scaling Out a ClusterIf a data n",
- "product_code":"css",
- "title":"Overview",
- "uri":"css_01_0150.html",
- "doc_type":"usermanual",
- "p_code":"45",
- "code":"46"
- },
- {
- "desc":"If the workloads on the data plane of a cluster change, you can scale out the cluster by increasing the number or capacity of its nodes. Services are not interrupted duri",
- "product_code":"css",
- "title":"Scaling Out a Cluster",
- "uri":"css_01_0151.html",
- "doc_type":"usermanual",
- "p_code":"45",
- "code":"47"
- },
- {
- "desc":"If the workloads on the data plane of a cluster change, you can change its node specifications as needed.The target cluster is available and has no tasks in progress.The ",
- "product_code":"css",
- "title":"Changing Specifications",
- "uri":"css_01_0152.html",
- "doc_type":"usermanual",
- "p_code":"45",
- "code":"48"
- },
- {
- "desc":"If a cluster can process existing data without fully using its resources, you can scale in the cluster to reduce costs. Services are not interrupted during cluster scale-",
- "product_code":"css",
- "title":"Scaling in a Cluster",
- "uri":"css_01_0153.html",
- "doc_type":"usermanual",
- "p_code":"45",
- "code":"49"
- },
- {
- "desc":"If a cluster can process existing data without fully using its nodes, you can remove one or more specified nodes from the cluster to reduce costs. Services will not be in",
- "product_code":"css",
- "title":"Removing Specified Nodes",
- "uri":"css_01_0154.html",
- "doc_type":"usermanual",
- "p_code":"45",
- "code":"50"
- },
- {
- "desc":"If a node in the cluster is faulty, you can create a new node with the same specifications to replace it.The target cluster is available and has no tasks in progress.Only",
- "product_code":"css",
- "title":"Replacing a Specified Node",
- "uri":"css_01_0156.html",
- "doc_type":"usermanual",
- "p_code":"45",
- "code":"51"
- },
- {
- "desc":"If workloads on the data plane of a cluster increase, you can add master or client nodes as needed. Services are not interrupted while they are added.The target cluster i",
- "product_code":"css",
- "title":"Adding Master/Client Nodes",
- "uri":"css_01_0157.html",
- "doc_type":"usermanual",
- "p_code":"45",
- "code":"52"
- },
- {
- "desc":"After a cluster is created, its security mode can be changed using the following methods:Switching from the Non-Security Mode to Security ModeSwitching from the Security ",
- "product_code":"css",
- "title":"Changing the Security Mode",
- "uri":"css_01_0158.html",
- "doc_type":"usermanual",
- "p_code":"45",
- "code":"53"
- },
- {
- "desc":"CSS supports cross-AZ deployment. You can add an AZ to obtain more resources or improve cluster availability, and can migrate your current AZ to one with higher specifica",
- "product_code":"css",
- "title":"Changing AZs",
- "uri":"css_01_0201.html",
- "doc_type":"usermanual",
- "p_code":"45",
- "code":"54"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Configuring an Elasticsearch Cluster",
- "uri":"css_01_0211.html",
- "doc_type":"usermanual",
- "p_code":"19",
- "code":"55"
- },
- {
- "desc":"You can modify the elasticsearch.yml file.Log in to the CSS management console.Choose Clusters in the navigation pane. On the Clusters page, click the name of the target ",
- "product_code":"css",
- "title":"Configuring YML Parameters",
- "uri":"css_01_0080.html",
- "doc_type":"usermanual",
- "p_code":"55",
- "code":"56"
- },
- {
- "desc":"CSS provides you with cold data nodes. You can store data that requires query response in seconds on high-performance nodes and store data that requires query response in",
- "product_code":"css",
- "title":"Hot and Cold Data Node Switchover",
- "uri":"css_01_0079.html",
- "doc_type":"usermanual",
- "p_code":"55",
- "code":"57"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Managing Indexes",
- "uri":"css_01_0091.html",
- "doc_type":"usermanual",
- "p_code":"55",
- "code":"58"
- },
- {
- "desc":"Clusters of version 7.6.2 or later support index status management. ISM is a plugin that allows you to automate periodic and administrative operations based on changes on",
- "product_code":"css",
- "title":"Creating and Managing Indexes",
- "uri":"css_01_0093.html",
- "doc_type":"usermanual",
- "p_code":"58",
- "code":"59"
- },
- {
- "desc":"You can change any managed index policy. ISM has constraints to ensure that policy changes do not break indexes.If an index is stuck in its current status, never proceedi",
- "product_code":"css",
- "title":"Changing Policies",
- "uri":"css_01_0092.html",
- "doc_type":"usermanual",
- "p_code":"58",
- "code":"60"
- },
- {
- "desc":"CSS provides log backup and search functions to help you locate faults. You can back up cluster logs to OBS buckets and download required log files to analyze and locate ",
- "product_code":"css",
- "title":"Managing Logs",
- "uri":"css_01_0077.html",
- "doc_type":"usermanual",
- "p_code":"19",
- "code":"61"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Managing Plugins",
- "uri":"css_01_0212.html",
- "doc_type":"usermanual",
- "p_code":"19",
- "code":"62"
- },
- {
- "desc":"CSS clusters have default plugins. You can view the default plugin information on the console or Kibana.Log in to the CSS management console.In the navigation pane, choos",
- "product_code":"css",
- "title":"Viewing the Default Plugin List",
- "uri":"css_01_0078.html",
- "doc_type":"usermanual",
- "p_code":"62",
- "code":"63"
- },
- {
- "desc":"For Elasticsearch 6.5.4 and later versions, Open Distro for Elasticsearch SQL lets you write queries in SQL rather than in the Elasticsearch query domain-specific languag",
- "product_code":"css",
- "title":"Using the Open Distro SQL Plugin",
- "uri":"css_01_0061.html",
- "doc_type":"usermanual",
- "p_code":"62",
- "code":"64"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Kibana Platform",
- "uri":"css_01_0107.html",
- "doc_type":"usermanual",
- "p_code":"19",
- "code":"65"
- },
- {
- "desc":"You can customize the username, role name, and tenant name in Kibana.",
- "product_code":"css",
- "title":"Kibana Usage Restrictions",
- "uri":"css_01_0125.html",
- "doc_type":"usermanual",
- "p_code":"65",
- "code":"66"
- },
- {
- "desc":"After creating a CSS cluster, you can log in to Kibana through the console or public network.Logging in to the consoleLog in to the CSS management console.On the Clusters",
- "product_code":"css",
- "title":"Logging In to Kibana",
- "uri":"css_01_0108.html",
- "doc_type":"usermanual",
- "p_code":"65",
- "code":"67"
- },
- {
- "desc":"For CSS clusters that have security mode enabled, you can enable Kibana public access. After the configuration is complete, an IP address will be provided to access Kiban",
- "product_code":"css",
- "title":"Accessing a Cluster from a Kibana Public Network",
- "uri":"css_01_0088.html",
- "doc_type":"usermanual",
- "p_code":"65",
- "code":"68"
- },
- {
- "desc":"CSS uses the opendistro_security plug-in to provide security cluster capabilities. The opendistro_security plug-in is built based on the RBAC model. RBAC involves three c",
- "product_code":"css",
- "title":"Creating a User and Granting Permissions by Using Kibana",
- "uri":"css_01_0109.html",
- "doc_type":"usermanual",
- "p_code":"65",
- "code":"69"
- },
- {
- "desc":"To interconnect user-built Kibana with CSS Elasticsearch clusters, the following conditions must be met:The local environment must support access from external networks.K",
- "product_code":"css",
- "title":"Connecting User-Built Kibana to an Elasticsearch Cluster",
- "uri":"css_02_0097.html",
- "doc_type":"usermanual",
- "p_code":"65",
- "code":"70"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Enhanced Cluster Features",
- "uri":"css_01_0111.html",
- "doc_type":"usermanual",
- "p_code":"19",
- "code":"71"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Vector Retrieval",
- "uri":"css_01_0117.html",
- "doc_type":"usermanual",
- "p_code":"71",
- "code":"72"
- },
- {
- "desc":"Image recognition and retrieval, video search, and personalized recommendation impose high requirements on the latency and accuracy of high-dimensional space vector retri",
- "product_code":"css",
- "title":"Description",
- "uri":"css_01_0118.html",
- "doc_type":"usermanual",
- "p_code":"72",
- "code":"73"
- },
- {
- "desc":"Off-heap memory is used for index construction and query in vector retrieval. Therefore, the required cluster capacity is related to the index type and off-heap memory si",
- "product_code":"css",
- "title":"Cluster Planning for Vector Retrieval",
- "uri":"css_01_0122.html",
- "doc_type":"usermanual",
- "p_code":"72",
- "code":"74"
- },
- {
- "desc":"A cluster of version 7.6.2 or 7.10.2 has been created by referring to Cluster Planning for Vector Retrieval.Cluster advanced settings have been configured as required by ",
- "product_code":"css",
- "title":"Creating a Vector Index",
- "uri":"css_01_0121.html",
- "doc_type":"usermanual",
- "p_code":"72",
- "code":"75"
- },
- {
- "desc":"Standard vector query syntax is provided for vector fields with vector indexes. The following command will return n (specified by size/topk) data records that are most cl",
- "product_code":"css",
- "title":"Querying Vectors",
- "uri":"css_01_0123.html",
- "doc_type":"usermanual",
- "p_code":"72",
- "code":"76"
- },
- {
- "desc":"To reduce the cost of backup, disable the backup function before data import and enable it afterwards.Set refresh_interval to 120s or a larger value. Larger segments can ",
- "product_code":"css",
- "title":"Optimizing the Performance of Vector Retrieval",
- "uri":"css_01_0126.html",
- "doc_type":"usermanual",
- "p_code":"72",
- "code":"77"
- },
- {
- "desc":"When you perform operations in Creating a Vector Index, if IVF_GRAPH and IVF_GRAPH_PQ index algorithms are selected, you need to pre-build and register the center point v",
- "product_code":"css",
- "title":"(Optional) Pre-Building and Registering a Center Point Vector",
- "uri":"css_01_0124.html",
- "doc_type":"usermanual",
- "p_code":"72",
- "code":"78"
- },
- {
- "desc":"The vector retrieval engine is developed in C++ and uses off-heap memory. You can use the following APIs to manage the index cache.View cache statistics.GET /_vector/stat",
- "product_code":"css",
- "title":"Managing the Vector Index Cache",
- "uri":"css_01_0130.html",
- "doc_type":"usermanual",
- "p_code":"72",
- "code":"79"
- },
- {
- "desc":"Elasticsearch provides standard REST APIs and clients developed using Java, Python, and Go.Based on the open-source dataset SIFT1M (http://corpus-texmex.irisa.fr/) and Py",
- "product_code":"css",
- "title":"Sample Code for Vector Search on a Client",
- "uri":"css_01_0129.html",
- "doc_type":"usermanual",
- "p_code":"72",
- "code":"80"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Storage-Compute Decoupling",
- "uri":"css_01_0112.html",
- "doc_type":"usermanual",
- "p_code":"71",
- "code":"81"
- },
- {
- "desc":"You can store hot data on SSD to achieve the optimal query performance, and store historical data in OBS to reduce data storage costs.A large volume of data is written to",
- "product_code":"css",
- "title":"Context",
- "uri":"css_01_0113.html",
- "doc_type":"usermanual",
- "p_code":"81",
- "code":"82"
- },
- {
- "desc":"Before freezing an index, ensure no data is being written to it. The index will be set to read only before being frozen, and data write will fail.After an index is frozen",
- "product_code":"css",
- "title":"Freezing an Index",
- "uri":"css_01_0114.html",
- "doc_type":"usermanual",
- "p_code":"81",
- "code":"83"
- },
- {
- "desc":"After data is dumped to OBS, some data is cached to reduce access to OBS and improve Elasticsearch query performance. Data that is requested for the first time is obtaine",
- "product_code":"css",
- "title":"Configuring Cache",
- "uri":"css_01_0116.html",
- "doc_type":"usermanual",
- "p_code":"81",
- "code":"84"
- },
- {
- "desc":"When you query data on the Discover page of Kibana for the first time, all data needs to be obtained from OBS because there is no cache. If a large number of documents ar",
- "product_code":"css",
- "title":"Enhanced Cold Data Query Performance",
- "uri":"css_01_0187.html",
- "doc_type":"usermanual",
- "p_code":"81",
- "code":"85"
- },
- {
- "desc":"To clearly display the operations of the storage and compute decoupling plugin in OBS, the real-time OBS rate metric is added to CSS and recorded in the system index.This",
- "product_code":"css",
- "title":"Monitoring OBS Operations",
- "uri":"css_01_0202.html",
- "doc_type":"usermanual",
- "p_code":"81",
- "code":"86"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Enhanced Import Performance",
- "uri":"css_01_0227.html",
- "doc_type":"usermanual",
- "p_code":"71",
- "code":"87"
- },
- {
- "desc":"CSS provides enhanced data import function. It optimizes bulk route, and speeds up processing through indexes and word segmentation, improving import performance and redu",
- "product_code":"css",
- "title":"Context",
- "uri":"css_01_0228.html",
- "doc_type":"usermanual",
- "p_code":"87",
- "code":"88"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Instructions",
- "uri":"css_01_0229.html",
- "doc_type":"usermanual",
- "p_code":"87",
- "code":"89"
- },
- {
- "desc":"According to the default routing rule of Elasticsearch, data in a bulk request is routed to different shards. When massive data is written and a large number of index sha",
- "product_code":"css",
- "title":"Bulk Route Optimization",
- "uri":"css_01_0230.html",
- "doc_type":"usermanual",
- "p_code":"89",
- "code":"90"
- },
- {
- "desc":"You can specify the index.aggr_perf_batch_size configuration item to enable or disable batch import optimization. After the batch import function is enabled, documents in",
- "product_code":"css",
- "title":"Bulk Aggregation Optimization",
- "uri":"css_01_0231.html",
- "doc_type":"usermanual",
- "p_code":"89",
- "code":"91"
- },
- {
- "desc":"You can configure index.native_speed_up to enable or disable text index acceleration. This function optimizes the index process and memory usage to accelerate index build",
- "product_code":"css",
- "title":"Text Index Acceleration",
- "uri":"css_01_0232.html",
- "doc_type":"usermanual",
- "p_code":"89",
- "code":"92"
- },
- {
- "desc":"After the import performance is enhanced, the number of index merge tasks increases accordingly. You can adjust the following configuration to reduce the impact of merge ",
- "product_code":"css",
- "title":"Optimization of Other Parameters",
- "uri":"css_01_0233_0.html",
- "doc_type":"usermanual",
- "p_code":"89",
- "code":"93"
- },
- {
- "desc":"Test environmentCluster: 3 M6 ECSs (8 vCPUs | 64 GB memory)Data: open-source web server access logs and internal service dataset (dns_logs)Configuration: 120 shards, no r",
- "product_code":"css",
- "title":"Performance Data",
- "uri":"css_01_0234.html",
- "doc_type":"usermanual",
- "p_code":"87",
- "code":"94"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Flow Control",
- "uri":"css_01_0200.html",
- "doc_type":"usermanual",
- "p_code":"71",
- "code":"95"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Flow Control 2.0",
- "uri":"css_01_0191.html",
- "doc_type":"usermanual",
- "p_code":"95",
- "code":"96"
- },
- {
- "desc":"CSS can control traffic at the node level. You can configure the blacklist and whitelist, the maximum concurrent HTTP connections, and the maximum HTTP connections for a ",
- "product_code":"css",
- "title":"Context",
- "uri":"css_01_0192.html",
- "doc_type":"usermanual",
- "p_code":"96",
- "code":"97"
- },
- {
- "desc":"You can run commands in Kibana to enable or disable HTTP/HTTPS flow control for your cluster. The command parameters are as follows.Log in to the CSS management console.C",
- "product_code":"css",
- "title":"HTTP/HTTPS Flow Control",
- "uri":"css_01_0193.html",
- "doc_type":"usermanual",
- "p_code":"96",
- "code":"98"
- },
- {
- "desc":"Elasticsearch provides a circuit breaker, which will terminate requests or return the error code 429 if the memory usage exceeds its threshold. However, the circuit break",
- "product_code":"css",
- "title":"Memory Flow Control",
- "uri":"css_01_0194.html",
- "doc_type":"usermanual",
- "p_code":"96",
- "code":"99"
- },
- {
- "desc":"Request sampling can record the access of client IP addresses and the type of requests from the client. Based on the statistics, you can identify the access traffic of cl",
- "product_code":"css",
- "title":"Request Sampling",
- "uri":"css_01_0195.html",
- "doc_type":"usermanual",
- "p_code":"96",
- "code":"100"
- },
- {
- "desc":"You can block all connections in one click, except the connections that passes through O&M APIs, to handle unexpected traffic burst and quickly recover your cluster.Log i",
- "product_code":"css",
- "title":"One-click Traffic Blocking",
- "uri":"css_01_0196.html",
- "doc_type":"usermanual",
- "p_code":"96",
- "code":"101"
- },
- {
- "desc":"Flow control can be implemented via an independent API.Log in to the CSS management console.Choose Clusters in the navigation pane. On the Clusters page, locate the targe",
- "product_code":"css",
- "title":"Access Statistics and Traffic Control Information Query",
- "uri":"css_01_0198.html",
- "doc_type":"usermanual",
- "p_code":"96",
- "code":"102"
- },
- {
- "desc":"You can check access logs in either of the following ways:Enable and check access logs via an independent API. Configure the API parameters to record the access log time ",
- "product_code":"css",
- "title":"Temporary Access Statistics Logs",
- "uri":"css_01_0199.html",
- "doc_type":"usermanual",
- "p_code":"96",
- "code":"103"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Flow Control 1.0",
- "uri":"css_01_0139.html",
- "doc_type":"usermanual",
- "p_code":"95",
- "code":"104"
- },
- {
- "desc":"CSS can control traffic at the node level. You can configure the blacklist and whitelist, the maximum concurrent HTTP connections, and the maximum HTTP connections for a ",
- "product_code":"css",
- "title":"Context",
- "uri":"css_01_0140.html",
- "doc_type":"usermanual",
- "p_code":"104",
- "code":"105"
- },
- {
- "desc":"You can run commands in Kibana to enable or disable HTTP/HTTPS flow control for your cluster. The command parameters are as follows.Log in to the CSS management console.C",
- "product_code":"css",
- "title":"HTTP/HTTPS Flow Control",
- "uri":"css_01_0141.html",
- "doc_type":"usermanual",
- "p_code":"104",
- "code":"106"
- },
- {
- "desc":"Elasticsearch provides a circuit breaker, which will terminate requests if the memory usage exceeds its threshold. However, Elasticsearch does not check the heap memory u",
- "product_code":"css",
- "title":"Memory Flow Control",
- "uri":"css_01_0142.html",
- "doc_type":"usermanual",
- "p_code":"104",
- "code":"107"
- },
- {
- "desc":"The following table describes the global path whitelist parameters for flow control.Log in to the CSS management console.Choose Clusters in the navigation pane. On the Cl",
- "product_code":"css",
- "title":"Global Path Whitelist for Flow Control",
- "uri":"css_01_0143.html",
- "doc_type":"usermanual",
- "p_code":"104",
- "code":"108"
- },
- {
- "desc":"Request sampling can record the access IP addresses, the number of accessed nodes, request paths, request URLs, and request bodies, which can be used to obtain the IP add",
- "product_code":"css",
- "title":"Request Sampling",
- "uri":"css_01_0144.html",
- "doc_type":"usermanual",
- "p_code":"104",
- "code":"109"
- },
- {
- "desc":"Flow control can be implemented via an independent API.Log in to the CSS management console.Choose Clusters in the navigation pane. On the Clusters page, locate the targe",
- "product_code":"css",
- "title":"Flow Control",
- "uri":"css_01_0145.html",
- "doc_type":"usermanual",
- "p_code":"104",
- "code":"110"
- },
- {
- "desc":"You can check access logs in either of the following ways:Enable and check access logs via an independent API. Configure the API parameters to record the access log time ",
- "product_code":"css",
- "title":"Access Logs",
- "uri":"css_01_0146.html",
- "doc_type":"usermanual",
- "p_code":"104",
- "code":"111"
- },
- {
- "desc":"CPU flow control can be implemented based on the CPU usage of a node.You can configure the CPU usage threshold of a node to prevent the node from breaking down due to hea",
- "product_code":"css",
- "title":"CPU Flow Control",
- "uri":"css_01_0147.html",
- "doc_type":"usermanual",
- "p_code":"104",
- "code":"112"
- },
- {
- "desc":"You can block all traffic in one click, except the traffic that passes through O&M APIs, to handle unexpected traffic burst and quickly recover your cluster.Log in to the",
- "product_code":"css",
- "title":"One-click Traffic Blocking",
- "uri":"css_01_0148.html",
- "doc_type":"usermanual",
- "p_code":"104",
- "code":"113"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Large Query Isolation",
- "uri":"css_01_0131.html",
- "doc_type":"usermanual",
- "p_code":"71",
- "code":"114"
- },
- {
- "desc":"The large query isolation feature allows you to separately manage large queries. You can isolate query requests that consume a large amount of memory or take a long perio",
- "product_code":"css",
- "title":"Context",
- "uri":"css_01_0132.html",
- "doc_type":"usermanual",
- "p_code":"114",
- "code":"115"
- },
- {
- "desc":"The large query isolation and global timeout features are disabled by default. If you enable them, the configuration will take effect immediately. Perform the following s",
- "product_code":"css",
- "title":"Procedure",
- "uri":"css_01_0133.html",
- "doc_type":"usermanual",
- "p_code":"114",
- "code":"116"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Index Monitoring",
- "uri":"css_01_0134.html",
- "doc_type":"usermanual",
- "p_code":"71",
- "code":"117"
- },
- {
- "desc":"CSS monitors various metrics of the running status and change trend of cluster indexes to measure service usage and handle potential risks in a timely manner, ensuring th",
- "product_code":"css",
- "title":"Context",
- "uri":"css_01_0135.html",
- "doc_type":"usermanual",
- "p_code":"117",
- "code":"118"
- },
- {
- "desc":"Log in to the CSS management console.Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation colu",
- "product_code":"css",
- "title":"Enabling Index Monitoring",
- "uri":"css_01_0136.html",
- "doc_type":"usermanual",
- "p_code":"117",
- "code":"119"
- },
- {
- "desc":"You can call an API to query the index read and write traffic within a period of time.A cluster has been created and index monitoring has been enabled.Log in to the CSS m",
- "product_code":"css",
- "title":"Checking the Index Read and Write Traffic",
- "uri":"css_01_0137.html",
- "doc_type":"usermanual",
- "p_code":"117",
- "code":"120"
- },
- {
- "desc":"You can check preconfigured index monitoring visualizations on the Dashboard and Visualizations pages of Kibana. You can also customize tables and charts.A cluster has be",
- "product_code":"css",
- "title":"Checking Index Monitoring Information",
- "uri":"css_01_0138.html",
- "doc_type":"usermanual",
- "p_code":"117",
- "code":"121"
- },
- {
- "desc":"The configuration file content of kibana-monitor is as follows. You are advised to save the file as monitoring-kibana.ndjson.",
- "product_code":"css",
- "title":"kibana-monitor",
- "uri":"css_01_0197.html",
- "doc_type":"usermanual",
- "p_code":"117",
- "code":"122"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Enhanced Cluster Monitoring",
- "uri":"css_01_0177.html",
- "doc_type":"usermanual",
- "p_code":"71",
- "code":"123"
- },
- {
- "desc":"The Elasticsearch community only discusses how to monitor the average latency of search requests, which cannot reflect the actual search performance of a cluster. To enha",
- "product_code":"css",
- "title":"P99 Latency Monitoring",
- "uri":"css_01_0178.html",
- "doc_type":"usermanual",
- "p_code":"123",
- "code":"124"
- },
- {
- "desc":"When an external system accesses Elasticsearch through the HTTP protocol, a response and the corresponding status code are returned. The open-source Elasticsearch server ",
- "product_code":"css",
- "title":"HTTP Status Code Monitoring",
- "uri":"css_01_0179.html",
- "doc_type":"usermanual",
- "p_code":"123",
- "code":"125"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Enhanced Aggregation",
- "uri":"css_01_0172.html",
- "doc_type":"usermanual",
- "p_code":"71",
- "code":"126"
- },
- {
- "desc":"The enhanced aggregation is an optimization feature for service awareness. With this feature, you can optimize the aggregation analysis capability of observable services.",
- "product_code":"css",
- "title":"Features",
- "uri":"css_01_0173.html",
- "doc_type":"usermanual",
- "p_code":"126",
- "code":"127"
- },
- {
- "desc":"Low-cardinality fields have high data clustering performance when being sorted, which facilitates vectorized optimization. Assume that the following query statement exist",
- "product_code":"css",
- "title":"Grouping and Aggregation of Low-cardinality Fields",
- "uri":"css_01_0174.html",
- "doc_type":"usermanual",
- "p_code":"126",
- "code":"128"
- },
- {
- "desc":"High-cardinality fields are usually used for histogram grouping and aggregation instead of single-point grouping and aggregation. For example, collecting the statistics o",
- "product_code":"css",
- "title":"High-cardinality Field Histogram Aggregation",
- "uri":"css_01_0175.html",
- "doc_type":"usermanual",
- "p_code":"126",
- "code":"129"
- },
- {
- "desc":"In the scenario where low-cardinality and high-cardinality fields are mixed, assume that the following query statement exists:Group the low-cardinality fields and create ",
- "product_code":"css",
- "title":"Low-cardinality and High-cardinality Field Mixing",
- "uri":"css_01_0176.html",
- "doc_type":"usermanual",
- "p_code":"126",
- "code":"130"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Read/Write Splitting",
- "uri":"css_01_0161.html",
- "doc_type":"usermanual",
- "p_code":"71",
- "code":"131"
- },
- {
- "desc":"CSS supports read/write splitting. Data written to the primary cluster (Leader) can be automatically synchronized to the secondary cluster (Follower). In this way, data i",
- "product_code":"css",
- "title":"Features",
- "uri":"css_01_0162.html",
- "doc_type":"usermanual",
- "p_code":"131",
- "code":"132"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Instructions",
- "uri":"css_01_0164.html",
- "doc_type":"usermanual",
- "p_code":"131",
- "code":"133"
- },
- {
- "desc":"Log in to the CSS management console.Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster, and click Access Kibana in the Operation col",
- "product_code":"css",
- "title":"Basic Settings",
- "uri":"css_01_0165.html",
- "doc_type":"usermanual",
- "p_code":"133",
- "code":"134"
- },
- {
- "desc":"Synchronize a single index.The request URL and request body parameters are as follows:After the synchronization function is enabled, indexes in the secondary cluster beco",
- "product_code":"css",
- "title":"Synchronizing Specified Indexes",
- "uri":"css_01_0166.html",
- "doc_type":"usermanual",
- "p_code":"133",
- "code":"135"
- },
- {
- "desc":"The request URL and request body parameters are as follows:The following are two examples:1. Synchronize a single index from the primary cluster to the secondary cluster.",
- "product_code":"css",
- "title":"Matching Index Synchronization",
- "uri":"css_01_0167.html",
- "doc_type":"usermanual",
- "p_code":"133",
- "code":"136"
- },
- {
- "desc":"You can specify multiple indexes or use wildcard to match the target indexes and terminate their synchronization tasks. Subsequent modifications to the indexes in the pri",
- "product_code":"css",
- "title":"Stopping Index Synchronization",
- "uri":"css_01_0168.html",
- "doc_type":"usermanual",
- "p_code":"133",
- "code":"137"
- },
- {
- "desc":"Querying the created patterns.This API is used to query the pattern list and query a specified pattern by name.An example request is as follows:GET auto_sync/pattern\nGET ",
- "product_code":"css",
- "title":"Other Management APIs",
- "uri":"css_01_0169.html",
- "doc_type":"usermanual",
- "p_code":"133",
- "code":"138"
- },
- {
- "desc":"This section describes how to switch from the primary cluster to the secondary cluster when the primary cluster is faulty.1. If the synchronization of specified indexes h",
- "product_code":"css",
- "title":"Best Practices",
- "uri":"css_01_0170.html",
- "doc_type":"usermanual",
- "p_code":"131",
- "code":"139"
- },
- {
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
- "product_code":"css",
- "title":"Importing Data to Elasticsearch",
- "uri":"css_01_0045.html",
- "doc_type":"usermanual",
- "p_code":"",
- "code":"140"
- },
{
"desc":"You can use Logstash to collect data and migrate collected data to Elasticsearch in CSS. This method helps you effectively obtain and manage data through Elasticsearch. D",
"product_code":"css",
"title":"Using Logstash to Import Data to Elasticsearch",
- "uri":"css_01_0048.html",
+ "uri":"en-us_topic_0000001528499137.html",
"doc_type":"usermanual",
- "p_code":"140",
- "code":"141"
+ "p_code":"22",
+ "code":"23"
},
{
"desc":"You can import data in various formats, such as JSON, to Elasticsearch in CSS by using Kibana or APIs.Before importing data, ensure that you can use Kibana to access the ",
"product_code":"css",
"title":"Using Kibana or APIs to Import Data to Elasticsearch",
- "uri":"css_01_0024.html",
+ "uri":"en-us_topic_0000001477899200.html",
"doc_type":"usermanual",
- "p_code":"140",
+ "p_code":"22",
+ "code":"24"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Accessing an Elasticsearch Cluster",
+ "uri":"en-us_topic_0000001555591537.html",
+ "doc_type":"usermanual",
+ "p_code":"16",
+ "code":"25"
+ },
+ {
+ "desc":"Elasticsearch clusters have built-in Kibana and Cerebro components. You can quickly access an Elasticsearch cluster through Kibana and Cerebro.Log in to the CSS managemen",
+ "product_code":"css",
+ "title":"Accessing an Elasticsearch Cluster",
+ "uri":"en-us_topic_0000001528379305.html",
+ "doc_type":"usermanual",
+ "p_code":"25",
+ "code":"26"
+ },
+ {
+ "desc":"You can access a security cluster (Elasticsearch clusters in version 6.5.4 or later support the security mode) that has the HTTPS access enabled through the public IP add",
+ "product_code":"css",
+ "title":"Accessing a Cluster from a Public Network",
+ "uri":"en-us_topic_0000001528299629.html",
+ "doc_type":"usermanual",
+ "p_code":"25",
+ "code":"27"
+ },
+ {
+ "desc":"If the VPC endpoint service is enabled, you can use a private domain name or node IP address generated by the endpoint to access the cluster. When the VPC endpoint servic",
+ "product_code":"css",
+ "title":"Accessing a Cluster Using a VPC Endpoint",
+ "uri":"en-us_topic_0000001477579412.html",
+ "doc_type":"usermanual",
+ "p_code":"25",
+ "code":"28"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"(Optional) Interconnecting with a Dedicated Load Balancer",
+ "uri":"en-us_topic_0000001528379297.html",
+ "doc_type":"usermanual",
+ "p_code":"25",
+ "code":"29"
+ },
+ {
+ "desc":"CSS integrates shared load balancers and allows you to bind public network access and enable the VPC Endpoint service. Dedicated load balancers provide more functions and",
+ "product_code":"css",
+ "title":"Scenario",
+ "uri":"en-us_topic_0000001477739400.html",
+ "doc_type":"usermanual",
+ "p_code":"29",
+ "code":"30"
+ },
+ {
+ "desc":"This section describes how to connect a CSS cluster to a dedicated load balancer.If the target ELB listener uses the HTTP protocol, skip this step.Prepare and upload a se",
+ "product_code":"css",
+ "title":"Connecting to a Dedicated Load Balancer",
+ "uri":"en-us_topic_0000001477739348.html",
+ "doc_type":"usermanual",
+ "p_code":"29",
+ "code":"31"
+ },
+ {
+ "desc":"This section provides the sample code for two-way authentication during the access to a cluster from a Java client.",
+ "product_code":"css",
+ "title":"Sample Code for Two-Way Authentication During the Access to a Cluster",
+ "uri":"en-us_topic_0000001477419788.html",
+ "doc_type":"usermanual",
+ "p_code":"29",
+ "code":"32"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Index Backup and Restoration",
+ "uri":"en-us_topic_0000001633221741.html",
+ "doc_type":"usermanual",
+ "p_code":"16",
+ "code":"33"
+ },
+ {
+ "desc":"You can back up index data in clusters. If data loss occurs or you want to retrieve data of a specified duration, you can restore the index data. Index backup is implemen",
+ "product_code":"css",
+ "title":"Backup and Restoration Overview",
+ "uri":"en-us_topic_0000001633303977.html",
+ "doc_type":"usermanual",
+ "p_code":"33",
+ "code":"34"
+ },
+ {
+ "desc":"Snapshots are automatically created at a specified time according to the rules you create. You can enable or disable the automatic snapshot creation function and set the ",
+ "product_code":"css",
+ "title":"Managing Automatic Snapshot Creation",
+ "uri":"en-us_topic_0000001583460750.html",
+ "doc_type":"usermanual",
+ "p_code":"33",
+ "code":"35"
+ },
+ {
+ "desc":"You can manually create a snapshot at any time to back up all data or data of specified indexes.To use the function of creating or restoring snapshots, the account or IAM",
+ "product_code":"css",
+ "title":"Manually Creating a Snapshot",
+ "uri":"en-us_topic_0000001633220693.html",
+ "doc_type":"usermanual",
+ "p_code":"33",
+ "code":"36"
+ },
+ {
+ "desc":"You can use existing snapshots to restore the backup index data to a specified cluster.To use the function of creating or restoring snapshots, the account or IAM user log",
+ "product_code":"css",
+ "title":"Restoring Data",
+ "uri":"en-us_topic_0000001583300810.html",
+ "doc_type":"usermanual",
+ "p_code":"33",
+ "code":"37"
+ },
+ {
+ "desc":"If you no longer need a snapshot, delete it to release storage resources. If the automatic snapshot creation function is enabled, snapshots that are automatically created",
+ "product_code":"css",
+ "title":"Deleting a Snapshot",
+ "uri":"en-us_topic_0000001583146906.html",
+ "doc_type":"usermanual",
+ "p_code":"33",
+ "code":"38"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Cluster Specification Modification",
+ "uri":"en-us_topic_0000001477739404.html",
+ "doc_type":"usermanual",
+ "p_code":"16",
+ "code":"39"
+ },
+ {
+ "desc":"You can scale in or out a cluster and change cluster specifications. In this way, you can improve cluster efficiency and reduce O&M costs.Scaling Out a ClusterIf a data n",
+ "product_code":"css",
+ "title":"Overview",
+ "uri":"en-us_topic_0000001528379253.html",
+ "doc_type":"usermanual",
+ "p_code":"39",
+ "code":"40"
+ },
+ {
+ "desc":"If the workloads on the data plane of a cluster change, you can scale out the cluster by increasing the number or capacity of its nodes. Services are not interrupted duri",
+ "product_code":"css",
+ "title":"Scaling Out a Cluster",
+ "uri":"en-us_topic_0000001477899164.html",
+ "doc_type":"usermanual",
+ "p_code":"39",
+ "code":"41"
+ },
+ {
+ "desc":"If the workloads on the data plane of a cluster change, you can change its node specifications as needed.The target cluster is available and has no tasks in progress.The ",
+ "product_code":"css",
+ "title":"Changing Specifications",
+ "uri":"en-us_topic_0000001477739368.html",
+ "doc_type":"usermanual",
+ "p_code":"39",
+ "code":"42"
+ },
+ {
+ "desc":"If a cluster can process existing data without fully using its resources, you can scale in the cluster to reduce costs. You are advised to scale in clusters during off-pe",
+ "product_code":"css",
+ "title":"Scaling in a Cluster",
+ "uri":"en-us_topic_0000001528299597.html",
+ "doc_type":"usermanual",
+ "p_code":"39",
+ "code":"43"
+ },
+ {
+ "desc":"If a cluster can process existing data without fully using its nodes, you can remove one or more specified nodes from the cluster to reduce costs. Services will not be in",
+ "product_code":"css",
+ "title":"Removing Specified Nodes",
+ "uri":"en-us_topic_0000001477899184.html",
+ "doc_type":"usermanual",
+ "p_code":"39",
+ "code":"44"
+ },
+ {
+ "desc":"If a node in the cluster is faulty, you can create a new node with the same specifications to replace it. During the replacement of a specified node, data of that node wi",
+ "product_code":"css",
+ "title":"Replacing a Specified Node",
+ "uri":"en-us_topic_0000001477579404.html",
+ "doc_type":"usermanual",
+ "p_code":"39",
+ "code":"45"
+ },
+ {
+ "desc":"If workloads on the data plane of a cluster increase, you can add master or client nodes as needed. Services are not interrupted while they are added.The target cluster i",
+ "product_code":"css",
+ "title":"Adding Master/Client Nodes",
+ "uri":"en-us_topic_0000001477899188.html",
+ "doc_type":"usermanual",
+ "p_code":"39",
+ "code":"46"
+ },
+ {
+ "desc":"After a cluster is created, its security mode can be changed using the following methods:Switching from the Non-Security Mode to Security ModeSwitching from the Security ",
+ "product_code":"css",
+ "title":"Changing the Security Mode",
+ "uri":"en-us_topic_0000001528379285.html",
+ "doc_type":"usermanual",
+ "p_code":"39",
+ "code":"47"
+ },
+ {
+ "desc":"CSS supports cross-AZ deployment. You can add an AZ to obtain more resources or improve cluster availability, and can migrate your current AZ to one with higher specifica",
+ "product_code":"css",
+ "title":"Changing AZs",
+ "uri":"en-us_topic_0000001528299585.html",
+ "doc_type":"usermanual",
+ "p_code":"39",
+ "code":"48"
+ },
+ {
+ "desc":"Same-version upgrade, cross-engine upgrade, and cross-version upgrade are supported. Same-version upgrade is to upgrade the kernel patch of a cluster to fix problems or o",
+ "product_code":"css",
+ "title":"Upgrading the Cluster Version",
+ "uri":"en-us_topic_0000001528659153.html",
+ "doc_type":"usermanual",
+ "p_code":"16",
+ "code":"49"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Cluster Management",
+ "uri":"en-us_topic_0000001477579340.html",
+ "doc_type":"usermanual",
+ "p_code":"16",
+ "code":"50"
+ },
+ {
+ "desc":"The cluster list displays all CSS clusters. If there are a large number of clusters, these clusters will be displayed on multiple pages. You can view clusters of all stat",
+ "product_code":"css",
+ "title":"Cluster List Overview",
+ "uri":"en-us_topic_0000001528299613.html",
+ "doc_type":"usermanual",
+ "p_code":"50",
+ "code":"51"
+ },
+ {
+ "desc":"On the Cluster Information page, you can view the information about a cluster, including the private network address, public IP address, version, and node.Log in to the C",
+ "product_code":"css",
+ "title":"Viewing Basic Cluster Information",
+ "uri":"en-us_topic_0000001528499201.html",
+ "doc_type":"usermanual",
+ "p_code":"50",
+ "code":"52"
+ },
+ {
+ "desc":"Tags are cluster identifiers. Adding tags to clusters can help you identify and manage your cluster resources.You can add tags to a cluster when creating the cluster or a",
+ "product_code":"css",
+ "title":"Managing Tags",
+ "uri":"en-us_topic_0000001528659137.html",
+ "doc_type":"usermanual",
+ "p_code":"50",
+ "code":"53"
+ },
+ {
+ "desc":"CSS provides log backup and search functions to help you locate faults. You can back up cluster logs to OBS buckets and download required log files to analyze and locate ",
+ "product_code":"css",
+ "title":"Managing Logs",
+ "uri":"en-us_topic_0000001477739336.html",
+ "doc_type":"usermanual",
+ "p_code":"50",
+ "code":"54"
+ },
+ {
+ "desc":"You can modify the elasticsearch.yml file.Log in to the CSS management console.Choose Clusters in the navigation pane. On the Clusters page, click the name of the target ",
+ "product_code":"css",
+ "title":"Configuring YML Parameters",
+ "uri":"en-us_topic_0000001528299601.html",
+ "doc_type":"usermanual",
+ "p_code":"50",
+ "code":"55"
+ },
+ {
+ "desc":"CSS clusters have default plugins. You can view the default plugin information on the console or Kibana.Log in to the CSS management console.In the navigation pane, choos",
+ "product_code":"css",
+ "title":"Viewing the Default Plugin List",
+ "uri":"en-us_topic_0000001477739388.html",
+ "doc_type":"usermanual",
+ "p_code":"50",
+ "code":"56"
+ },
+ {
+ "desc":"You can create enterprise projects based on your organizational structure. Then you can manage resources across different regions by enterprise project, add users and use",
+ "product_code":"css",
+ "title":"Binding an Enterprise Project",
+ "uri":"en-us_topic_0000001528299621.html",
+ "doc_type":"usermanual",
+ "p_code":"50",
+ "code":"57"
+ },
+ {
+ "desc":"If a cluster becomes faulty, you can restart it to check if it can run normally.The target cluster is not frozen and has no task in progress.If a cluster is available, en",
+ "product_code":"css",
+ "title":"Restarting a Cluster",
+ "uri":"en-us_topic_0000001528499141.html",
+ "doc_type":"usermanual",
+ "p_code":"50",
+ "code":"58"
+ },
+ {
+ "desc":"You can delete clusters that you no longer need.If you delete a cluster, the cluster service data will be cleared. Exercise caution when performing this operation.The sna",
+ "product_code":"css",
+ "title":"Deleting a Cluster",
+ "uri":"en-us_topic_0000001477579396.html",
+ "doc_type":"usermanual",
+ "p_code":"50",
+ "code":"59"
+ },
+ {
+ "desc":"In the Failed Tasks dialog box, you can view the failed tasks related to a cluster, such as failing to create, restart, scale out, back up, or restore a cluster. In addit",
+ "product_code":"css",
+ "title":"Managing Failed Tasks",
+ "uri":"en-us_topic_0000001528299581.html",
+ "doc_type":"usermanual",
+ "p_code":"50",
+ "code":"60"
+ },
+ {
+ "desc":"For Elasticsearch 6.5.4 and later versions, Open Distro for Elasticsearch SQL lets you write queries in SQL rather than in the Elasticsearch query domain-specific languag",
+ "product_code":"css",
+ "title":"Using the Open Distro SQL Plug-in to Compile Queries",
+ "uri":"en-us_topic_0000001477899212.html",
+ "doc_type":"usermanual",
+ "p_code":"16",
+ "code":"61"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Using the Open Distro Alarm Plug-in to Configure SMN Alarms",
+ "uri":"en-us_topic_0000001866261281.html",
+ "doc_type":"usermanual",
+ "p_code":"16",
+ "code":"62"
+ },
+ {
+ "desc":"To use the OpenDistro alarm plugin (opendistro_alerting), authorize your Elasticsearch cluster to use SMN to send notifications. For details about how to use the OpenDist",
+ "product_code":"css",
+ "title":"(Optional) Authorizing CSS to Use SMN",
+ "uri":"en-us_topic_0000001564706853.html",
+ "doc_type":"usermanual",
+ "p_code":"62",
+ "code":"63"
+ },
+ {
+ "desc":"By default, the open-source OpenDistro alarm plugin (opendistro_alerting) is integrated into CSS to send notifications when data meets specific conditions. This plugin co",
+ "product_code":"css",
+ "title":"Configuring SMN Alarms",
+ "uri":"en-us_topic_0000001564906577.html",
+ "doc_type":"usermanual",
+ "p_code":"62",
+ "code":"64"
+ },
+ {
+ "desc":"CSS provides you with cold data nodes. You can store data that requires query response in seconds on high-performance nodes and store data that requires query response in",
+ "product_code":"css",
+ "title":"Switching Hot and Cold Data",
+ "uri":"en-us_topic_0000001528659081.html",
+ "doc_type":"usermanual",
+ "p_code":"16",
+ "code":"65"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Managing Indexes",
+ "uri":"en-us_topic_0000001477579380.html",
+ "doc_type":"usermanual",
+ "p_code":"16",
+ "code":"66"
+ },
+ {
+ "desc":"Clusters of version 7.6.2 or later support index status management. ISM is a plugin that allows you to automate periodic and administrative operations based on changes on",
+ "product_code":"css",
+ "title":"Creating and Managing Indexes",
+ "uri":"en-us_topic_0000001477739392.html",
+ "doc_type":"usermanual",
+ "p_code":"66",
+ "code":"67"
+ },
+ {
+ "desc":"You can change any managed index policy. ISM has constraints to ensure that policy changes do not break indexes.If an index is stuck in its current status, never proceedi",
+ "product_code":"css",
+ "title":"Changing Policies",
+ "uri":"en-us_topic_0000001528659085.html",
+ "doc_type":"usermanual",
+ "p_code":"66",
+ "code":"68"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Kibana Platform",
+ "uri":"en-us_topic_0000001528299625.html",
+ "doc_type":"usermanual",
+ "p_code":"16",
+ "code":"69"
+ },
+ {
+ "desc":"After creating a CSS cluster, you can log in to Kibana through the console or public network.You can customize the username, role name, and tenant name in Kibana.Logging ",
+ "product_code":"css",
+ "title":"Logging In to Kibana",
+ "uri":"en-us_topic_0000001477419728.html",
+ "doc_type":"usermanual",
+ "p_code":"69",
+ "code":"70"
+ },
+ {
+ "desc":"For CSS clusters that have security mode enabled, you can enable Kibana public access. After the configuration is complete, an IP address will be provided to access Kiban",
+ "product_code":"css",
+ "title":"Accessing a Cluster from a Kibana Public Network",
+ "uri":"en-us_topic_0000001477419764.html",
+ "doc_type":"usermanual",
+ "p_code":"69",
+ "code":"71"
+ },
+ {
+ "desc":"CSS uses the opendistro_security plug-in to provide security cluster capabilities. The opendistro_security plug-in is built based on the RBAC model. RBAC involves three c",
+ "product_code":"css",
+ "title":"Creating a User and Granting Permissions by Using Kibana",
+ "uri":"en-us_topic_0000001528379273.html",
+ "doc_type":"usermanual",
+ "p_code":"69",
+ "code":"72"
+ },
+ {
+ "desc":"To interconnect user-built Kibana with CSS Elasticsearch clusters, the following conditions must be met:The local environment must support access from external networks.K",
+ "product_code":"css",
+ "title":"Connecting User-Built Kibana to an Elasticsearch Cluster",
+ "uri":"en-us_topic_0000001477419776.html",
+ "doc_type":"usermanual",
+ "p_code":"69",
+ "code":"73"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"OpenSearch",
+ "uri":"en-us_topic_0000001633949601.html",
+ "doc_type":"usermanual",
+ "p_code":"",
+ "code":"74"
+ },
+ {
+ "desc":"This section describes how to create an OpenSearch cluster.Public IP address access and Kibana public access can be used only after security mode is enabled.When creating",
+ "product_code":"css",
+ "title":"Creating a Cluster",
+ "uri":"en-us_topic_0000001584149004.html",
+ "doc_type":"usermanual",
+ "p_code":"74",
+ "code":"75"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Accessing a Cluster",
+ "uri":"en-us_topic_0000001583669892.html",
+ "doc_type":"usermanual",
+ "p_code":"74",
+ "code":"76"
+ },
+ {
+ "desc":"OpenSearch clusters have built-in Kibana and Cerebro components. You can quickly access an OpenSearch cluster through Kibana and Cerebro.Log in to the CSS management cons",
+ "product_code":"css",
+ "title":"Quickly Accessing an OpenSearch Cluster",
+ "uri":"en-us_topic_0000001641003025.html",
+ "doc_type":"usermanual",
+ "p_code":"76",
+ "code":"77"
+ },
+ {
+ "desc":"You can access a security cluster that has the HTTPS access enabled through the public IP address provided by the system.By default, CSS uses a shared load balancer for p",
+ "product_code":"css",
+ "title":"Accessing a Cluster from a Public Network",
+ "uri":"en-us_topic_0000001590963076.html",
+ "doc_type":"usermanual",
+ "p_code":"76",
+ "code":"78"
+ },
+ {
+ "desc":"If the VPC endpoint service is enabled, you can use a private domain name or node IP address generated by the endpoint to access the cluster. When the VPC endpoint servic",
+ "product_code":"css",
+ "title":"Accessing a Cluster Using a VPC Endpoint",
+ "uri":"en-us_topic_0000001590323656.html",
+ "doc_type":"usermanual",
+ "p_code":"76",
+ "code":"79"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"(Optional) Interconnecting with a Dedicated Load Balancer",
+ "uri":"en-us_topic_0000001591285452.html",
+ "doc_type":"usermanual",
+ "p_code":"76",
+ "code":"80"
+ },
+ {
+ "desc":"CSS integrates shared load balancers and allows you to bind public network access and enable the VPC Endpoint service. Dedicated load balancers provide more functions and",
+ "product_code":"css",
+ "title":"Scenario Description",
+ "uri":"en-us_topic_0000001640645481.html",
+ "doc_type":"usermanual",
+ "p_code":"80",
+ "code":"81"
+ },
+ {
+ "desc":"This section describes how to connect a CSS cluster to a dedicated load balancer.If the target ELB listener uses the HTTP protocol, skip this step.Prepare and upload a se",
+ "product_code":"css",
+ "title":"Connecting to a Dedicated Load Balancer",
+ "uri":"en-us_topic_0000001640764229.html",
+ "doc_type":"usermanual",
+ "p_code":"80",
+ "code":"82"
+ },
+ {
+ "desc":"This section provides the sample code for two-way authentication during the access to a cluster from a Java client.",
+ "product_code":"css",
+ "title":"Sample Code for Two-Way Authentication During the Access to a Cluster",
+ "uri":"en-us_topic_0000001590603388.html",
+ "doc_type":"usermanual",
+ "p_code":"80",
+ "code":"83"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Index Backup and Restoration",
+ "uri":"en-us_topic_0000001640883633.html",
+ "doc_type":"usermanual",
+ "p_code":"74",
+ "code":"84"
+ },
+ {
+ "desc":"You can back up index data in clusters. If data loss occurs or you want to retrieve data of a specified duration, you can restore the index data. Index backup is implemen",
+ "product_code":"css",
+ "title":"Backup and Restoration Overview",
+ "uri":"en-us_topic_0000001641003029.html",
+ "doc_type":"usermanual",
+ "p_code":"84",
+ "code":"85"
+ },
+ {
+ "desc":"Snapshots are automatically created at a specified time each day according to the rules you create. You can enable or disable the automatic snapshot creation function and",
+ "product_code":"css",
+ "title":"Managing Automatic Snapshot Creation",
+ "uri":"en-us_topic_0000001590963080.html",
+ "doc_type":"usermanual",
+ "p_code":"84",
+ "code":"86"
+ },
+ {
+ "desc":"You can manually create a snapshot at any time to back up all data or data of specified indexes.To use the function of creating or restoring snapshots, the account or IAM",
+ "product_code":"css",
+ "title":"Manually Creating a Snapshot",
+ "uri":"en-us_topic_0000001590323664.html",
+ "doc_type":"usermanual",
+ "p_code":"84",
+ "code":"87"
+ },
+ {
+ "desc":"You can use existing snapshots to restore the backup index data to a specified cluster.To use the function of creating or restoring snapshots, the account or IAM user log",
+ "product_code":"css",
+ "title":"Restoring Data",
+ "uri":"en-us_topic_0000001591285456.html",
+ "doc_type":"usermanual",
+ "p_code":"84",
+ "code":"88"
+ },
+ {
+ "desc":"If you no longer need a snapshot, delete it to release storage resources. If the automatic snapshot creation function is enabled, snapshots that are automatically created",
+ "product_code":"css",
+ "title":"Deleting a Snapshot",
+ "uri":"en-us_topic_0000001640645485.html",
+ "doc_type":"usermanual",
+ "p_code":"84",
+ "code":"89"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Scaling In/Out a Cluster",
+ "uri":"en-us_topic_0000001637436445.html",
+ "doc_type":"usermanual",
+ "p_code":"74",
+ "code":"90"
+ },
+ {
+ "desc":"You can scale in or out a cluster and change cluster specifications. In this way, you can improve cluster efficiency and reduce O&M costs.Scaling Out a ClusterIf a data n",
+ "product_code":"css",
+ "title":"Overview",
+ "uri":"en-us_topic_0000001640773493.html",
+ "doc_type":"usermanual",
+ "p_code":"90",
+ "code":"91"
+ },
+ {
+ "desc":"If the workloads on the data plane of a cluster change, you can scale out the cluster by increasing the number or capacity of its nodes. Services are not interrupted duri",
+ "product_code":"css",
+ "title":"Scaling Out a Cluster",
+ "uri":"en-us_topic_0000001590332948.html",
+ "doc_type":"usermanual",
+ "p_code":"90",
+ "code":"92"
+ },
+ {
+ "desc":"If the workloads on the data plane of a cluster change, you can change its node specifications as needed.The target cluster is available and has no tasks in progress.The ",
+ "product_code":"css",
+ "title":"Changing Specifications",
+ "uri":"en-us_topic_0000001641012329.html",
+ "doc_type":"usermanual",
+ "p_code":"90",
+ "code":"93"
+ },
+ {
+ "desc":"If a cluster can process existing data without fully using its resources, you can scale in the cluster to reduce costs. You are advised to scale in clusters during off-pe",
+ "product_code":"css",
+ "title":"Scaling in a Cluster",
+ "uri":"en-us_topic_0000001590972372.html",
+ "doc_type":"usermanual",
+ "p_code":"90",
+ "code":"94"
+ },
+ {
+ "desc":"If a cluster can process existing data without fully using its nodes, you can remove one or more specified nodes from the cluster to reduce costs. Services will not be in",
+ "product_code":"css",
+ "title":"Removing Specified Nodes",
+ "uri":"en-us_topic_0000001590612676.html",
+ "doc_type":"usermanual",
+ "p_code":"90",
+ "code":"95"
+ },
+ {
+ "desc":"If a node in the cluster is faulty, you can create a new node with the same specifications to replace it.The target cluster is available and has no tasks in progress.Only",
+ "product_code":"css",
+ "title":"Replacing a Specified Node",
+ "uri":"en-us_topic_0000001640892937.html",
+ "doc_type":"usermanual",
+ "p_code":"90",
+ "code":"96"
+ },
+ {
+ "desc":"If workloads on the data plane of a cluster increase, you can add master or client nodes as needed. Services are not interrupted while they are added.The cluster is in th",
+ "product_code":"css",
+ "title":"Adding Master/Client Nodes",
+ "uri":"en-us_topic_0000001640654793.html",
+ "doc_type":"usermanual",
+ "p_code":"90",
+ "code":"97"
+ },
+ {
+ "desc":"After a cluster is created, its security mode can be changed in the following methods:Switching from the Non-Security Mode to Security ModeSwitching from the Security to ",
+ "product_code":"css",
+ "title":"Changing the Security Mode",
+ "uri":"en-us_topic_0000001591294758.html",
+ "doc_type":"usermanual",
+ "p_code":"90",
+ "code":"98"
+ },
+ {
+ "desc":"CSS supports cross-AZ deployment. You can add an AZ to obtain more resources or improve cluster availability, and can migrate your current AZ to one with higher specifica",
+ "product_code":"css",
+ "title":"Changing AZs",
+ "uri":"en-us_topic_0000001640773505.html",
+ "doc_type":"usermanual",
+ "p_code":"90",
+ "code":"99"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Managing Clusters",
+ "uri":"en-us_topic_0000001583989096.html",
+ "doc_type":"usermanual",
+ "p_code":"74",
+ "code":"100"
+ },
+ {
+ "desc":"On the basic information page of an Opensearch cluster, you can view the private network address, public network address, version, and node of the cluster.Log in to the C",
+ "product_code":"css",
+ "title":"Viewing Basic Information About an Opensearch Cluster",
+ "uri":"en-us_topic_0000001583669884.html",
+ "doc_type":"usermanual",
+ "p_code":"100",
+ "code":"101"
+ },
+ {
+ "desc":"Tags are cluster identifiers. Adding tags to clusters can help you identify and manage your cluster resources.You can add tags to a cluster when creating the cluster or a",
+ "product_code":"css",
+ "title":"Managing Tags",
+ "uri":"en-us_topic_0000001647464345.html",
+ "doc_type":"usermanual",
+ "p_code":"100",
+ "code":"102"
+ },
+ {
+ "desc":"CSS provides log backup and search functions to help you locate faults. You can back up cluster logs to OBS buckets and download required log files to analyze and locate ",
+ "product_code":"css",
+ "title":"Managing Logs",
+ "uri":"en-us_topic_0000001591456866.html",
+ "doc_type":"usermanual",
+ "p_code":"100",
+ "code":"103"
+ },
+ {
+ "desc":"You can modify the elasticsearch.yml file.Log in to the CSS management console.Choose Clusters in the navigation pane. On the Clusters page, click the name of the target ",
+ "product_code":"css",
+ "title":"Configuring YML Parameters",
+ "uri":"en-us_topic_0000001640777441.html",
+ "doc_type":"usermanual",
+ "p_code":"100",
+ "code":"104"
+ },
+ {
+ "desc":"CSS clusters have default plug-ins. You can view the default plugin information on the console or Kibana.Log in to the CSS management console.In the navigation pane, choo",
+ "product_code":"css",
+ "title":"Viewing the Default Plugin List",
+ "uri":"en-us_topic_0000001591616594.html",
+ "doc_type":"usermanual",
+ "p_code":"100",
+ "code":"105"
+ },
+ {
+ "desc":"If a cluster becomes faulty, you can restart it to check if it can run normally.The target cluster is not frozen and has no task in progress.If a cluster is available, en",
+ "product_code":"css",
+ "title":"Restarting a Cluster",
+ "uri":"en-us_topic_0000001640879293.html",
+ "doc_type":"usermanual",
+ "p_code":"100",
+ "code":"106"
+ },
+ {
+ "desc":"You can delete clusters that you no longer need.If you delete a cluster, the cluster service data will be cleared. Exercise caution when performing this operation.The sna",
+ "product_code":"css",
+ "title":"Deleting a Cluster",
+ "uri":"en-us_topic_0000001640998693.html",
+ "doc_type":"usermanual",
+ "p_code":"100",
+ "code":"107"
+ },
+ {
+ "desc":"By default, CSS has installed the open-source alert plugin opensearch-alerting for OpenSearch clusters to send notifications when data meets specific conditions. This plu",
+ "product_code":"css",
+ "title":"Configuring SMN Alarms",
+ "uri":"en-us_topic_0000001815107861.html",
+ "doc_type":"usermanual",
+ "p_code":"74",
+ "code":"108"
+ },
+ {
+ "desc":"CSS provides you with cold data nodes. You can store data that requires query response in seconds on hot data nodes with high performance and store historical data that r",
+ "product_code":"css",
+ "title":"Switching Hot and Cold Data",
+ "uri":"en-us_topic_0000001591776270.html",
+ "doc_type":"usermanual",
+ "p_code":"74",
+ "code":"109"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Managing Indexes",
+ "uri":"en-us_topic_0000001640658697.html",
+ "doc_type":"usermanual",
+ "p_code":"74",
+ "code":"110"
+ },
+ {
+ "desc":"You can manage the indexes of OpenSearch clusters. ISM is a plugin that allows you to automate periodic and administrative operations based on changes on the index age, i",
+ "product_code":"css",
+ "title":"Creating and Managing Index Policies",
+ "uri":"en-us_topic_0000001591298678.html",
+ "doc_type":"usermanual",
+ "p_code":"110",
+ "code":"111"
+ },
+ {
+ "desc":"You can change any managed index policy. ISM has constraints to ensure that policy changes do not break indexes.If an index is stuck in its current status and you want to",
+ "product_code":"css",
+ "title":"Changing an Index Policy",
+ "uri":"en-us_topic_0000001641016221.html",
+ "doc_type":"usermanual",
+ "p_code":"110",
+ "code":"112"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"OpenSearch Dashboards",
+ "uri":"en-us_topic_0000001587956758.html",
+ "doc_type":"usermanual",
+ "p_code":"74",
+ "code":"113"
+ },
+ {
+ "desc":"An OpenSearch cluster has been created.Logging in to the consoleLog in to the CSS management console.In the navigation pane, choose Clusters > OpenSearch.On the Clusters ",
+ "product_code":"css",
+ "title":"Logging In to the OpenSearch Dashboards",
+ "uri":"en-us_topic_0000001591776274.html",
+ "doc_type":"usermanual",
+ "p_code":"113",
+ "code":"114"
+ },
+ {
+ "desc":"For CSS clusters that have security mode enabled, you can enable Kibana public access. After the configuration is complete, an IP address will be provided to access Kiban",
+ "product_code":"css",
+ "title":"Accessing a Cluster from a Kibana Public Network",
+ "uri":"en-us_topic_0000001640658701.html",
+ "doc_type":"usermanual",
+ "p_code":"113",
+ "code":"115"
+ },
+ {
+ "desc":"The security mode has been enabled for the OpenSearch cluster.You can customize the username, role name, and tenant name in the OpenSearch Dashboards.Log in to the CSS ma",
+ "product_code":"css",
+ "title":"Creating and Authorizing a User on the OpenSearch Dashboards",
+ "uri":"en-us_topic_0000001591298682.html",
+ "doc_type":"usermanual",
+ "p_code":"113",
+ "code":"116"
+ },
+ {
+ "desc":"On the Dashboard page of the CSS management console, you can view information about the status and storage capacity of existing clusters.",
+ "product_code":"css",
+ "title":"Viewing the Cluster Runtime Status and Storage Capacity Status",
+ "uri":"en-us_topic_0000001477579368.html",
+ "doc_type":"usermanual",
+ "p_code":"",
+ "code":"117"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Enhanced Cluster Features",
+ "uri":"en-us_topic_0000001528659089.html",
+ "doc_type":"usermanual",
+ "p_code":"",
+ "code":"118"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Vector Retrieval",
+ "uri":"en-us_topic_0000001477899160.html",
+ "doc_type":"usermanual",
+ "p_code":"118",
+ "code":"119"
+ },
+ {
+ "desc":"Image recognition and retrieval, video search, and personalized recommendation impose high requirements on the latency and accuracy of high-dimensional space vector retri",
+ "product_code":"css",
+ "title":"Description",
+ "uri":"en-us_topic_0000001528299617.html",
+ "doc_type":"usermanual",
+ "p_code":"119",
+ "code":"120"
+ },
+ {
+ "desc":"Off-heap memory is used for index construction and query in vector retrieval. Therefore, the required cluster capacity is related to the index type and off-heap memory si",
+ "product_code":"css",
+ "title":"Cluster Planning for Vector Retrieval",
+ "uri":"en-us_topic_0000001477419716.html",
+ "doc_type":"usermanual",
+ "p_code":"119",
+ "code":"121"
+ },
+ {
+ "desc":"You have created a cluster by referring to Cluster Planning for Vector Retrieval. The cluster must be an Elasticsearch cluster of version 7.6.2 or 7.10.2, or an OpenSearc",
+ "product_code":"css",
+ "title":"Creating a Vector Index",
+ "uri":"en-us_topic_0000001528299557.html",
+ "doc_type":"usermanual",
+ "p_code":"119",
+ "code":"122"
+ },
+ {
+ "desc":"Standard vector query syntax is provided for vector fields with vector indexes. The following command will return n (specified by size/topk) data records that are most cl",
+ "product_code":"css",
+ "title":"Querying Vectors",
+ "uri":"en-us_topic_0000001477899192.html",
+ "doc_type":"usermanual",
+ "p_code":"119",
+ "code":"123"
+ },
+ {
+ "desc":"To reduce the cost of backup, disable the backup function before data import and enable it afterwards.Set refresh_interval to 120s or a larger value. Larger segments can ",
+ "product_code":"css",
+ "title":"Optimizing the Performance of Vector Retrieval",
+ "uri":"en-us_topic_0000001528659117.html",
+ "doc_type":"usermanual",
+ "p_code":"119",
+ "code":"124"
+ },
+ {
+ "desc":"When you perform operations in Creating a Vector Index, if IVF_GRAPH and IVF_GRAPH_PQ index algorithms are selected, you need to pre-build and register the center point v",
+ "product_code":"css",
+ "title":"(Optional) Pre-Building and Registering a Center Point Vector",
+ "uri":"en-us_topic_0000001528299609.html",
+ "doc_type":"usermanual",
+ "p_code":"119",
+ "code":"125"
+ },
+ {
+ "desc":"The vector retrieval engine is developed in C++ and uses off-heap memory. You can use the following APIs to manage the index cache.View cache statistics.GET /_vector/stat",
+ "product_code":"css",
+ "title":"Managing the Vector Index Cache",
+ "uri":"en-us_topic_0000001477899208.html",
+ "doc_type":"usermanual",
+ "p_code":"119",
+ "code":"126"
+ },
+ {
+ "desc":"Elasticsearch provides standard REST APIs and clients developed using Java, Python, and Go.Based on the open-source dataset SIFT1M (http://corpus-texmex.irisa.fr/) and Py",
+ "product_code":"css",
+ "title":"Sample Code for Vector Search on a Client",
+ "uri":"en-us_topic_0000001528499121.html",
+ "doc_type":"usermanual",
+ "p_code":"119",
+ "code":"127"
+ },
+ {
+ "desc":"PV_GRAPH deeply optimizes the HNSW algorithm and supports the vector and scalar joint filtering. When the vector and scalar joint filtering is used, the result filling ra",
+ "product_code":"css",
+ "title":"Using PV_GRAPH to Search for Vector Indexes",
+ "uri":"en-us_topic_0000001814230837.html",
+ "doc_type":"usermanual",
+ "p_code":"119",
+ "code":"128"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Storage-Compute Decoupling",
+ "uri":"en-us_topic_0000001528499153.html",
+ "doc_type":"usermanual",
+ "p_code":"118",
+ "code":"129"
+ },
+ {
+ "desc":"You can store hot data on SSD to achieve the optimal query performance, and store historical data in OBS to reduce data storage costs.A large volume of data is written to",
+ "product_code":"css",
+ "title":"Context",
+ "uri":"en-us_topic_0000001477739384.html",
+ "doc_type":"usermanual",
+ "p_code":"129",
+ "code":"130"
+ },
+ {
+ "desc":"Before freezing an index, ensure no data is being written to it. The index will be set to read only before being frozen, and data write will fail.After an index is frozen",
+ "product_code":"css",
+ "title":"Freezing an Index",
+ "uri":"en-us_topic_0000001528299569.html",
+ "doc_type":"usermanual",
+ "p_code":"129",
+ "code":"131"
+ },
+ {
+ "desc":"After data is dumped to OBS, some data is cached to reduce access to OBS and improve Elasticsearch query performance. Data that is requested for the first time is obtaine",
+ "product_code":"css",
+ "title":"Configuring Cache",
+ "uri":"en-us_topic_0000001528379309.html",
+ "doc_type":"usermanual",
+ "p_code":"129",
+ "code":"132"
+ },
+ {
+ "desc":"When you query data on the Discover page of Kibana for the first time, all data needs to be obtained from OBS because there is no cache. If a large number of documents ar",
+ "product_code":"css",
+ "title":"Enhanced Cold Data Query Performance",
+ "uri":"en-us_topic_0000001477739360.html",
+ "doc_type":"usermanual",
+ "p_code":"129",
+ "code":"133"
+ },
+ {
+ "desc":"To clearly display the operations of the storage and compute decoupling plugin in OBS, the real-time OBS rate metric is added to CSS and recorded in the system index.This",
+ "product_code":"css",
+ "title":"Monitoring OBS Operations",
+ "uri":"en-us_topic_0000001528379301.html",
+ "doc_type":"usermanual",
+ "p_code":"129",
+ "code":"134"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Enhanced Import Performance",
+ "uri":"en-us_topic_0000001533988876.html",
+ "doc_type":"usermanual",
+ "p_code":"118",
+ "code":"135"
+ },
+ {
+ "desc":"CSS provides enhanced data import function. It optimizes bulk route, and speeds up processing through indexes and word segmentation, improving import performance and redu",
+ "product_code":"css",
+ "title":"Context",
+ "uri":"en-us_topic_0000001533829376.html",
+ "doc_type":"usermanual",
+ "p_code":"135",
+ "code":"136"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Instructions",
+ "uri":"en-us_topic_0000001585148465.html",
+ "doc_type":"usermanual",
+ "p_code":"135",
+ "code":"137"
+ },
+ {
+ "desc":"According to the default routing rule of Elasticsearch, data in a bulk request is routed to different shards. When massive data is written and a large number of index sha",
+ "product_code":"css",
+ "title":"Bulk Route Optimization",
+ "uri":"en-us_topic_0000001534148568.html",
+ "doc_type":"usermanual",
+ "p_code":"137",
+ "code":"138"
+ },
+ {
+ "desc":"You can specify the index.aggr_perf_batch_size configuration item to enable or disable batch import optimization. After the batch import function is enabled, documents in",
+ "product_code":"css",
+ "title":"Bulk Aggregation Optimization",
+ "uri":"en-us_topic_0000001534308508.html",
+ "doc_type":"usermanual",
+ "p_code":"137",
+ "code":"139"
+ },
+ {
+ "desc":"You can configure index.native_speed_up to enable or disable text index acceleration. This function optimizes the index process and memory usage to accelerate index build",
+ "product_code":"css",
+ "title":"Text Index Acceleration",
+ "uri":"en-us_topic_0000001584828717.html",
+ "doc_type":"usermanual",
+ "p_code":"137",
+ "code":"140"
+ },
+ {
+ "desc":"After the import performance is enhanced, the number of index merge tasks increases accordingly. You can adjust the following configuration to reduce the impact of merge ",
+ "product_code":"css",
+ "title":"Optimization of Other Parameters",
+ "uri":"en-us_topic_0000001584988497.html",
+ "doc_type":"usermanual",
+ "p_code":"137",
+ "code":"141"
+ },
+ {
+ "desc":"Test environmentCluster: 3 Cloud M6 ECSs (8 vCPUs | 64 GB memory)Data: open-source web server access logs and internal service dataset (dns_logs)Configuration: 120 shards",
+ "product_code":"css",
+ "title":"Performance Data",
+ "uri":"en-us_topic_0000001584708761.html",
+ "doc_type":"usermanual",
+ "p_code":"135",
"code":"142"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"css",
- "title":"Monitoring",
- "uri":"css_01_0041.html",
+ "title":"Flow Control 2.0",
+ "uri":"en-us_topic_0000001477899176.html",
"doc_type":"usermanual",
- "p_code":"",
+ "p_code":"118",
"code":"143"
},
{
- "desc":"You can use Cloud Eye to monitor cluster metrics of CSS in real time and quickly handle exceptions. For details about Cloud Eye, see the Cloud Eye User Guide.Table 1 list",
+ "desc":"CSS can control traffic at the node level. You can configure the blacklist and whitelist, the maximum concurrent HTTP connections, and the maximum HTTP connections for a ",
"product_code":"css",
- "title":"Monitoring Metrics of Elasticsearch Clusters",
- "uri":"css_01_0042.html",
+ "title":"Context",
+ "uri":"en-us_topic_0000001528379257.html",
"doc_type":"usermanual",
"p_code":"143",
"code":"144"
},
{
- "desc":"This topic describes CSS metrics that can be monitored by Cloud Eye as well as their namespaces and dimensions. You can search for the monitoring metrics and alarms gener",
+ "desc":"You can run commands in Kibana to enable or disable HTTP/HTTPS flow control for your cluster.Log in to the CSS management console.Choose Clusters in the navigation pane. ",
"product_code":"css",
- "title":"Monitoring Metrics",
- "uri":"css_01_0246.html",
+ "title":"HTTP/HTTPS Flow Control",
+ "uri":"en-us_topic_0000001477579372.html",
"doc_type":"usermanual",
"p_code":"143",
"code":"145"
},
{
- "desc":"You can use Cloud Eye to monitor the created clusters. After configuring the cluster monitoring, you can log in to the Cloud Eye management console to view cluster metric",
+ "desc":"Elasticsearch provides a circuit breaker, which will terminate requests or return the error code 429 if the memory usage exceeds its threshold. However, the circuit break",
"product_code":"css",
- "title":"Configuring Cluster Monitoring",
- "uri":"css_01_0155.html",
+ "title":"Memory Flow Control",
+ "uri":"en-us_topic_0000001477419736.html",
"doc_type":"usermanual",
"p_code":"143",
"code":"146"
},
{
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "desc":"Request sampling can record the access of client IP addresses and the type of requests from the client. Based on the statistics, you can identify the access traffic of cl",
"product_code":"css",
- "title":"Auditing",
- "uri":"css_01_0049.html",
+ "title":"Request Sampling",
+ "uri":"en-us_topic_0000001477739364.html",
"doc_type":"usermanual",
- "p_code":"",
+ "p_code":"143",
"code":"147"
},
{
- "desc":"Cloud Trace Service (CTS) is available on the public cloud platform. With CTS, you can record operations associated with CSS for later query, audit, and backtrack operati",
+ "desc":"You can block all connections in one click, except the connections that passes through O&M APIs, to handle unexpected traffic burst and quickly recover your cluster.Log i",
"product_code":"css",
- "title":"Key Operations Recorded by CTS",
- "uri":"css_01_0050.html",
+ "title":"One-click Traffic Blocking",
+ "uri":"en-us_topic_0000001477579364.html",
"doc_type":"usermanual",
- "p_code":"147",
+ "p_code":"143",
"code":"148"
},
{
- "desc":"After you enable CTS, it starts recording operations related to CSS. The CTS management console stores the last seven days of operation records. This section describes ho",
+ "desc":"Flow control can be implemented via an independent API.Log in to the CSS management console.Choose Clusters in the navigation pane. On the Clusters page, locate the targe",
"product_code":"css",
- "title":"Viewing Audit Logs",
- "uri":"css_01_0051.html",
+ "title":"Access Statistics and Traffic Control Information Query",
+ "uri":"en-us_topic_0000001477419720.html",
"doc_type":"usermanual",
- "p_code":"147",
+ "p_code":"143",
"code":"149"
},
{
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "desc":"You can check access logs in either of the following ways:Enable and check access logs via an independent API. Configure the API parameters to record the access log time ",
"product_code":"css",
- "title":"FAQs",
- "uri":"css_02_0001.html",
+ "title":"Temporary Access Statistics Logs",
+ "uri":"en-us_topic_0000001528659149.html",
"doc_type":"usermanual",
- "p_code":"",
+ "p_code":"143",
"code":"150"
},
{
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "desc":"The traffic control function can record cluster access logs and write the logs to background log files. You can back up the logs to OBS for viewing. You can run the follo",
"product_code":"css",
- "title":"General Consulting",
- "uri":"css_02_0051.html",
+ "title":"Recording Access Logs in Files",
+ "uri":"en-us_topic_0000001832788405.html",
"doc_type":"usermanual",
- "p_code":"150",
+ "p_code":"143",
"code":"151"
},
{
- "desc":"A region and availability zone (AZ) identify the location of a data center. You can create resources in a specific region and AZ.A region is a physical data center. Each ",
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"css",
- "title":"What Are Regions and AZs?",
- "uri":"css_02_0034.html",
+ "title":"Flow Control 1.0",
+ "uri":"en-us_topic_0000001477899152.html",
"doc_type":"usermanual",
- "p_code":"151",
+ "p_code":"118",
"code":"152"
},
{
- "desc":"CSS uses network isolation, in addition to various host and data security measures.Network isolationThe entire network is divided into two planes: service plane and manag",
+ "desc":"CSS can control traffic at the node level. You can configure the blacklist and whitelist, the maximum concurrent HTTP connections, and the maximum HTTP connections for a ",
"product_code":"css",
- "title":"How Does CSS Ensure Data and Service Security?",
- "uri":"css_02_0006.html",
+ "title":"Context",
+ "uri":"en-us_topic_0000001477739408.html",
"doc_type":"usermanual",
- "p_code":"151",
+ "p_code":"152",
"code":"153"
},
{
- "desc":"Disk usage and cluster health status are two key metrics that you can focus on. You can log in to Cloud Eye and configure alarm rules for these metrics. If alarms are rep",
+ "desc":"You can run commands in Kibana to enable or disable HTTP/HTTPS flow control for your cluster. The command parameters are as follows.Log in to the CSS management console.C",
"product_code":"css",
- "title":"Which CSS Metrics Should I Focus On?",
- "uri":"css_02_0007.html",
+ "title":"HTTP/HTTPS Flow Control",
+ "uri":"en-us_topic_0000001477579352.html",
"doc_type":"usermanual",
- "p_code":"151",
+ "p_code":"152",
"code":"154"
},
{
- "desc":"CSS uses EVS and local disks to store your indices. During cluster creation, you can specify the EVS disk type and specifications (the EVS disk size).Supported EVS disk t",
+ "desc":"Elasticsearch provides a circuit breaker, which will terminate requests if the memory usage exceeds its threshold. However, Elasticsearch does not check the heap memory u",
"product_code":"css",
- "title":"What Storage Options Does CSS Provide?",
- "uri":"css_02_0008.html",
+ "title":"Memory Flow Control",
+ "uri":"en-us_topic_0000001528499157.html",
"doc_type":"usermanual",
- "p_code":"151",
+ "p_code":"152",
"code":"155"
},
{
- "desc":"You can configure up to 200 nodes for a cluster (each node corresponds to an ECS). The maximum storage capacity of an ECS is the total capacity of EVS disks attached to t",
+ "desc":"The following table describes the global path whitelist parameters for flow control.Log in to the CSS management console.Choose Clusters in the navigation pane. On the Cl",
"product_code":"css",
- "title":"What Is the Maximum Storage Capacity of CSS?",
- "uri":"css_02_0009.html",
+ "title":"Global Path Whitelist for Flow Control",
+ "uri":"en-us_topic_0000001528499165.html",
"doc_type":"usermanual",
- "p_code":"151",
+ "p_code":"152",
"code":"156"
},
{
- "desc":"You can use any of the following three methods to manage CSS or to use search engine APIs. You can initiate requests based on constructed request messages.curlcurl is a c",
+ "desc":"Request sampling can record the access IP addresses, the number of accessed nodes, request paths, request URLs, and request bodies, which can be used to obtain the IP add",
"product_code":"css",
- "title":"How Can I Manage CSS?",
- "uri":"css_02_0017.html",
+ "title":"Request Sampling",
+ "uri":"en-us_topic_0000001477579356.html",
"doc_type":"usermanual",
- "p_code":"151",
+ "p_code":"152",
"code":"157"
},
{
- "desc":"You can store the following logs and files:Log files: Elasticsearch logsData files: Elasticsearch index filesOther files: cluster configuration filesOS: 5% storage space ",
+ "desc":"Flow control can be implemented via an independent API.Log in to the CSS management console.Choose Clusters in the navigation pane. On the Clusters page, locate the targe",
"product_code":"css",
- "title":"What Can the Disk Space of a CSS Cluster Be Used For?",
- "uri":"css_02_0010.html",
+ "title":"Flow Control",
+ "uri":"en-us_topic_0000001528299577.html",
"doc_type":"usermanual",
- "p_code":"151",
+ "p_code":"152",
"code":"158"
},
{
- "desc":"Log in to the console.On the Clusters page, click Access Kibana in the Operation column of a cluster.Log in to Kibana and choose Dev Tools.On the Console page, run the GE",
+ "desc":"You can check access logs in either of the following ways:Enable and check access logs via an independent API. Configure the API parameters to record the access log time ",
"product_code":"css",
- "title":"How Do I Check the Numbers of Shards and Replicas in a Cluster on the CSS Console?",
- "uri":"css_02_0093.html",
+ "title":"Access Logs",
+ "uri":"en-us_topic_0000001528659101.html",
"doc_type":"usermanual",
- "p_code":"151",
+ "p_code":"152",
"code":"159"
},
{
- "desc":"CSS supports two data compression algorithms: LZ4 (by default) and best_compression.LZ4 algorithmLZ4 is the default compression algorithm of Elasticsearch. This algorithm",
+ "desc":"CPU flow control can be implemented based on the CPU usage of a node.You can configure the CPU usage threshold of a node to prevent the node from breaking down due to hea",
"product_code":"css",
- "title":"What Data Compression Algorithms Does CSS Use?",
- "uri":"css_02_0041.html",
+ "title":"CPU Flow Control",
+ "uri":"en-us_topic_0000001477419744.html",
"doc_type":"usermanual",
- "p_code":"151",
+ "p_code":"152",
"code":"160"
},
{
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "desc":"You can block all traffic in one click, except the traffic that passes through O&M APIs, to handle unexpected traffic burst and quickly recover your cluster.Log in to the",
"product_code":"css",
- "title":"Functions",
- "uri":"css_02_0055.html",
+ "title":"One-click Traffic Blocking",
+ "uri":"en-us_topic_0000001477739380.html",
"doc_type":"usermanual",
- "p_code":"150",
+ "p_code":"152",
"code":"161"
},
{
- "desc":"Elasticsearch does not support direct data migration between different VPCs. You can use either of the following methods to migrate data.Use the backup and restoration fu",
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"css",
- "title":"Can Elasticsearch Data Be Migrated Between VPCs?",
- "uri":"css_02_0058.html",
+ "title":"Large Query Isolation",
+ "uri":"en-us_topic_0000001477899220.html",
"doc_type":"usermanual",
- "p_code":"161",
+ "p_code":"118",
"code":"162"
},
{
- "desc":"CSS clusters cannot be directly migrated. You can back up a cluster to an OBS bucket and restore it to a new region.If the OBS bucket is in the same region as your CSS cl",
+ "desc":"The large query isolation feature allows you to separately manage large queries. You can isolate query requests that consume a large amount of memory or take a long perio",
"product_code":"css",
- "title":"How Do I Migrate a CSS Cluster Across Regions?",
- "uri":"css_02_0094.html",
+ "title":"Context",
+ "uri":"en-us_topic_0000001477739376.html",
"doc_type":"usermanual",
- "p_code":"161",
+ "p_code":"162",
"code":"163"
},
{
- "desc":"The slow query log settings of CSS are the same as those of Elasticsearch. You can configure slow query logs via the _settings API. For example, you can run the following",
+ "desc":"The large query isolation and global timeout features are disabled by default. If you enable them, the configuration will take effect immediately. Perform the following s",
"product_code":"css",
- "title":"How Do I Configure the Threshold for CSS Slow Query Logs?",
- "uri":"css_02_0096.html",
+ "title":"Procedure",
+ "uri":"en-us_topic_0000001477579388.html",
"doc_type":"usermanual",
- "p_code":"161",
+ "p_code":"162",
"code":"164"
},
- {
- "desc":"The CSS lifecycle is implemented using the Index State Management (ISM) of Open Distro. For details about how to configure policies related to the ISM template, see the O",
- "product_code":"css",
- "title":"How Do I Update the CSS Lifecycle Policy?",
- "uri":"css_02_0119.html",
- "doc_type":"usermanual",
- "p_code":"161",
- "code":"165"
- },
- {
- "desc":"Log in to the Kibana page of the cluster. In the navigation pane, choose Dev Tools.Modify and run the PUT /*/_settings{\"number_of_replicas\":0} command.Do not directly run",
- "product_code":"css",
- "title":"How Do I Set the Numbers of Index Copies to 0 in Batches?",
- "uri":"css_02_0118.html",
- "doc_type":"usermanual",
- "p_code":"161",
- "code":"166"
- },
- {
- "desc":"The possible causes are as follows:Shards were unevenly distributed in previous index allocations, and the predominate parameter in the latest indexed shard allocation wa",
- "product_code":"css",
- "title":"Why All New Index Shards Are Allocated to the Same Node?",
- "uri":"css_02_0042.html",
- "doc_type":"usermanual",
- "p_code":"161",
- "code":"167"
- },
- {
- "desc":"The snapshot function has been enabled for the cluster and snapshot information has been configured.Log in to the CSS management console, and click Clusters in the naviga",
- "product_code":"css",
- "title":"How Do I Query Snapshot Information?",
- "uri":"css_02_0043.html",
- "doc_type":"usermanual",
- "p_code":"161",
- "code":"168"
- },
- {
- "desc":"A cluster cannot be directly upgraded. You can purchase a cluster of a later version and migrate your data to it.Creating a Cluster: Create a cluster of a later version i",
- "product_code":"css",
- "title":"Can I Upgrade a Cluster from an Earlier Version to a Later Version?",
- "uri":"css_02_0052.html",
- "doc_type":"usermanual",
- "p_code":"161",
- "code":"169"
- },
- {
- "desc":"Yes. You can use a snapshot stored in OBS to restore a cluster. A deleted cluster that has no snapshots in OBS cannot be restored. Exercise caution when deleting a cluste",
- "product_code":"css",
- "title":"Can I Restore a Deleted Cluster?",
- "uri":"css_02_0120.html",
- "doc_type":"usermanual",
- "p_code":"161",
- "code":"170"
- },
- {
- "desc":"You can modify TLS algorithms in CSS 7.6.2 and later versions.Log in to the CSS management console.In the navigation pane, choose Clusters. The cluster list is displayed.",
- "product_code":"css",
- "title":"Can I Modify the TLS Algorithm of an Elasticsearch Cluster?",
- "uri":"css_02_0101.html",
- "doc_type":"usermanual",
- "p_code":"161",
- "code":"171"
- },
- {
- "desc":"If the query results on shards exceed the upper limit of records that can be returned (default value: 10000), you need to increase the limit by changing the value of sear",
- "product_code":"css",
- "title":"How Do I Set the search.max_buckets Parameter for an ES Cluster?",
- "uri":"css_02_0102.html",
- "doc_type":"usermanual",
- "p_code":"161",
- "code":"172"
- },
- {
- "desc":"If the value of node.roles of a client node is i, then is this client node an injest node?Are there coordinating only nodes in clusters? Are the client requests distribut",
- "product_code":"css",
- "title":"Does the Value i of node.roles Indicate an Injest Node?",
- "uri":"css_02_0127.html",
- "doc_type":"usermanual",
- "p_code":"161",
- "code":"173"
- },
- {
- "desc":"In Elasticsearch 7.x and later versions, types cannot be created for indexes.If you need to use types, add include_type_name=true to the command. For example:After the co",
- "product_code":"css",
- "title":"How Do I Create a Type Under an Index in an Elasticsearch 7.x Cluster?",
- "uri":"css_02_0132.html",
- "doc_type":"usermanual",
- "p_code":"161",
- "code":"174"
- },
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"css",
- "title":"Clusters in Security Mode",
- "uri":"css_02_0063.html",
+ "title":"Index Monitoring",
+ "uri":"en-us_topic_0000001528499133.html",
"doc_type":"usermanual",
- "p_code":"150",
+ "p_code":"118",
+ "code":"165"
+ },
+ {
+ "desc":"CSS monitors various metrics of the running status and change trend of cluster indexes to measure service usage and handle potential risks in a timely manner, ensuring th",
+ "product_code":"css",
+ "title":"Context",
+ "uri":"en-us_topic_0000001528499125.html",
+ "doc_type":"usermanual",
+ "p_code":"165",
+ "code":"166"
+ },
+ {
+ "desc":"Log in to the CSS management console.Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation colu",
+ "product_code":"css",
+ "title":"Enabling Index Monitoring",
+ "uri":"en-us_topic_0000001477579408.html",
+ "doc_type":"usermanual",
+ "p_code":"165",
+ "code":"167"
+ },
+ {
+ "desc":"You can call an API to query the index read and write traffic within a period of time.A cluster has been created and index monitoring has been enabled.Log in to the CSS m",
+ "product_code":"css",
+ "title":"Checking the Index Read and Write Traffic",
+ "uri":"en-us_topic_0000001528659105.html",
+ "doc_type":"usermanual",
+ "p_code":"165",
+ "code":"168"
+ },
+ {
+ "desc":"You can check preconfigured index monitoring visualizations on the Dashboard and Visualizations pages of Kibana. You can also customize tables and charts.A cluster has be",
+ "product_code":"css",
+ "title":"Checking Index Monitoring Information",
+ "uri":"en-us_topic_0000001528499197.html",
+ "doc_type":"usermanual",
+ "p_code":"165",
+ "code":"169"
+ },
+ {
+ "desc":"The configuration file content of kibana-monitor is as follows. You are advised to save the file as monitoring-kibana.ndjson.",
+ "product_code":"css",
+ "title":"kibana-monitor",
+ "uri":"en-us_topic_0000001528499129.html",
+ "doc_type":"usermanual",
+ "p_code":"165",
+ "code":"170"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Enhanced Cluster Monitoring",
+ "uri":"en-us_topic_0000001477419712.html",
+ "doc_type":"usermanual",
+ "p_code":"118",
+ "code":"171"
+ },
+ {
+ "desc":"The Elasticsearch community only discusses how to monitor the average latency of search requests, which cannot reflect the actual search performance of a cluster. To enha",
+ "product_code":"css",
+ "title":"P99 Latency Monitoring",
+ "uri":"en-us_topic_0000001477419748.html",
+ "doc_type":"usermanual",
+ "p_code":"171",
+ "code":"172"
+ },
+ {
+ "desc":"When an external system accesses Elasticsearch through the HTTP protocol, a response and the corresponding status code are returned. The open-source Elasticsearch server ",
+ "product_code":"css",
+ "title":"HTTP Status Code Monitoring",
+ "uri":"en-us_topic_0000001477419772.html",
+ "doc_type":"usermanual",
+ "p_code":"171",
+ "code":"173"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Enhanced Aggregation",
+ "uri":"en-us_topic_0000001528659121.html",
+ "doc_type":"usermanual",
+ "p_code":"118",
+ "code":"174"
+ },
+ {
+ "desc":"The enhanced aggregation is an optimization feature for service awareness. With this feature, you can optimize the aggregation analysis capability of observable services.",
+ "product_code":"css",
+ "title":"Features",
+ "uri":"en-us_topic_0000001477419760.html",
+ "doc_type":"usermanual",
+ "p_code":"174",
"code":"175"
},
{
- "desc":"Non-security mode: no restrictions.Cluster in security mode: The Filebeat OSS version must match the cluster version. For details on how to download the Filebeat OSS vers",
+ "desc":"Low-cardinality fields have high data clustering performance when being sorted, which facilitates vectorized optimization. Assume that the following query statement exist",
"product_code":"css",
- "title":"What Is the Relationship Between the Filebeat Version and Cluster Version?",
- "uri":"css_02_0064.html",
+ "title":"Grouping and Aggregation of Low-cardinality Fields",
+ "uri":"en-us_topic_0000001528659125.html",
"doc_type":"usermanual",
- "p_code":"175",
+ "p_code":"174",
"code":"176"
},
{
- "desc":"The security certificate (CloudSearchService.cer) can be downloaded only for security clusters that have enabled HTTPS access.Log in to the CSS management console.In the ",
+ "desc":"High-cardinality fields are usually used for histogram grouping and aggregation instead of single-point grouping and aggregation. For example, collecting the statistics o",
"product_code":"css",
- "title":"How Do I Obtain the Security Certificate of CSS?",
- "uri":"css_02_0106.html",
+ "title":"High-cardinality Field Histogram Aggregation",
+ "uri":"en-us_topic_0000001528499145.html",
"doc_type":"usermanual",
- "p_code":"175",
+ "p_code":"174",
"code":"177"
},
{
- "desc":"The security certificate (CloudSearchService.cer) can be downloaded only for security clusters that have enabled HTTPS access. Most software supports certificates in the ",
+ "desc":"In the scenario where low-cardinality and high-cardinality fields are mixed, assume that the following query statement exists:Group the low-cardinality fields and create ",
"product_code":"css",
- "title":"How Do I Convert the Format of a CER Security Certificate?",
- "uri":"css_02_0128.html",
+ "title":"Low-cardinality and High-cardinality Field Mixing",
+ "uri":"en-us_topic_0000001528659141.html",
"doc_type":"usermanual",
- "p_code":"175",
+ "p_code":"174",
"code":"178"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"css",
- "title":"Resource Usage and Change",
- "uri":"css_02_0066.html",
+ "title":"Read/Write Splitting",
+ "uri":"en-us_topic_0000001528379249.html",
"doc_type":"usermanual",
- "p_code":"150",
+ "p_code":"118",
"code":"179"
},
{
- "desc":"Run the following command to delete a single index data record.curl -XDELETE http://IP:9200/Index_nameIP: the IP address of any node in the clustercurl -XDELETE http://",
+ "desc":"CSS supports read/write splitting. Data written to the primary cluster (Leader) can be automatically synchronized to the secondary cluster (Follower). In this way, data i",
"product_code":"css",
- "title":"How Do I Clear Expired Data to Release Storage Space?",
- "uri":"css_02_0067.html",
+ "title":"Features",
+ "uri":"en-us_topic_0000001528379321.html",
"doc_type":"usermanual",
"p_code":"179",
"code":"180"
},
{
- "desc":"Run GET _cat/indices?v in Kibana to check the number of cluster replicas. If the value of rep is 1, the cluster has two replicas.If the value of rep is not 1, run the fol",
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"css",
- "title":"How Do I Configure a Two-Replica CSS Cluster?",
- "uri":"css_02_0068.html",
+ "title":"Instructions",
+ "uri":"en-us_topic_0000001477579376.html",
"doc_type":"usermanual",
"p_code":"179",
"code":"181"
},
{
- "desc":"Manually: Run the DELETE /my_index command in Kibana.Automatically: Create scheduled tasks to call the index deletion request and periodically execute the tasks. CSS supp",
+ "desc":"Log in to the CSS management console.Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster, and click Access Kibana in the Operation col",
"product_code":"css",
- "title":"How Do I Delete Index Data?",
- "uri":"css_02_0069.html",
+ "title":"Basic Settings",
+ "uri":"en-us_topic_0000001528379313.html",
"doc_type":"usermanual",
- "p_code":"179",
+ "p_code":"181",
"code":"182"
},
{
- "desc":"Once an index is created, the number of primary shards cannot be changed.You can run the following command in Kibana to change the number of replicas:index specifies the ",
+ "desc":"Synchronize a single index.The request URL and request body parameters are as follows:After the synchronization function is enabled, indexes in the secondary cluster beco",
"product_code":"css",
- "title":"Can I Change the Number of Shards to Four with Two Replicas When There Is One Shard Set in the JSON File?",
- "uri":"css_02_0089.html",
+ "title":"Synchronizing Specified Indexes",
+ "uri":"en-us_topic_0000001528659157.html",
"doc_type":"usermanual",
- "p_code":"179",
+ "p_code":"181",
"code":"183"
},
{
- "desc":"A large number of shards in a cluster slows down shard creation.If automatic index creation is enabled, slow index creation may cause a large number of write requests to ",
+ "desc":"The request URL and request body parameters are as follows:The following are two examples:1. Synchronize a single index from the primary cluster to the secondary cluster.",
"product_code":"css",
- "title":"What Are the Impacts If an Elasticsearch Cluster Has Too Many Shards?",
- "uri":"css_02_0124.html",
+ "title":"Matching Index Synchronization",
+ "uri":"en-us_topic_0000001528379265.html",
"doc_type":"usermanual",
- "p_code":"179",
+ "p_code":"181",
"code":"184"
},
{
- "desc":"Method 1Open Kibana and run the following commands on the Dev Tools page:PUT _all/_settings?preserve_existing=true\n{\n\"index.max_result_window\" : \"10000000\"\n}Open Kibana a",
+ "desc":"You can specify multiple indexes or use wildcard to match the target indexes and terminate their synchronization tasks. Subsequent modifications to the indexes in the pri",
"product_code":"css",
- "title":"How Do I Set the Default Maximum Number of Records Displayed on a Page for an Elasticsearch Cluster",
- "uri":"css_02_0125.html",
+ "title":"Stopping Index Synchronization",
+ "uri":"en-us_topic_0000001477899224.html",
"doc_type":"usermanual",
- "p_code":"179",
+ "p_code":"181",
"code":"185"
},
{
- "desc":"Running the delete_by_query command can only add a deletion mark to the target data instead of really deleting it. When you search for data, all data is searched and the ",
+ "desc":"Querying the created patterns.This API is used to query the pattern list and query a specified pattern by name.An example request is as follows:GET auto_sync/pattern\nGET ",
"product_code":"css",
- "title":"Why Does the Disk Usage Increase After the delete_by_query Command Was Executed to Delete Data?",
- "uri":"css_02_0126.html",
+ "title":"Other Management APIs",
+ "uri":"en-us_topic_0000001528499169.html",
"doc_type":"usermanual",
- "p_code":"179",
+ "p_code":"181",
"code":"186"
},
{
- "desc":"Clear the fielddataDuring aggregation and sorting, data are converted to the fielddata structure, which occupies a large amount of memory.Run the following commands on Ki",
+ "desc":"This section describes how to switch from the primary cluster to the secondary cluster when the primary cluster is faulty.1. If the synchronization of specified indexes h",
"product_code":"css",
- "title":"How Do I Clear the Cache of a CSS Cluster?",
- "uri":"css_02_0130.html",
+ "title":"Best Practices",
+ "uri":"en-us_topic_0000001477739356.html",
"doc_type":"usermanual",
"p_code":"179",
"code":"187"
},
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Monitoring",
+ "uri":"en-us_topic_0000001477419740.html",
+ "doc_type":"usermanual",
+ "p_code":"",
+ "code":"188"
+ },
+ {
+ "desc":"You can use Cloud Eye to monitor cluster metrics of CSS in real time and quickly handle exceptions. For details about Cloud Eye, see the Cloud Eye User Guide.Table 1 list",
+ "product_code":"css",
+ "title":"Monitoring Metrics of Clusters",
+ "uri":"en-us_topic_0000001528659133.html",
+ "doc_type":"usermanual",
+ "p_code":"188",
+ "code":"189"
+ },
+ {
+ "desc":"This topic describes CSS metrics that can be monitored by Cloud Eye as well as their namespaces and dimensions. You can search for the monitoring metrics and alarms gener",
+ "product_code":"css",
+ "title":"Monitoring Metrics",
+ "uri":"en-us_topic_0000001599872681.html",
+ "doc_type":"usermanual",
+ "p_code":"188",
+ "code":"190"
+ },
+ {
+ "desc":"You can use Cloud Eye to monitor the created clusters. After configuring the cluster monitoring, you can log in to the Cloud Eye management console to view cluster metric",
+ "product_code":"css",
+ "title":"Configuring Cluster Monitoring",
+ "uri":"en-us_topic_0000001477579336.html",
+ "doc_type":"usermanual",
+ "p_code":"188",
+ "code":"191"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Auditing",
+ "uri":"en-us_topic_0000001528499181.html",
+ "doc_type":"usermanual",
+ "p_code":"",
+ "code":"192"
+ },
+ {
+ "desc":"Cloud Trace Service (CTS) is available on the public cloud platform. With CTS, you can record operations associated with CSS for later query, audit, and backtrack operati",
+ "product_code":"css",
+ "title":"Key Operations Recorded by CTS",
+ "uri":"en-us_topic_0000001528299573.html",
+ "doc_type":"usermanual",
+ "p_code":"192",
+ "code":"193"
+ },
+ {
+ "desc":"After you enable CTS and the management tracker is created, CTS starts recording operations on cloud resources. CTS stores operation records generated in the last seven d",
+ "product_code":"css",
+ "title":"Querying Real-Time Traces",
+ "uri":"en-us_topic_0000001720964408.html",
+ "doc_type":"usermanual",
+ "p_code":"192",
+ "code":"194"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"FAQs",
+ "uri":"en-us_topic_0000001876048066.html",
+ "doc_type":"usermanual",
+ "p_code":"",
+ "code":"195"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"General Consulting",
+ "uri":"en-us_topic_0000001477137534.html",
+ "doc_type":"usermanual",
+ "p_code":"195",
+ "code":"196"
+ },
+ {
+ "desc":"A region and availability zone (AZ) identify the location of a data center. You can create resources in a specific region and AZ.A region is a physical data center. Each ",
+ "product_code":"css",
+ "title":"What Are Regions and AZs?",
+ "uri":"en-us_topic_0000001528097305.html",
+ "doc_type":"usermanual",
+ "p_code":"196",
+ "code":"197"
+ },
+ {
+ "desc":"CSS uses network isolation, in addition to various host and data security measures.Network isolationThe entire network is divided into two planes: service plane and manag",
+ "product_code":"css",
+ "title":"How Does CSS Ensure Data and Service Security?",
+ "uri":"en-us_topic_0000001476977546.html",
+ "doc_type":"usermanual",
+ "p_code":"196",
+ "code":"198"
+ },
+ {
+ "desc":"Disk usage and cluster health status are two key metrics that you can focus on. You can log in to Cloud Eye and configure alarm rules for these metrics. If alarms are rep",
+ "product_code":"css",
+ "title":"Which CSS Metrics Should I Focus On?",
+ "uri":"en-us_topic_0000001528097297.html",
+ "doc_type":"usermanual",
+ "p_code":"196",
+ "code":"199"
+ },
+ {
+ "desc":"CSS uses EVS and local disks to store your indices. During cluster creation, you can specify the EVS disk type and specifications (the EVS disk size).Supported EVS disk t",
+ "product_code":"css",
+ "title":"What Storage Options Does CSS Provide?",
+ "uri":"en-us_topic_0000001528097293.html",
+ "doc_type":"usermanual",
+ "p_code":"196",
+ "code":"200"
+ },
+ {
+ "desc":"You can configure up to 200 nodes for a cluster (each node corresponds to an ECS). The maximum storage capacity of an ECS is the total capacity of EVS disks attached to t",
+ "product_code":"css",
+ "title":"What Is the Maximum Storage Capacity of CSS?",
+ "uri":"en-us_topic_0000001527937337.html",
+ "doc_type":"usermanual",
+ "p_code":"196",
+ "code":"201"
+ },
+ {
+ "desc":"You can use any of the following three methods to manage CSS or to use search engine APIs. You can initiate requests based on constructed request messages.curlcurl is a c",
+ "product_code":"css",
+ "title":"How Can I Manage CSS?",
+ "uri":"en-us_topic_0000001477137546.html",
+ "doc_type":"usermanual",
+ "p_code":"196",
+ "code":"202"
+ },
+ {
+ "desc":"You can store the following logs and files:Log files: Elasticsearch logsData files: Elasticsearch index filesOther files: cluster configuration filesOS: 5% storage space ",
+ "product_code":"css",
+ "title":"What Can the Disk Space of a CSS Cluster Be Used For?",
+ "uri":"en-us_topic_0000001477297350.html",
+ "doc_type":"usermanual",
+ "p_code":"196",
+ "code":"203"
+ },
+ {
+ "desc":"Log in to the console.On the Clusters page, click Access Kibana in the Operation column of a cluster.Log in to Kibana and choose Dev Tools.On the Console page, run the GE",
+ "product_code":"css",
+ "title":"How Do I Check the Numbers of Shards and Replicas in a Cluster on the CSS Console?",
+ "uri":"en-us_topic_0000001527697797.html",
+ "doc_type":"usermanual",
+ "p_code":"196",
+ "code":"204"
+ },
+ {
+ "desc":"CSS supports two data compression algorithms: LZ4 (by default) and best_compression.LZ4 algorithmLZ4 is the default compression algorithm of Elasticsearch. This algorithm",
+ "product_code":"css",
+ "title":"What Data Compression Algorithms Does CSS Use?",
+ "uri":"en-us_topic_0000001477297354.html",
+ "doc_type":"usermanual",
+ "p_code":"196",
+ "code":"205"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Functions",
+ "uri":"en-us_topic_0000001477137530.html",
+ "doc_type":"usermanual",
+ "p_code":"195",
+ "code":"206"
+ },
+ {
+ "desc":"Elasticsearch does not support direct data migration between different VPCs. You can use either of the following methods to migrate data.Use the backup and restoration fu",
+ "product_code":"css",
+ "title":"Can Elasticsearch Data Be Migrated Between VPCs?",
+ "uri":"en-us_topic_0000001527937341.html",
+ "doc_type":"usermanual",
+ "p_code":"206",
+ "code":"207"
+ },
+ {
+ "desc":"CSS clusters cannot be directly migrated. You can back up a cluster to an OBS bucket and restore it to a new region.If the OBS bucket is in the same region as your CSS cl",
+ "product_code":"css",
+ "title":"How Do I Migrate a CSS Cluster Across Regions?",
+ "uri":"en-us_topic_0000001528097309.html",
+ "doc_type":"usermanual",
+ "p_code":"206",
+ "code":"208"
+ },
+ {
+ "desc":"The slow query log settings of CSS are the same as those of Elasticsearch. You can configure slow query logs via the _settings API. For example, you can run the following",
+ "product_code":"css",
+ "title":"How Do I Configure the Threshold for CSS Slow Query Logs?",
+ "uri":"en-us_topic_0000001476977558.html",
+ "doc_type":"usermanual",
+ "p_code":"206",
+ "code":"209"
+ },
+ {
+ "desc":"The CSS lifecycle is implemented using the Index State Management (ISM) of Open Distro. For details about how to configure policies related to the ISM template, see the O",
+ "product_code":"css",
+ "title":"How Do I Update the CSS Lifecycle Policy?",
+ "uri":"en-us_topic_0000001477297334.html",
+ "doc_type":"usermanual",
+ "p_code":"206",
+ "code":"210"
+ },
+ {
+ "desc":"Log in to the Kibana page of the cluster. In the navigation pane, choose Dev Tools.Modify and run the PUT /*/_settings{\"number_of_replicas\":0} command.Do not directly run",
+ "product_code":"css",
+ "title":"How Do I Set the Numbers of Index Copies to 0 in Batches?",
+ "uri":"en-us_topic_0000001477297358.html",
+ "doc_type":"usermanual",
+ "p_code":"206",
+ "code":"211"
+ },
+ {
+ "desc":"The possible causes are as follows:Shards were unevenly distributed in previous index allocations, and the predominate parameter in the latest indexed shard allocation wa",
+ "product_code":"css",
+ "title":"Why All New Index Shards Are Allocated to the Same Node?",
+ "uri":"en-us_topic_0000001527777449.html",
+ "doc_type":"usermanual",
+ "p_code":"206",
+ "code":"212"
+ },
+ {
+ "desc":"The snapshot function has been enabled for the cluster and snapshot information has been configured.Log in to the CSS management console, and click Clusters in the naviga",
+ "product_code":"css",
+ "title":"How Do I Query Snapshot Information?",
+ "uri":"en-us_topic_0000001527697777.html",
+ "doc_type":"usermanual",
+ "p_code":"206",
+ "code":"213"
+ },
+ {
+ "desc":"A cluster cannot be directly upgraded. You can purchase a cluster of a later version and migrate your data to it.Creating a Cluster: Create a cluster of a later version i",
+ "product_code":"css",
+ "title":"Can I Upgrade a Cluster from an Earlier Version to a Later Version?",
+ "uri":"en-us_topic_0000001477137542.html",
+ "doc_type":"usermanual",
+ "p_code":"206",
+ "code":"214"
+ },
+ {
+ "desc":"Yes. You can use a snapshot stored in OBS to restore a cluster. A deleted cluster that has no snapshots in OBS cannot be restored. Exercise caution when deleting a cluste",
+ "product_code":"css",
+ "title":"Can I Restore a Deleted Cluster?",
+ "uri":"en-us_topic_0000001476817914.html",
+ "doc_type":"usermanual",
+ "p_code":"206",
+ "code":"215"
+ },
+ {
+ "desc":"You can modify TLS algorithms in CSS 7.6.2 and later versions.Log in to the CSS management console.In the navigation pane, choose Clusters. The cluster list is displayed.",
+ "product_code":"css",
+ "title":"Can I Modify the TLS Algorithm of an Elasticsearch Cluster?",
+ "uri":"en-us_topic_0000001527777437.html",
+ "doc_type":"usermanual",
+ "p_code":"206",
+ "code":"216"
+ },
+ {
+ "desc":"If the query results on shards exceed the upper limit of records that can be returned (default value: 10000), you need to increase the limit by changing the value of sear",
+ "product_code":"css",
+ "title":"How Do I Set the search.max_buckets Parameter for an ES Cluster?",
+ "uri":"en-us_topic_0000001476817910.html",
+ "doc_type":"usermanual",
+ "p_code":"206",
+ "code":"217"
+ },
+ {
+ "desc":"If the value of node.roles of a client node is i, then is this client node an injest node?Are there coordinating only nodes in clusters? Are the client requests distribut",
+ "product_code":"css",
+ "title":"Does the Value i of node.roles Indicate an Injest Node?",
+ "uri":"en-us_topic_0000001477137526.html",
+ "doc_type":"usermanual",
+ "p_code":"206",
+ "code":"218"
+ },
+ {
+ "desc":"In Elasticsearch 7.x and later versions, types cannot be created for indexes.If you need to use types, add include_type_name=true to the command. For example:After the co",
+ "product_code":"css",
+ "title":"How Do I Create a Type Under an Index in an Elasticsearch 7.x Cluster?",
+ "uri":"en-us_topic_0000001528097313.html",
+ "doc_type":"usermanual",
+ "p_code":"206",
+ "code":"219"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Clusters in Security Mode",
+ "uri":"en-us_topic_0000001527777433.html",
+ "doc_type":"usermanual",
+ "p_code":"195",
+ "code":"220"
+ },
+ {
+ "desc":"Non-security mode: no restrictions.Cluster in security mode: The Filebeat OSS version must match the cluster version. For details on how to download the Filebeat OSS vers",
+ "product_code":"css",
+ "title":"What Is the Relationship Between the Filebeat Version and Cluster Version?",
+ "uri":"en-us_topic_0000001477137538.html",
+ "doc_type":"usermanual",
+ "p_code":"220",
+ "code":"221"
+ },
+ {
+ "desc":"The security certificate (CloudSearchService.cer) can be downloaded only for security clusters that have enabled HTTPS access.Log in to the CSS management console.In the ",
+ "product_code":"css",
+ "title":"How Do I Obtain the Security Certificate of CSS?",
+ "uri":"en-us_topic_0000001476817894.html",
+ "doc_type":"usermanual",
+ "p_code":"220",
+ "code":"222"
+ },
+ {
+ "desc":"The security certificate (CloudSearchService.cer) can be downloaded only for security clusters that have enabled HTTPS access. Most software supports certificates in the ",
+ "product_code":"css",
+ "title":"How Do I Convert the Format of a CER Security Certificate?",
+ "uri":"en-us_topic_0000001476817906.html",
+ "doc_type":"usermanual",
+ "p_code":"220",
+ "code":"223"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"css",
+ "title":"Resource Usage and Change",
+ "uri":"en-us_topic_0000001527697785.html",
+ "doc_type":"usermanual",
+ "p_code":"195",
+ "code":"224"
+ },
+ {
+ "desc":"Run the following command to delete a single index data record.curl -XDELETE http://IP:9200/Index_nameIP: the IP address of any node in the clustercurl -XDELETE http://",
+ "product_code":"css",
+ "title":"How Do I Clear Expired Data to Release Storage Space?",
+ "uri":"en-us_topic_0000001528097289.html",
+ "doc_type":"usermanual",
+ "p_code":"224",
+ "code":"225"
+ },
+ {
+ "desc":"Run GET _cat/indices?v in Kibana to check the number of cluster replicas. If the value of rep is 1, the cluster has two replicas.If the value of rep is not 1, run the fol",
+ "product_code":"css",
+ "title":"How Do I Configure a Two-Replica CSS Cluster?",
+ "uri":"en-us_topic_0000001527937333.html",
+ "doc_type":"usermanual",
+ "p_code":"224",
+ "code":"226"
+ },
+ {
+ "desc":"Manually: Run the DELETE /my_index command in Kibana.Automatically: Create scheduled tasks to call the index deletion request and periodically execute the tasks. CSS supp",
+ "product_code":"css",
+ "title":"How Do I Delete Index Data?",
+ "uri":"en-us_topic_0000001527777445.html",
+ "doc_type":"usermanual",
+ "p_code":"224",
+ "code":"227"
+ },
+ {
+ "desc":"Once an index is created, the number of primary shards cannot be changed.You can run the following command in Kibana to change the number of replicas:index specifies the ",
+ "product_code":"css",
+ "title":"Can I Change the Number of Shards to Four with Two Replicas When There Is One Shard Set in the JSON File?",
+ "uri":"en-us_topic_0000001476977562.html",
+ "doc_type":"usermanual",
+ "p_code":"224",
+ "code":"228"
+ },
+ {
+ "desc":"A large number of shards in a cluster slows down shard creation.If automatic index creation is enabled, slow index creation may cause a large number of write requests to ",
+ "product_code":"css",
+ "title":"What Are the Impacts If an Elasticsearch Cluster Has Too Many Shards?",
+ "uri":"en-us_topic_0000001476977554.html",
+ "doc_type":"usermanual",
+ "p_code":"224",
+ "code":"229"
+ },
+ {
+ "desc":"Method 1Open Kibana and run the following commands on the Dev Tools page:PUT _all/_settings?preserve_existing=true\n{\n\"index.max_result_window\" : \"10000000\"\n}Open Kibana a",
+ "product_code":"css",
+ "title":"How Do I Set the Default Maximum Number of Records Displayed on a Page for an Elasticsearch Cluster",
+ "uri":"en-us_topic_0000001527697781.html",
+ "doc_type":"usermanual",
+ "p_code":"224",
+ "code":"230"
+ },
+ {
+ "desc":"Running the delete_by_query command can only add a deletion mark to the target data instead of really deleting it. When you search for data, all data is searched and the ",
+ "product_code":"css",
+ "title":"Why Does the Disk Usage Increase After the delete_by_query Command Was Executed to Delete Data?",
+ "uri":"en-us_topic_0000001527777425.html",
+ "doc_type":"usermanual",
+ "p_code":"224",
+ "code":"231"
+ },
+ {
+ "desc":"Clear the fielddataDuring aggregation and sorting, data are converted to the fielddata structure, which occupies a large amount of memory.Run the following commands on Ki",
+ "product_code":"css",
+ "title":"How Do I Clear the Cache of a CSS Cluster?",
+ "uri":"en-us_topic_0000001528097317.html",
+ "doc_type":"usermanual",
+ "p_code":"224",
+ "code":"232"
+ },
{
"desc":"The cluster monitoring result shows that the average memory usage of a cluster is 98%. Does it affect cluster performance?In an ES cluster, 50% of the memory is occupied ",
"product_code":"css",
"title":"The Average Memory Usage of an Elasticsearch Cluster Reaches 98%",
- "uri":"css_02_0131.html",
+ "uri":"en-us_topic_0000001527777429.html",
"doc_type":"usermanual",
- "p_code":"179",
- "code":"188"
+ "p_code":"224",
+ "code":"233"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"css",
"title":"Components",
- "uri":"css_02_0070.html",
+ "uri":"en-us_topic_0000001527697789.html",
"doc_type":"usermanual",
- "p_code":"150",
- "code":"189"
+ "p_code":"195",
+ "code":"234"
},
{
"desc":"CSS does not currently support installation of Search Guard.CSS provides clusters in security mode, which have the same functions as Search Guard.",
"product_code":"css",
"title":"Can I Install Search Guard on CSS?",
- "uri":"css_02_0088.html",
+ "uri":"en-us_topic_0000001476977550.html",
"doc_type":"usermanual",
- "p_code":"189",
- "code":"190"
+ "p_code":"234",
+ "code":"235"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"css",
"title":"Kibana",
- "uri":"css_02_0073.html",
+ "uri":"en-us_topic_0000001527937345.html",
"doc_type":"usermanual",
- "p_code":"150",
- "code":"191"
+ "p_code":"195",
+ "code":"236"
},
{
"desc":"Exporting data from Kibana requires the SQL Workbench plugin. Currently, you can only export data from Kibana 7.6.2 or later.In SQL Workbench of Kibana, you can enter Ela",
"product_code":"css",
"title":"Can I Export Data from Kibana?",
- "uri":"css_02_0098.html",
+ "uri":"en-us_topic_0000001527937329.html",
"doc_type":"usermanual",
- "p_code":"191",
- "code":"192"
+ "p_code":"236",
+ "code":"237"
},
{
"desc":"Run the following command to query index data through an API on Kibana:The returned data is shown in the following figure.took: How many milliseconds the query cost.time_",
"product_code":"css",
"title":"How Do I Query Index Data on Kibana in an ES Cluster?",
- "uri":"css_02_0099.html",
+ "uri":"en-us_topic_0000001527777441.html",
"doc_type":"usermanual",
- "p_code":"191",
- "code":"193"
+ "p_code":"236",
+ "code":"238"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"css",
"title":"Clusters",
- "uri":"css_02_0077.html",
+ "uri":"en-us_topic_0000001476817902.html",
"doc_type":"usermanual",
- "p_code":"150",
- "code":"194"
+ "p_code":"195",
+ "code":"239"
},
{
"desc":"Perform the following steps to troubleshoot this problem:Check whether the ECS instance and cluster are in the same VPC.If they are, go to 2.If they are not, create an EC",
"product_code":"css",
"title":"Why Does My ECS Fail to Connect to a Cluster?",
- "uri":"css_02_0025.html",
+ "uri":"en-us_topic_0000001476977542.html",
"doc_type":"usermanual",
- "p_code":"194",
- "code":"195"
+ "p_code":"239",
+ "code":"240"
},
{
"desc":"No.",
"product_code":"css",
"title":"Can a New Cluster Use the IP Address of the Old Cluster?",
- "uri":"css_02_0078.html",
+ "uri":"en-us_topic_0000001477297362.html",
"doc_type":"usermanual",
- "p_code":"194",
- "code":"196"
+ "p_code":"239",
+ "code":"241"
},
{
"desc":"No. To access a cluster from the Internet, see Public IP Address Access.",
"product_code":"css",
"title":"Can I Associate My EIP If I Want to Access the Cluster from the Internet?",
- "uri":"css_02_0079.html",
+ "uri":"en-us_topic_0000001477297338.html",
"doc_type":"usermanual",
- "p_code":"194",
- "code":"197"
+ "p_code":"239",
+ "code":"242"
},
{
"desc":"No. Currently, CSS does not integrate the x-pack component.",
"product_code":"css",
"title":"Can I Use x-pack-sql-jdbc to Access CSS Clusters and Query Data?",
- "uri":"css_02_0081.html",
+ "uri":"en-us_topic_0000001477297346.html",
"doc_type":"usermanual",
- "p_code":"194",
- "code":"198"
+ "p_code":"239",
+ "code":"243"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"css",
"title":"Ports",
- "uri":"css_02_0082.html",
+ "uri":"en-us_topic_0000001476817918.html",
"doc_type":"usermanual",
- "p_code":"150",
- "code":"199"
+ "p_code":"195",
+ "code":"244"
},
{
"desc":"Yes. Port 9200 is used by external systems to access CSS clusters, and port 9300 is used for communication between nodes.The methods for accessing port 9300 are as follow",
"product_code":"css",
"title":"Do Ports 9200 and 9300 Both Open?",
- "uri":"css_02_0083.html",
+ "uri":"en-us_topic_0000001527697793.html",
"doc_type":"usermanual",
- "p_code":"199",
- "code":"200"
+ "p_code":"244",
+ "code":"245"
},
{
"desc":"Currently to access Kibana dashboard of CSS Service, a user has to login to OTC consoleand navigate to Kibana login page.To make the access convenient a user can utilize ",
"product_code":"css",
"title":"How to access Kibana from outside cloud using ELB?",
- "uri":"css_02_0201.html",
+ "uri":"en-us_topic_0000001562137917.html",
"doc_type":"usermanual",
- "p_code":"199",
- "code":"201"
+ "p_code":"244",
+ "code":"246"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"css",
"title":"Change History",
- "uri":"css_01_0055.html",
+ "uri":"en-us_topic_0000001477899216.html",
"doc_type":"usermanual",
"p_code":"",
- "code":"202"
+ "code":"247"
}
]
\ No newline at end of file
diff --git a/docs/css/umn/css_01_0001.html b/docs/css/umn/css_01_0001.html
deleted file mode 100644
index 83d2381d..00000000
--- a/docs/css/umn/css_01_0001.html
+++ /dev/null
@@ -1,31 +0,0 @@
-
-
-
-
Overview
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0006.html b/docs/css/umn/css_01_0006.html
deleted file mode 100644
index 8743b2e7..00000000
--- a/docs/css/umn/css_01_0006.html
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
-Getting Started
-
-
-
diff --git a/docs/css/umn/css_01_0007.html b/docs/css/umn/css_01_0007.html
deleted file mode 100644
index ed58ed9c..00000000
--- a/docs/css/umn/css_01_0007.html
+++ /dev/null
@@ -1,302 +0,0 @@
-
-
-Getting Started with Elasticsearch
-This section describes how to use Elasticsearch for product search. You can use the Elasticsearch search engine of CSS to search for data based on the scenario example below. The basic process is as follows: create a cluster, import data, search for data, and then delete the cluster when it is no longer needed.
-
-
Scenario Description
A women's clothing brand builds an e-commerce website. It uses traditional databases to provide a product search function for users. However, as the number of users and the business volume grow, the traditional databases respond slowly and return inaccurate results. To improve user experience and user retention, the e-commerce website plans to use Elasticsearch to provide the product search function for users.
-
This section describes how to use Elasticsearch to provide the search function for users.
-
Assume that the e-commerce website provides the following data:
-
{
-"products":[
-{"productName":"Latest art shirts for women in 2017 autumn","size":"L"}
-{"productName":"Latest art shirts for women in 2017 autumn","size":"M"}
-{"productName":"Latest art shirts for women in 2017 autumn","size":"S"}
-{"productName":"Latest jeans for women in spring 2018","size":"M"}
-{"productName":"Latest jeans for women in spring 2018","size":"S"}
-{"productName":"Latest casual pants for women in spring 2017","size":"L"}
-{"productName":"Latest casual pants for women in spring 2017","size":"S"}
-]
-}
-
-
Step 1: Create a Cluster
Create a cluster using Elasticsearch as the search engine. In this example, suppose that you create a cluster named Es-xfx. This cluster is used only for getting started with Elasticsearch. For this cluster, you are advised to select css.medium.8 for Node Specifications, Common I/O for Node Storage Type, and 40 GB for Node Storage Capacity. For details, see Creating an Elasticsearch Cluster in Non-Security Mode.
-
After you create the cluster, switch to the cluster list to view the created cluster. If the Status of the cluster is Available, the cluster is created successfully.
-
Figure 1 Creating a cluster
-
-
Step 2: Import Data
CSS supports importing data to Elasticsearch using Logstash, Kibana, or APIs. Kibana lets you visualize your Elasticsearch data. The following procedure illustrates how to import data to Elasticsearch using Kibana.
-
- On the Clusters page, locate the target cluster and click Access Kibana in the Operation column to go to the Kibana login page.
- Non-security cluster: The Kibana console is displayed.
- Security cluster: Enter the username and password on the login page and click Log In to go to the Kibana console. The default username is admin and the password is the one specified during cluster creation.
- - In the navigation pane of Kibana on the left, choose Dev Tools, as shown in Figure 2.
The text box on the left is the input box. The triangle icon in the upper right corner of the input box is the command execution button. The text box on the right is the result output box.
-Figure 2 Console page
-
The Kibana UI varies depending on the Kibana version.
-
 - - On the Console page, run the following command to create an index named my_store:
(Versions earlier than 7.x)
PUT /my_store
-{
- "settings": {
- "number_of_shards": 1
- },
- "mappings": {
- "products": {
- "properties": {
- "productName": {
- "type": "text",
- "analyzer": "ik_smart"
- },
- "size": {
- "type": "keyword"
- }
- }
- }
- }
-}
-
-(Versions later than 7.x)
-PUT /my_store
-{
- "settings": {
- "number_of_shards": 1
- },
- "mappings": {
- "properties": {
- "productName": {
- "type": "text",
- "analyzer": "ik_smart"
- },
- "size": {
- "type": "keyword"
- }
- }
- }
- }
-The command output is similar to the following:
-{
- "acknowledged" : true,
- "shards_acknowledged" : true,
- "index" : "my_store"
-}
- - On the Console page, run the following command to import data to index named my_store:
(Versions earlier than 7.x)
-POST /my_store/products/_bulk
-{"index":{}}
-{"productName":"Latest art shirts for women in 2017 autumn","size":"L"}
-{"index":{}}
-{"productName":"Latest art shirts for women in 2017 autumn","size":"M"}
-{"index":{}}
-{"productName":"Latest art shirts for women in 2017 autumn","size":"S"}
-{"index":{}}
-{"productName":"Latest jeans for women in spring 2018","size":"M"}
-{"index":{}}
-{"productName":"Latest jeans for women in spring 2018","size":"S"}
-{"index":{}}
-{"productName":"Latest casual pants for women in spring 2017","size":"L"}
-{"index":{}}
-{"productName":"Latest casual pants for women in spring 2017","size":"S"}
-
-(Versions later than 7.x)
-POST /my_store/_doc/_bulk
-{"index":{}}
-{"productName":"Latest art shirts for women in 2017 autumn","size":"L"}
-{"index":{}}
-{"productName":"Latest art shirts for women in 2017 autumn","size":"M"}
-{"index":{}}
-{"productName":"Latest art shirts for women in 2017 autumn","size":"S"}
-{"index":{}}
-{"productName":"Latest jeans for women in spring 2018","size":"M"}
-{"index":{}}
-{"productName":"Latest jeans for women in spring 2018","size":"S"}
-{"index":{}}
-{"productName":"Latest casual pants for women in spring 2017","size":"L"}
-{"index":{}}
-{"productName":"Latest casual pants for women in spring 2017","size":"S"}
-If the value of the errors field in the command output is false, the data is imported successfully.
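Besides Kibana, you can also import and query data by calling the Elasticsearch REST API directly, which is the API option mentioned at the beginning of this step. The following is a minimal sketch for verifying the import with a document count from an ECS in the same VPC as the cluster; the <cluster-endpoint> placeholder is an assumption and must be replaced with your cluster's actual access address. Port 9200 is the HTTP port that CSS clusters expose.
# Count the documents in the my_store index (non-security cluster, HTTP access).
curl -XGET "http://<cluster-endpoint>:9200/my_store/_count?pretty"
# For a security cluster with HTTPS enabled, supply the username and password, for example:
# curl -XGET -u admin:<password> --insecure "https://<cluster-endpoint>:9200/my_store/_count?pretty"
If the import succeeded, the count field in the response should equal the number of documents in the bulk request (7 in this example).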
-
-
-
Step 3: Search for Data
- Full-text search
If you access the e-commerce website and want to search for commodities whose names include "spring jeans", enter "spring jeans" to begin your search. The following example shows the command to be executed on Kibana and the command output.
-Command to be executed on Kibana:
-(Versions earlier than 7.x)
-GET /my_store/products/_search
-{
- "query": {"match": {
- "productName": "spring jeans"
- }}
-}
-(Versions later than 7.x)
-GET /my_store/_search
-{
- "query": {"match": {
- "productName": "spring jeans"
- }}
-}
-The command output is similar to the following:
-{
- "took": 80,
- "timed_out": false,
- "_shards": {
- "total": 1,
- "successful": 1,
- "skipped": 0,
- "failed": 0
- },
- "hits": {
- "total": 4,
- "max_score": 1.8069603,
- "hits": [
- {
- "_index": "my_store",
- "_type": "products",
- "_id": "yTG1QWUBRuneTTG2KJSq",
- "_score": 1.8069603,
- "_source": {
- "productName": "Latest jeans for women in spring 2018",
- "size": "M"
- }
- },
- {
- "_index": "my_store",
- "_type": "products",
- "_id": "yjG1QWUBRuneTTG2KJSq",
- "_score": 1.8069603,
- "_source": {
- "productName": "Latest jeans for women in spring 2018",
- "size": "S"
- }
- },
- {
- "_index": "my_store",
- "_type": "products",
- "_id": "yzG1QWUBRuneTTG2KJSq",
- "_score": 0.56677663,
- "_source": {
- "productName": "Latest casual pants for women in spring 2017",
- "size": "L"
- }
- },
- {
- "_index": "my_store",
- "_type": "products",
- "_id": "zDG1QWUBRuneTTG2KJSq",
- "_score": 0.56677663,
- "_source": {
- "productName": "Latest casual pants for women in spring 2017",
- "size": "S"
- }
- }
- ]
- }
-}
-- Elasticsearch supports full-text search. The preceding command searches for information about all commodities whose names include "spring" or "jeans". (A sketch that restricts the match to names containing both keywords is provided at the end of this step.)
- Unlike traditional databases, Elasticsearch can return results in milliseconds by using inverted indexes.
- Elasticsearch sorts results by relevance score. In the command output, the names of the first two commodities contain both "spring" and "jeans", while the names of the last two contain only "spring". The first two therefore rank higher because they match more keywords.
-
-
- Aggregation result display
The e-commerce website also displays aggregation results. For example, it groups the commodities matching "spring" by size so that you can see how many products are available in each size. The following example shows the command to be executed on Kibana and the command output.
-Command to be executed on Kibana:
-(Versions earlier than 7.x)
-GET /my_store/products/_search
-{
-  "query": {
-    "match": { "productName": "spring" }
-  },
-  "size": 0,
-  "aggs": {
-    "sizes": {
-      "terms": { "field": "size" }
-    }
-  }
-}
-(Versions 7.x and later)
-GET /my_store/_search
-{
-  "query": {
-    "match": { "productName": "spring" }
-  },
-  "size": 0,
-  "aggs": {
-    "sizes": {
-      "terms": { "field": "size" }
-    }
-  }
-}
-The command output is similar to the following:
-(Versions earlier than 7.x)
-{
- "took": 66,
- "timed_out": false,
- "_shards": {
- "total": 1,
- "successful": 1,
- "skipped": 0,
- "failed": 0
- },
- "hits": {
- "total": 4,
- "max_score": 0,
- "hits": []
- },
- "aggregations": {
- "sizes": {
- "doc_count_error_upper_bound": 0,
- "sum_other_doc_count": 0,
- "buckets": [
- {
- "key": "S",
- "doc_count": 2
- },
- {
- "key": "L",
- "doc_count": 1
- },
- {
- "key": "M",
- "doc_count": 1
- }
- ]
- }
- }
-}
-(Versions 7.x and later)
-{
- "took" : 27,
- "timed_out" : false,
- "_shards" : {
- "total" : 1,
- "successful" : 1,
- "skipped" : 0,
- "failed" : 0
- },
- "hits" : {
- "total" : {
- "value" : 3,
- "relation" : "eq"
- },
- "max_score" : null,
- "hits" : [ ]
- },
- "aggregations" : {
- "sizes" : {
- "doc_count_error_upper_bound" : 0,
- "sum_other_doc_count" : 0,
- "buckets" : [
- {
- "key" : "L",
- "doc_count" : 1
- },
- {
- "key" : "M",
- "doc_count" : 1
- },
- {
- "key" : "S",
- "doc_count" : 1
- }
- ]
- }
- }
-}
-
-
-
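If you want to return only commodities whose names contain both keywords rather than either one, you can tighten the match query. A minimal sketch in the same Kibana console style, assuming the index layout used above; the operator parameter is standard Elasticsearch match syntax:
GET /my_store/_search
{
  "query": {
    "match": {
      "productName": {
        "query": "spring jeans",
        "operator": "and"
      }
    }
  }
}
With "operator": "and", only the two jeans documents should be returned, because the casual pants documents contain "spring" but not "jeans".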
Step 4: Delete the Cluster
Once you are familiar with the process of using Elasticsearch, you can perform the following steps to delete the example cluster and its data to avoid wasting resources.
-
After you delete a cluster, its data cannot be restored. Exercise caution when deleting a cluster.
-
-
- Log in to the CSS management console. In the navigation pane on the left, choose Clusters > Elasticsearch.
- Locate the row that contains cluster Es-xfx and click More > Delete in the Operation column.
- In the displayed dialog box, enter the name of the cluster to be deleted and click OK.
-
-
-
-
diff --git a/docs/css/umn/css_01_0008.html b/docs/css/umn/css_01_0008.html
deleted file mode 100644
index cd1beabb..00000000
--- a/docs/css/umn/css_01_0008.html
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
-Creating an Elasticsearch Cluster
-
-
-
diff --git a/docs/css/umn/css_01_0009.html b/docs/css/umn/css_01_0009.html
deleted file mode 100644
index 51f2f544..00000000
--- a/docs/css/umn/css_01_0009.html
+++ /dev/null
@@ -1,27 +0,0 @@
-
-
-Managing Elasticsearch Clusters
-
-
-
diff --git a/docs/css/umn/css_01_0011.html b/docs/css/umn/css_01_0011.html
deleted file mode 100644
index 4084bbd5..00000000
--- a/docs/css/umn/css_01_0011.html
+++ /dev/null
@@ -1,343 +0,0 @@
-
-
-Creating an Elasticsearch Cluster in Security Mode
-This section describes how to create an Elasticsearch cluster in security mode.
-
Public IP address access and Kibana public access can be used only after security mode is enabled.
-
-
Context
- When creating a cluster, the number of nodes that can be added varies according to the node type. For details, see Table 1.
-
Table 1 Number of nodes in different types
Node Type
- |
-Number
- |
-
-
-ess
- |
-ess: 1-32
- |
-
-ess, ess-master
- |
-ess: 1-200
-ess-master: an odd number ranging from 3 to 9
- |
-
-ess, ess-client
- |
-ess: 1-32
-ess-client: 1-32
- |
-
-ess, ess-cold
- |
-ess: 1-32
-ess-cold: 1-32
- |
-
-ess, ess-master, ess-client
- |
-ess: 1-200
-ess-master: an odd number ranging from 3 to 9
-ess-client: 1-32
- |
-
-ess, ess-master, ess-cold
- |
-ess: 1-200
-ess-master: an odd number ranging from 3 to 9
-ess-cold: 1-32
- |
-
-ess, ess-client, ess-cold
- |
-ess: 1-32
-ess-client: 1-32
-ess-cold: 1-32
- |
-
-ess, ess-master, ess-client, ess-cold
- |
-ess: 1-200
-ess-master: an odd number ranging from 3 to 9
-ess-client: 1-32
-ess-cold: 1-32
- |
-
-Details about the four node types:
- ess: the default node type that is mandatory for cluster creation. The other three node types are optional.
- ess-master: master node
- ess-client: client node
- ess-cold: cold data node
-
- |
-
-
-
-
-
-
-
Procedure
- Log in to the CSS management console.
- On the Dashboard page, click Create Cluster in the upper right corner. The Create page is displayed.
Alternatively, choose Clusters > Elasticsearch in the navigation tree on the left. Click Create Cluster in the upper right corner. The Create page is displayed.
- - Specify Region and AZ.
-
Table 2 Region and AZ parameters
Parameter
- |
-Description
- |
-
-
-Region
- |
-Select a region for the cluster from the drop-down list on the right. Currently, only eu-de and eu-nl are supported.
- |
-
-AZ
- |
-Select AZs associated with the cluster region.
-You can select a maximum of three AZs. For details, see Deploying a Cross-AZ Cluster.
- |
-
-
-
-
- - Configure basic cluster information.
-
Table 3 Basic parameters
Parameter
- |
-Description
- |
-
-
-Version
- |
-Select a cluster version from the drop-down list box.
- |
-
-Name
- |
-Cluster name, which contains 4 to 32 characters. Only letters, numbers, hyphens (-), and underscores (_) are allowed and the value must start with a letter.
- NOTE: After a cluster is created, you can modify the cluster name as required. Click the name of the cluster to be modified. On the displayed Basic Information page, click the edit icon next to the cluster name. After the modification is complete, click the confirm icon to save the change, or click the cancel icon to discard it.
-
- |
-
-
-
-
- - Configure cluster specifications.
-
Table 4 Parameter description
Parameter
- |
-Description
- |
-
-
-Nodes
- |
-Number of nodes in a cluster. Select a number from 1 to 32. You are advised to configure three or more nodes to ensure high availability of the cluster.
-- If neither a master node nor client node is enabled, the nodes specified by this parameter are used to serve as both the master node and client node. Nodes provide the cluster management, data storage, cluster access, and data analysis functions. To ensure data stability in the cluster, it is recommended that you set this parameter to a value no less than 3.
- If only the master node function is enabled, nodes specified by this parameter are used to store data and provide functions of client nodes.
- If both the master and client node functions are enabled, the nodes specified by this parameter are only used for storing data.
- If only the client node function is enabled, nodes specified by this parameter are used to store data and provide functions of the master node.
- |
-
-CPU Architecture
- |
-Currently, only x86 is supported. The available types depend on the actual regional environment.
- |
-
-Node Specifications
- |
-Specifications of nodes in a cluster. You can select a specified specification based on your needs. Each cluster supports only one specification.
-After you select a flavor, the CPU and memory corresponding to the current specification are displayed below the parameter. For example, if you select css.medium.8, then 1 vCPU | 8 GB will be displayed, indicating that the selected node flavor provides one vCPU and 8 GB of memory.
- |
-
-Node Storage Type
- |
-In the current version, the following options are available: Common I/O, High I/O, and Ultra-high I/O.
- |
-
-Node Storage Capacity
- |
-Storage space. Its value varies with node specifications.
-The node storage capacity must be a multiple of 20.
- |
-
-Disk Encryption
- |
-If you select this option, the nodes in the cluster you create will use encrypted EVS disks to protect data. By default, this option is not selected. Note that you cannot modify this setting after the cluster is created. Therefore, exercise caution when performing the setting.
-After you select this option, you need to select an available key from the Key Name drop-down list. If no key is available, click Create/View Key to go to the KMS management console and create or modify a key. For details, see Creating a CMK.
-Enabling disk encryption has no impact on your operations on a cluster (such as accessing the cluster and importing data to the cluster). However, after you enable disk encryption, operation performance deteriorates by about 10%.
- NOTE: - If the cluster is in the Available status and the key used for disk encryption is in the Pending deletion or Disabled status or has been deleted after the cluster is created, cluster scale-out is not allowed. However, other operations on the cluster, such as restarting the cluster, creating snapshots, restoring the cluster, and importing data to the cluster, are not affected. In addition, this key cannot be used for cluster creation in the future.
- After a cluster is created, do not delete the key used by the cluster. Otherwise, the cluster will become unavailable.
- You cannot use default master keys whose aliases end with /default in KMS to encrypt snapshots.
-
- |
-
-Master node
- |
-The master node manages all nodes in the cluster. If more than 20 nodes are required to store and analyze a large amount of data, you are advised to enable the master node to ensure cluster stability. Otherwise, you are advised to set only the Nodes parameter and use the nodes as both master and client nodes.
-After enabling the master node, specify Node Specifications, Nodes, and Node Storage Type. The value of Nodes must be an odd number equal to or greater than 3. Up to nine nodes are supported. The value of Node Storage Capacity is fixed. You can select a storage type based on your needs.
- |
-
-Client node
- |
-The client node allows clients to access clusters and analyze data. If more than 20 nodes are required to store and analyze a large amount of data, you are advised to enable the client node to ensure cluster stability. Otherwise, you are advised to set only the Nodes parameter and use the nodes as both master and client nodes.
-After enabling the client node, specify Node Specifications, Nodes and Node Storage Type. The value of Nodes ranges from 1 to 32. The value of Node Storage Capacity is fixed. You can select a storage type based on your needs.
- |
-
-Cold data node
- |
-The cold data node is used to store historical data, for which query responses can be returned in minutes. If you do not require a quick query response, store historical data on cold data nodes to reduce costs.
-After enabling the cold data node, configure Node Specifications, Nodes, Node Storage Type, and Node Storage Capacity. The value of Nodes ranges from 1 to 32. Select Node Storage Type and Node Storage Capacity as required.
-After the cold data node is enabled, CSS automatically adds cold and hot tags to related nodes.
- |
-
-
-
-
-Figure 1 Configuring host specifications
- - Set the enterprise project.
When creating a CSS cluster, you can bind an enterprise project to the cluster if you have enabled the enterprise project function. You can select an enterprise project created by the current user from the drop-down list on the right or click View Project Management to go to the Enterprise Project Management console and create a new project or view existing projects.
- - Click Next: Configure Network. Configure the cluster network.
-
Table 5 Network configuration parameters
Parameter
- |
-Description
- |
-
-
-VPC
- |
-A VPC is a secure, isolated, and logical network environment.
-Select the target VPC. Click View VPC to enter the VPC management console and view the created VPC names and IDs. If no VPCs are available, create one.
- NOTE: The VPC must contain CIDRs. Otherwise, cluster creation will fail. By default, a VPC will contain CIDRs.
-
- |
-
-Subnet
- |
-A subnet provides dedicated network resources that are isolated from other networks, improving network security.
-Select the target subnet. You can access the VPC management console to view the names and IDs of the existing subnets in the VPC.
- |
-
-Security Group
- |
-A security group implements access control for ECSs that have the same security protection requirements in a VPC. To view more details about the security group, click View Security Group.
- NOTE: - For cluster access purposes, ensure that the security group contains port 9200.
- If your cluster version is 7.6.2 or later, ensure that all the ports used for communication between nodes in the same security group are allowed. If such settings cannot be configured, ensure at least the access to port 9300 is allowed.
- After the port 9300 is enabled, if the cluster disk usage is high, delete expired data to release the disk storage space.
-
- |
-
-Security Mode
- |
-After the security mode is enabled, communication will be encrypted and authentication required for the cluster.
-- The default Administrator Username is admin.
- Set and confirm the Administrator Password. This password will be required when you access this cluster. (A connectivity-check sketch is provided after this procedure.)
- |
-
-HTTPS Access
- |
-HTTPS access can be enabled only after the security mode of the cluster is enabled. After HTTPS access is enabled, communication is encrypted when you access the cluster.
- NOTE: Security clusters use HTTPS for communication, which is slower than the HTTP communication used by non-security clusters. If you want fast read performance while still using the resource isolation (for indexes, documents, and fields) provided by the security mode, you can disable HTTPS Access. After HTTPS Access is disabled, HTTP is used for cluster communication. In this case, data security cannot be ensured and a public IP address cannot be used.
-
- |
-
-Public IP Address
- |
-If HTTPS Access is enabled, you can configure Public Network Access and obtain an IP address for public network access. This IP address can be used to access this security cluster through the public network. For details, see Accessing a Cluster from a Public Network.
- |
-
-
-
-
-Figure 2 Configuring network specifications
- - Click Next: Configure Advanced Settings. Configure the automatic snapshot creation and other functions.
- Configure Cluster Snapshot. Set basic configuration and snapshot configuration.
The cluster snapshot function is enabled by default. You can also disable this function as required. To store automatic snapshots in OBS, an agency will be created to access OBS. Additional cost will be incurred if snapshots are stored in standard storage.
-
-Table 6 Cluster snapshot parameter
-Parameter
- |
-Description
- |
-
-
-OBS bucket
- |
-Select an OBS bucket for storing snapshots from the drop-down list box. You can also click Create Bucket on the right to create an OBS bucket. For details, see Creating a Bucket.
-The created or existing OBS bucket must meet the following requirements:
-- Storage Class is Standard or Warm.
- Region must be the same as that of the created cluster.
- |
-
-Backup Path
- |
-Storage path of the snapshot in the OBS bucket.
-The backup path configuration rules are as follows: - The backup path cannot contain the following characters: \:*?"<>|
- The backup path cannot start with a slash (/).
- The backup path cannot start or end with a period (.).
- The backup path cannot contain more than 1,023 characters.
-
- |
-
-IAM Agency
- |
-IAM agency authorized by the current account for CSS to access or maintain the data stored in the OBS bucket. You can also click Create IAM Agency on the right to create an IAM agency. For details, see Creating an Agency.
-The created or existing IAM agency must meet the following requirements:
-- Agency Type must be Cloud service.
- Set Cloud Service to CSS.
- The agency must have the Tenant Administrator permission for the OBS(S3) project in OBS(S3).
- |
-
-Snapshot Encryption
- |
-Indicates whether to enable the snapshot encryption function. Enabling the snapshot encryption function ensures the security of your snapshot data.
-After the snapshot encryption function is enabled, select a key from the Key Name drop-down list. If no key is available, click Create/View Key to switch to the KMS management console to create or modify a key. For details, see Creating a CMK.
-- You cannot use default master keys whose aliases end with /default in KMS to encrypt snapshots.
- If a snapshot has been stored in the OBS bucket, you cannot modify the parameters used for encrypting the snapshot.
- If the key used for encryption is in the Pending deletion or Disabled status, you cannot perform backup and restoration operations on the cluster. Specifically, you cannot create new snapshots for the cluster or use existing snapshots to restore clusters. In this case, switch to the KMS management console and change the status of the target key to Enabled so that backup and restoration operations are allowed on the cluster.
- If you delete the key used for encryption, you cannot perform backup and restoration operations on the cluster, and the deleted key cannot be restored. Therefore, exercise caution when deleting a key. If the key is deleted or is in the Pending deletion or Disabled state, automatic snapshot creation still runs based on the configured snapshot policy, but all automatic snapshot creation tasks will fail and are displayed in the failed task list in the Failed Tasks dialog box. In this scenario, you are advised to disable automatic snapshot creation.
- |
-
-
-
-
-
-Table 7 Automatic snapshot creation parameter
-Parameter
- |
-Description
- |
-
-
-Snapshot Name Prefix
- |
-The snapshot name prefix contains 1 to 32 characters and must start with a lowercase letter. Only lowercase letters, digits, hyphens (-), and underscores (_) are allowed. A snapshot name consists of a snapshot name prefix and a timestamp, for example, snapshot-1566921603720.
- |
-
-Time Zone
- |
-Time zone for the backup time, which cannot be changed. Specify the backup start time based on this time zone.
- |
-
-Backup Start Time
- |
-The time when the backup starts automatically every day. You can specify this parameter only in full hours, for example, 00:00 or 01:00. The value ranges from 00:00 to 23:00. Select a time from the drop-down list.
- |
-
-Retention Period (days)
- |
-The number of days that snapshots are retained in the OBS bucket. The value ranges from 1 to 90. You can specify this parameter as required. The system automatically deletes expired snapshots every hour at half past the hour.
- |
-
-
-
-
-Figure 3 Setting parameters for automatic snapshot creation
- - Configure advanced settings for the cluster.
- Default: The VPC Endpoint Service, Kibana Public Access, and Tag functions are disabled by default. You can manually enable these functions after the cluster is created.
- Custom: You can enable the VPC Endpoint Service, Kibana Public Access, and Tag functions as required.
-
-Table 8 Parameters for advanced settings
-Parameter
- |
-Description
- |
-
-
-VPC Endpoint Service
- |
-After enabling this function, you can obtain a private domain name for accessing the cluster in the same VPC. For details, see Accessing a Cluster Using a VPC Endpoint.
- |
-
-Kibana Public Access
- |
-You can configure this parameter only when security mode is enabled for a cluster. After enabling this function, you can obtain a public IP address for accessing Kibana. For details, see Accessing a Cluster from a Kibana Public Network.
- |
-
-Tag
- |
-Adding tags to clusters can help you identify and manage your cluster resources. You can customize tags or use tags predefined by Tag Management Service (TMS). For details, see Managing Tags.
- |
-
-
-
-
-
- - Click Next: Confirm. Check the configuration and click Next to create a cluster.
- Click Back to Cluster List to switch to the Clusters page. The cluster you created is listed on the displayed page and its status is Creating. If the cluster is successfully created, its status will change to Available.
If the cluster creation fails, create the cluster again.
-
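After a security-mode cluster with HTTPS access becomes Available, you can quickly check connectivity from an ECS in the same VPC. A minimal sketch, assuming a node access address of 192.168.0.90:9200 and the admin account configured above (the address and password are placeholders; -k skips verification of the self-signed certificate):
curl -u admin:<Administrator password> -k "https://192.168.0.90:9200"
If authentication succeeds, Elasticsearch returns a JSON document containing the cluster name and version information.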
-
-
-
-
diff --git a/docs/css/umn/css_01_0014.html b/docs/css/umn/css_01_0014.html
deleted file mode 100644
index 2bcae056..00000000
--- a/docs/css/umn/css_01_0014.html
+++ /dev/null
@@ -1,25 +0,0 @@
-
-
-Restarting a Cluster
-If a cluster becomes faulty, you can restart it to check if it can run normally.
-
Prerequisites
- The target cluster is not frozen and has no task in progress.
- If a cluster is available, ensure that it has stopped processing service requests (such as importing data and searching for data). Otherwise, data may be lost when the cluster is restarted. You are advised to perform this operation during off-peak hours.
-
-
Context
CSS supports quick restart and rolling restart.
-
Quick Restart
- All clusters support this function.
- If you select a node type for quick restart, all nodes of the selected type will be restarted together.
- If you select a node name for quick restart, only the specified node will be restarted.
- The cluster is unavailable during quick restart.
-
-
Rolling Restart
- Rolling restart is supported only when a cluster has at least three nodes (including master nodes, client nodes, and cold data nodes).
- Rolling restart can be performed only by specifying node types. If you select a node type for rolling restart, the nodes of the selected type will be restarted in sequence.
- During the rolling restart, only the nodes that are being restarted are unavailable and other nodes can run normally.
- When the data volume is large, rolling restart will take a long time.
-
-
-
Quick Restart
- Log in to the CSS management console.
- In the navigation tree on the left, select a cluster type. The cluster management list page is displayed.
- In the Operation column of the target cluster, choose More > Restart.
- On the Restart Cluster page, select Quick Restart.
You can quickly restart nodes by Node type or Node name. If you select Node type, you can select multiple node types and restart them at the same time. If you select Node name, you can restart only one node at a time.
- - Refresh the page and check the cluster status. During the restart, the cluster status is Processing, and the task status is Restarting. If the cluster status changes to Available, the cluster has been restarted successfully.
-
-
Rolling Restart
- Log in to the CSS management console.
- In the navigation tree on the left, select a cluster type. The cluster management list page is displayed.
- In the Operation column of the target cluster, choose More > Restart.
- On the Restart Cluster page, select Rolling Restart.
You can perform rolling restart by Node type. Select specific node types for restart.
- - Refresh the page and check the cluster status. During the restart, the cluster status is Processing, and the task status is Restarting. If the cluster status changes to Available, the cluster has been restarted successfully.
-
-
-
-
diff --git a/docs/css/umn/css_01_0015.html b/docs/css/umn/css_01_0015.html
deleted file mode 100644
index d33ab41a..00000000
--- a/docs/css/umn/css_01_0015.html
+++ /dev/null
@@ -1,15 +0,0 @@
-
-
-Deleting a Cluster
-You can delete clusters that you no longer need.
-
- If you delete a cluster, the cluster service data will be cleared. Exercise caution when performing this operation.
- The snapshots of a cluster stored in OBS are not deleted with the cluster. You can restore a deleted cluster using its snapshots stored in the OBS bucket.
-
-
Procedure
- Log in to the CSS management console.
- In the navigation tree on the left, select a cluster type. The cluster list page is displayed.
- Locate the target cluster and click More > Delete in the Operation column.
- In the displayed dialog box, enter the name of the cluster to be deleted and click OK.
-
-
-
-
diff --git a/docs/css/umn/css_01_0024.html b/docs/css/umn/css_01_0024.html
deleted file mode 100644
index 9f0f7c7f..00000000
--- a/docs/css/umn/css_01_0024.html
+++ /dev/null
@@ -1,144 +0,0 @@
-
-
-Using Kibana or APIs to Import Data to Elasticsearch
-You can import data in various formats, such as JSON, to Elasticsearch in CSS by using Kibana or APIs.
-
Importing Data Using Kibana
Before importing data, ensure that you can use Kibana to access the cluster. The following procedure illustrates how to use the
POST command to import data.
- Log in to the CSS management console.
- In the navigation pane on the left, choose Clusters > Elasticsearch to switch to the Clusters page.
- Choose Clusters in the navigation pane. Locate the target cluster and click Access Kibana in the Operation column to log in to Kibana.
- Click Dev Tools in the navigation tree on the left.
- (Optional) On the Console page, run the related command to create an index for storing data and specify a custom mapping to define the data type.
If there is an available index in the cluster where you want to import data, skip this step. If there is no available index, create an index by referring to the following sample code.
-For example, on the Console page of Kibana, run the following command to create an index named my_store and specify a user-defined mapping to define the data type:
-Versions earlier than 7.x
-PUT /my_store
-{
- "settings": {
- "number_of_shards": 1
- },
- "mappings": {
- "products": {
- "properties": {
- "productName": {
- "type": "text"
- },
- "size": {
- "type": "keyword"
- }
- }
- }
- }
-}
-
-Versions 7.x and later
-PUT /my_store
-{
- "settings": {
- "number_of_shards": 1
- },
- "mappings": {
- "properties": {
- "productName": {
- "type": "text"
- },
- "size": {
- "type": "keyword"
- }
- }
- }
-}
- - Run commands to import data. For example, run the following command to import a piece of data:
Versions earlier than 7.x
POST /my_store/products/_bulk
-{"index":{}}
-{"productName":"Latest art shirts for women in 2017 autumn","size":"L"}
-
-Versions 7.x and later
-POST /my_store/_bulk
-{"index":{}}
-{"productName":"Latest art shirts for women in 2017 autumn","size":"L"}
-The command output is similar to that shown in Figure 1. If the value of the errors field in the result is false, the data has been imported successfully. (A verification query sketch is provided after this procedure.)
-Figure 1 Response message
-
-
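To verify the import, you can query the index from the same Console page. A minimal sketch, assuming the my_store index created above:
GET /my_store/_search
{
  "query": { "match_all": {} }
}
The hits.total value (hits.total.value in versions 7.x and later) should equal the number of documents you imported.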
-
-
Importing Data Using APIs
You can call the bulk API using the cURL command to import a JSON data file.
-
You are advised to import a file smaller than 50 MB.
-
-
- Log in to the ECS that you use to access the cluster.
- Run the following command to import JSON data:
In the command, replace {Private network address and port number of the node} with the private network address and port number of a node in the cluster. If that node fails to work, the command will fail to be executed. If the cluster contains multiple nodes, you can use the private network address and port number of any available node. If the cluster contains only one node, restore the node and execute the command again.
test.json indicates the JSON file whose data is to be imported.
curl -X PUT "http://{Private network address and port number of the node} /_bulk" -H 'Content-Type: application/json' --data-binary @test.json
-
-If communication encryption has been enabled on the cluster where you will import data, you need to send HTTPS requests and add -k to the cURL command.
-curl -X PUT -k "https://{Private network address and port number of the node} /_bulk" -H 'Content-Type: application/json' --data-binary @test.json
-
The value of the -X parameter is the request method and that of the -H parameter is a message header. In the preceding command, PUT is the value of the -X parameter, 'Content-Type: application/json' is the value of the -H parameter, and --data-binary @test.json specifies the file that contains the request body. Do not add -k between a parameter and its value.
-
-Example 1: In this example, assume that you need to import data in the testdata.json file to an Elasticsearch cluster, where communication encryption is disabled and the private network address and port number of one node are 192.168.0.90 and 9200 respectively. The data in the testdata.json file is as follows:
-Versions earlier than 7.x
-{"index": {"_index":"my_store","_type":"products"}}
-{"productName":"Autumn new woman blouses 2019","size":"M"}
-{"index": {"_index":"my_store","_type":"products"}}
-{"productName":"Autumn new woman blouses 2019","size":"L"}
-Versions 7.x and later
-{"index": {"_index":"my_store"}}
-{"productName":"Autumn new woman blouse 2019","size":"M"}
-{"index": {"_index":"my_store"}}
-{"productName":"Autumn new woman blouse 2019","size":"L"}
-Perform the following steps to import the data (a verification sketch is provided at the end of this section):
-- Run the following command to create an index named my_store:
Versions earlier than 7.x
curl -X PUT http://192.168.0.90:9200/my_store -H 'Content-Type: application/json' -d '
- {
- "settings": {
- "number_of_shards": 1
- },
- "mappings": {
- "products": {
- "properties": {
- "productName": {
- "type": "text"
- },
- "size": {
- "type": "keyword"
- }
- }
- }
- }
- }'
-
-Versions 7.x and later
-curl -X PUT http://192.168.0.90:9200/my_store -H 'Content-Type: application/json' -d '
-{
- "settings": {
- "number_of_shards": 1
- },
- "mappings": {
- "properties": {
- "productName": {
- "type": "text"
- },
- "size": {
- "type": "keyword"
- }
- }
- }
-}'
- - Run the following command to import the data in the testdata.json file:
curl -X PUT "http://192.168.0.90:9200/_bulk" -H 'Content-Type: application/json' --data-binary @testdata.json
-
-Example 2: In this example, assume that you need to import data in the testdata.json file to an Elasticsearch cluster, where communication encryption has been enabled and the node access address and content in the testdata.json are the same as those in example 1. Perform the following steps to import the data:
-- Run the following command to create an index named my_store:
curl -X PUT -k https://192.168.0.90:9200/my_store -H 'Content-Type: application/json' -d '
- {
- "settings": {
- "number_of_shards": 1
- },
- "mappings": {
- "products": {
- "properties": {
- "productName": {
- "type": "text"
- },
- "size": {
- "type": "keyword"
- }
- }
- }
- }
- }'
- - Run the following command to import the data in the testdata.json file:
curl -X PUT -k "https://192.168.0.90:9200/_bulk" -H 'Content-Type: application/json' --data-binary @testdata.json
-
-
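After running either example, you can verify the import over the same connection. A minimal sketch, assuming the non-encrypted cluster from example 1 (for a security cluster, use https and add -k as in example 2):
curl -X GET "http://192.168.0.90:9200/my_store/_search?pretty"
The hits returned in the response should match the documents in the testdata.json file.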
-
-
-
-
diff --git a/docs/css/umn/css_01_0033.html b/docs/css/umn/css_01_0033.html
deleted file mode 100644
index 4b190232..00000000
--- a/docs/css/umn/css_01_0033.html
+++ /dev/null
@@ -1,12 +0,0 @@
-
-
-Backup and Restoration Overview
-You can back up index data in clusters. If data loss occurs or you want to retrieve data of a specified duration, you can restore the index data. Index backup is implemented by creating cluster snapshots. When creating a backup for the first time, you are advised to back up data of all indexes.
-
- Managing Automatic Snapshot Creation: Snapshots are automatically created at a specified time each day according to the rules you create. You can enable or disable the automatic snapshot creation function and set the automatic snapshot creation policy.
- Manually Creating a Snapshot: You can manually create a snapshot at any time to back up all data or data of specified indexes.
- Restoring Data: You can use existing snapshots to restore the backup index data to a specified cluster.
- Deleting a Snapshot: Delete snapshots you do not require and release resources.
-
-
-
diff --git a/docs/css/umn/css_01_0041.html b/docs/css/umn/css_01_0041.html
deleted file mode 100644
index c6d9b3c1..00000000
--- a/docs/css/umn/css_01_0041.html
+++ /dev/null
@@ -1,15 +0,0 @@
-
-
-Monitoring
-
-
-
diff --git a/docs/css/umn/css_01_0042.html b/docs/css/umn/css_01_0042.html
deleted file mode 100644
index 04607b88..00000000
--- a/docs/css/umn/css_01_0042.html
+++ /dev/null
@@ -1,52 +0,0 @@
-
-
-Monitoring Metrics of Elasticsearch Clusters
-You can use Cloud Eye to monitor cluster metrics of CSS in real time and quickly handle exceptions. For details about Cloud Eye, see the Cloud Eye User Guide.
-
Table 1 lists the metrics supported by CSS.
-
-
Table 1 Supported metrics
Metric
- |
-Description
- |
-Formula
- |
-Value Range
- |
-Monitoring Interval
- |
-
-
-Disk Usage
- |
-Calculates the disk usage of a CSS cluster.
-Unit: %
- |
-Used disk space of a cluster/Total disk space of a cluster
- |
-0 to 100%
- |
-1 minute
- |
-
-Cluster Health Status
- |
-Measures the health status of a CSS cluster.
- |
--
- |
-Available values are 0, 1, and 2. (A cross-check sketch is provided after this table.)
-- 0: The cluster is 100% available.
- 1: The data is complete while some replicas are missing. Exceptions may occur because the high availability is compromised. This is a warning that should prompt investigation.
- 2: Data is missing and the cluster fails to work.
- |
-1 minute
- |
-
-
-
-
-
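The Cluster Health Status values appear to correspond to the standard Elasticsearch health colors (an assumption based on the descriptions above: 0 for green, 1 for yellow, 2 for red). You can cross-check the status on the Console page of Kibana:
GET _cluster/health
The status field in the response is green, yellow, or red.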
-
-
diff --git a/docs/css/umn/css_01_0045.html b/docs/css/umn/css_01_0045.html
deleted file mode 100644
index 35bc14b1..00000000
--- a/docs/css/umn/css_01_0045.html
+++ /dev/null
@@ -1,13 +0,0 @@
-
-
-Importing Data to Elasticsearch
-
-
-
diff --git a/docs/css/umn/css_01_0048.html b/docs/css/umn/css_01_0048.html
deleted file mode 100644
index 5b005c65..00000000
--- a/docs/css/umn/css_01_0048.html
+++ /dev/null
@@ -1,189 +0,0 @@
-
-
-Using Logstash to Import Data to Elasticsearch
-You can use Logstash to collect data and migrate collected data to Elasticsearch in CSS. This method helps you effectively obtain and manage data through Elasticsearch. Data files can be in the JSON or CSV format.
-
Logstash is an open-source, server-side data processing pipeline that ingests data from multiple sources simultaneously, transforms data, and then sends data to Elasticsearch. For details about Logstash, visit the following website: https://www.elastic.co/guide/en/logstash/current/getting-started-with-logstash.html
-
The following two scenarios are involved depending on the Logstash deployment:
-
-
-
Importing Data When Logstash Is Deployed on the External Network
Figure 1 illustrates how data is imported when Logstash is deployed on an external network.
-
Figure 1 Importing data when Logstash is deployed on an external network
-
-
- Create a jump host and configure it as follows:
- The jump host is an ECS running the Linux OS and has been bound with an EIP.
- The jump host resides in the same VPC as the CSS cluster.
- SSH local port forwarding is configured for the jump host to forward requests from a chosen local port to port 9200 on one node of the CSS cluster.
- Refer to SSH documentation for the local port forwarding configuration.
- - Use PuTTY to log in to the created jump host with the EIP.
- Run the following command to perform port mapping and transfer the request sent to the port on the jump host to the target cluster:
ssh -g -L <Local port of the jump host:Private network address and port number of a node> -N -f root@<Private IP address of the jump host>
-
- In the preceding command, <Local port of the jump host> refers to the port obtained in 1.
- In the preceding command, <Private network address and port number of a node> refers to the private network address and port number of a node in the cluster. If the node is faulty, the command execution will fail. If the cluster contains multiple nodes, you can replace the value of <private network address and port number of a node> with the private network address and port number of any available node in the cluster. If the cluster contains only one node, restore the node and execute the command again.
- Replace <Private IP address of the jump host> in the preceding command with the IP address (with Private IP) of the created jump host in the IP Address column in the ECS list on the ECS management console.
-
-For example, port 9200 on the jump host is assigned external network access permissions, the private network address and port number of the node are 192.168.0.81 and 9200, respectively, and the private IP address of the jump host is 192.168.0.227. You need to run the following command to perform port mapping:
-ssh -g -L 9200:192.168.0.81:9200 -N -f root@192.168.0.227
- - Log in to the server where Logstash is deployed and store the data files to be imported on the server.
For example, data file access_20181029_log needs to be imported, the file storage path is /tmp/access_log/, and the data file includes the following data:
-
Create the access_log folder if it does not exist.
-
-| All | Heap used for segments | | 18.6403 | MB |
-| All | Heap used for doc values | | 0.119289 | MB |
-| All | Heap used for terms | | 17.4095 | MB |
-| All | Heap used for norms | | 0.0767822 | MB |
-| All | Heap used for points | | 0.225246 | MB |
-| All | Heap used for stored fields | | 0.809448 | MB |
-| All | Segment count | | 101 | |
-| All | Min Throughput | index-append | 66232.6 | docs/s |
-| All | Median Throughput | index-append | 66735.3 | docs/s |
-| All | Max Throughput | index-append | 67745.6 | docs/s |
-| All | 50th percentile latency | index-append | 510.261 | ms |
-
- - In the server where Logstash is deployed, run the following command to create configuration file logstash-simple.conf in the Logstash installation directory:
cd /<Logstash installation directory>/
-vi logstash-simple.conf
- - Input the following content in logstash-simple.conf:
input {
-Location of data
-}
-filter {
-Related data processing
-}
-output {
- elasticsearch {
- hosts => "<EIP of the jump host>:<Number of the port assigned external network access permissions on the jump host>"
- (Optional) If communication encryption has been enabled on the cluster, you need to add the following configuration:
- ssl => true
- ssl_certificate_verification => false
- }
-}
-- The input parameter indicates the data source. Set this parameter based on the actual conditions. For details about the input parameter and parameter usage, visit the following website: https://www.elastic.co/guide/en/logstash/current/input-plugins.html
- The filter parameter specifies the mode in which data is processed. For example, extract and process logs to convert unstructured information into structured information. For details about the filter parameter and parameter usage, visit the following website: https://www.elastic.co/guide/en/logstash/current/filter-plugins.html
- The output parameter indicates the destination address of the data. For details about the output parameter and parameter usage, visit https://www.elastic.co/guide/en/logstash/current/output-plugins.html. Replace <EIP address of the jump host> with the IP address (with EIP) of the created jump host in the IP Address column in the ECS list on the ECS management console. <Number of the port assigned external network access permissions on the jump host> is the number of the port obtained in 1, for example, 9200.
-Consider the data files in the /tmp/access_log/ path mentioned in 4 as an example. Assume that data import starts from data in the first row of the data file, the filtering condition is left unspecified (indicating no data processing operations are performed), the public IP address and port number of the jump host are 192.168.0.227 and 9200, respectively, and the name of the target index is myindex. Edit the configuration file as follows, and enter :wq to save the configuration file and exit.
-input {
- file{
- path => "/tmp/access_log/*"
- start_position => "beginning"
- }
-}
-filter {
-}
-output {
- elasticsearch {
- hosts => "192.168.0.227:9200"
- index => "myindex"
-
- }
-}
-
If a license error is reported, set ilm_enabled to false.
-
-If the cluster has the security mode enabled, you need to download a certificate first.
-- Download a certificate on the Basic Information page of the cluster.
Figure 2 Downloading a certificate
- - Store the certificate to the server where Logstash is deployed.
- Modify the logstash-simple.conf configuration file.
Consider the data files in the
/tmp/access_log/ path mentioned in
4 as an example. Assume that data import starts from data in the first row of the data file, the filtering condition is left unspecified (indicating no data processing operations are performed), and the public IP address and port number of the jump host are
192.168.0.227 and
9200, respectively. The name of the index for importing data is
myindex, and the certificate is stored in
/logstash/logstash6.8/config/CloudSearchService.cer. Edit the configuration file as follows, and enter
:wq to save the configuration file and exit.
input{
- file {
- path => "/tmp/access_log/*"
- start_position => "beginning"
- }
-}
-filter {
- }
-output{
- elasticsearch{
- hosts => ["https://192.168.0.227:9200"]
- index => "myindex"
- user => "admin"
- password => "******"
- cacert => "/logstash/logstash6.8/config/CloudSearchService.cer"
- }
-}
-
password: password for logging in to the cluster
-
-
-
- - Run the following command to import the data collected by Logstash to the cluster:
./bin/logstash -f logstash-simple.conf
-
This command must be executed in the directory where the logstash-simple.conf file is stored. For example, if the logstash-simple.conf file is stored in /root/logstash-7.1.1/, go to that directory before running the command. (A configuration syntax check sketch is provided after this procedure.)
-
- - Log in to the CSS management console.
- In the navigation pane on the left, choose Clusters > Elasticsearch to switch to the Clusters page.
- From the cluster list, locate the row that contains the cluster to which you want to import data and click Access Kibana in the Operation column.
- In the Kibana navigation pane on the left, choose Dev Tools.
- On the Console page of Kibana, search for the imported data.
On the Console page of Kibana, run the following command to search for data. View the search results. If the searched data is consistent with the imported data, the data has been imported successfully.
-GET myindex/_search
-
-
-
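Before starting the import, you can optionally have Logstash validate the configuration file syntax and exit. A minimal sketch using a standard Logstash startup flag, run from the Logstash installation directory:
./bin/logstash -f logstash-simple.conf --config.test_and_exit
If the file parses correctly, Logstash prints Configuration OK; otherwise it reports the offending line.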
Importing Data When Logstash Is Deployed on an ECS
Figure 3 illustrates how data is imported when Logstash is deployed on an ECS that resides in the same VPC as the cluster to which data is to be imported.
-
Figure 3 Importing data when Logstash is deployed on an ECS
-
- Ensure that the ECS where Logstash is deployed and the cluster to which data is to be imported reside in the same VPC, port 9200 of the ECS security group has been assigned external network access permissions, and an EIP has been bound to the ECS.
- If there are multiple servers in a VPC, you do not need to bind EIPs to the other servers as long as one server has an EIP bound. You can switch from the node bound with the EIP to the node where Logstash is deployed.
- If a private line or VPN is available, you do not need to associate an EIP.
-
- - Use PuTTY to log in to the ECS.
For example, data file
access_20181029_log is stored in the
/tmp/access_log/ path of the ECS, and the data file includes the following data:
| All | Heap used for segments | | 18.6403 | MB |
-| All | Heap used for doc values | | 0.119289 | MB |
-| All | Heap used for terms | | 17.4095 | MB |
-| All | Heap used for norms | | 0.0767822 | MB |
-| All | Heap used for points | | 0.225246 | MB |
-| All | Heap used for stored fields | | 0.809448 | MB |
-| All | Segment count | | 101 | |
-| All | Min Throughput | index-append | 66232.6 | docs/s |
-| All | Median Throughput | index-append | 66735.3 | docs/s |
-| All | Max Throughput | index-append | 67745.6 | docs/s |
-| All | 50th percentile latency | index-append | 510.261 | ms |
-
- - Run the following command to create configuration file logstash-simple.conf in the Logstash installation directory:
cd /<Logstash installation directory>/
-vi logstash-simple.conf
-Input the following content in
logstash-simple.conf:
input {
-Location of data
-}
-filter {
-Related data processing
-}
-output {
- elasticsearch{
- hosts => "<Private network address and port number of the node>"}
- (Optional) If communication encryption has been enabled on the cluster, you need to add the following configuration:
- ssl => true
- ssl_certificate_verification => false
-}
-
- The input parameter indicates the data source. Set this parameter based on the actual conditions. For details about the input parameter and parameter usage, visit the following website: https://www.elastic.co/guide/en/logstash/current/input-plugins.html
- The filter parameter specifies the mode in which data is processed. For example, extract and process logs to convert unstructured information into structured information. For details about the filter parameter and parameter usage, visit the following website: https://www.elastic.co/guide/en/logstash/current/filter-plugins.html
- The output parameter indicates the destination address of the data. For details about the output parameter and parameter usage, visit https://www.elastic.co/guide/en/logstash/current/output-plugins.html. <private network address and port number of a node> refers to the private network address and port number of a node in the cluster.
If the cluster contains multiple nodes, you are advised to replace the value of <Private network address and port number of a node> with the private network addresses and port numbers of all nodes in the cluster to prevent node faults. Use commas (,) to separate the nodes' private network addresses and port numbers. The following is an example:
-hosts => ["192.168.0.81:9200","192.168.0.24:9200"]
-If the cluster contains only one node, the format is as follows:
-hosts => "192.168.0.81:9200"
-
-
-Consider the data files in the /tmp/access_log/ path mentioned in 2 as an example. Assume that data import starts from data in the first row of the data file, the filtering condition is left unspecified (indicating no data processing operations are performed), the private network address and port number of the node in the cluster where data is to be imported are 192.168.0.81 and 9200, respectively, and the name of the target index is myindex. Edit the configuration file as follows, and enter :wq to save the configuration file and exit.
-input {
- file{
- path => "/tmp/access_log/*"
- start_position => "beginning"
- }
-}
-filter {
-}
-output {
- elasticsearch {
- hosts => "192.168.0.81:9200"
- index => "myindex"
-
- }
-}
-If the cluster has the security mode enabled, you need to download a certificate first.
-- Download a certificate on the Basic Information page of the cluster.
Figure 4 Downloading a certificate
- - Store the certificate to the server where Logstash is deployed.
- Modify the logstash-simple.conf configuration file.
Consider the data files in the
/tmp/access_log/ path mentioned in step
2 as an example. Assume that data import starts from data in the first row of the data file, the filtering condition is left unspecified (indicating no data processing operations are performed), and the access address and port number used for the cluster are
192.168.0.227 and
9200, respectively. The name of the index for importing data is
myindex, and the certificate is stored in
/logstash/logstash6.8/config/CloudSearchService.cer. Edit the configuration file as follows, and enter
:wq to save the configuration file and exit.
input{
- file {
- path => "/tmp/access_log/*"
- start_position => "beginning"
- }
-}
-filter {
- }
-output{
- elasticsearch{
- hosts => ["https://192.168.0.227:9200"]
- index => "myindex"
- user => "admin"
- password => "******"
- cacert => "/logstash/logstash6.8/config/CloudSearchService.cer"
- }
-}
-
password: password for logging in to the cluster
-
-
-
-
- - Run the following command to import the ECS data collected by Logstash to the cluster:
./bin/logstash -f logstash-simple.conf
- - Log in to the CSS management console.
- In the navigation pane on the left, choose Clusters > Elasticsearch to switch to the Clusters page.
- From the cluster list, locate the row that contains the cluster to which you want to import data and click Access Kibana in the Operation column.
- In the Kibana navigation pane on the left, choose Dev Tools.
- On the Console page of Kibana, search for the imported data.
On the Console page of Kibana, run the following command to search for data. View the search results. If the searched data is consistent with the imported data, the data has been imported successfully.
-GET myindex/_search
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0049.html b/docs/css/umn/css_01_0049.html
deleted file mode 100644
index 0fc9b87c..00000000
--- a/docs/css/umn/css_01_0049.html
+++ /dev/null
@@ -1,13 +0,0 @@
-
-
-Auditing
-
-
-
diff --git a/docs/css/umn/css_01_0050.html b/docs/css/umn/css_01_0050.html
deleted file mode 100644
index 12e57ae8..00000000
--- a/docs/css/umn/css_01_0050.html
+++ /dev/null
@@ -1,103 +0,0 @@
-
-
-Key Operations Recorded by CTS
-Cloud Trace Service (CTS) is available on the public cloud platform. With CTS, you can record operations associated with CSS for later query, audit, and backtrack operations.
-
Prerequisites
CTS has been enabled. For details, see Enabling CTS.
-
-
Key Operations Recorded by CTS
-
Table 1 Key operations recorded by CTS
Operation
- |
-Resource Type
- |
-Event Name
- |
-
-
-Creating a cluster
- |
-cluster
- |
-createCluster
- |
-
-Deleting a cluster
- |
-cluster
- |
-deleteCluster
- |
-
-Expanding the cluster capacity
- |
-cluster
- |
-roleExtendCluster
- |
-
-Restarting a cluster
- |
-cluster
- |
-rebootCluster
- |
-
-Performing basic configurations for a cluster snapshot
- |
-cluster
- |
-updateSnapshotPolicy
- |
-
-Setting the automatic snapshot creation policy
- |
-cluster
- |
-updateAutoSnapshotPolicy
- |
-
-Upgrading a cluster
- |
-cluster
- |
-upgradeCluster
- |
-
-Retrying the upgrade
- |
-cluster
- |
-retryAction
- |
-
-Manually creating a snapshot
- |
-snapshot
- |
-createSnapshot
- |
-
-Restoring a snapshot
- |
-snapshot
- |
-restoreSnapshot
- |
-
-Deleting a snapshot
- |
-snapshot
- |
-deleteSnapshot
- |
-
-
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0051.html b/docs/css/umn/css_01_0051.html
deleted file mode 100644
index 73759332..00000000
--- a/docs/css/umn/css_01_0051.html
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
-Viewing Audit Logs
-After you enable CTS, it starts recording operations related to CSS. The CTS management console stores the last seven days of operation records. This section describes how to query the last seven days of operation records on the CTS management console.
-
Procedure
- Log in to the CTS management console.
- Click the region selector in the upper left corner and select a region.
- In the navigation pane on the left, click Trace List.
- You can use filters to query traces. The following four filter criteria are available:
- Trace Source, Resource Type, and Search By
Select a filter criterion from the drop-down list.
-When you select Trace name for Search By, select a specific trace name.
-When you select Resource ID for Search By, enter a specific resource ID.
-When you select Resource name for Search By, select or enter a specific resource name.
- - Operator: Select a specific operator (at user level rather than tenant level).
- Trace Status: Available options include All trace statuses, normal, warning, and incident. You can only select one of them.
- Time Range: You can query traces generated during any time range of the last seven days.
- Click the expand icon on the left of a trace to expand its details.
- Click View Trace in the Operation column. In the displayed View Trace dialog box, the trace structure details are displayed.
For details about the key fields in the CTS trace structure, see Cloud Trace Service User Guide.
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0053.html b/docs/css/umn/css_01_0053.html
deleted file mode 100644
index 0dca9c95..00000000
--- a/docs/css/umn/css_01_0053.html
+++ /dev/null
@@ -1,67 +0,0 @@
-
-
-Viewing the Cluster Runtime Status and Storage Capacity Status
-On the Dashboard page of the CSS management console, you can view information about the status and storage capacity of existing clusters.
-
-
Table 1 Cluster status description
Status
- |
-Description
- |
-
-
-Available
- |
-The cluster is running properly and is providing services.
- |
-
-Abnormal
- |
-The cluster creation failed or the cluster is unavailable.
-If a cluster is in the unavailable status, you can delete the cluster or use snapshots created when the cluster is available to restore data to other clusters. However, operations such as expanding cluster capacity, accessing Kibana, creating snapshots, and restoring snapshots to the cluster are not allowed. When a cluster is in the unavailable status, data importing is not recommended to avoid data loss. You can view the cluster metrics or restart the cluster. However, the operations may fail. If the operations fail, contact technical support in a timely manner.
- |
-
-Processing
- |
-The cluster is being restarted, scaled, backed up, or recovered.
- |
-
-Creating
- |
-The cluster is being created.
- |
-
-
-
-
-
-
Table 2 Cluster storage capacity status description
Status
- |
-Description
- |
-
-
-Normal
- |
-The storage capacity usage of all nodes in a cluster is less than 50%.
- |
-
-Warning
- |
-The storage capacity usage of any node in a cluster is greater than or equal to 50% and less than 80%.
- |
-
-Danger
- |
-The storage capacity usage of any node in a cluster is greater than or equal to 80%. You are advised to increase the storage space of the cluster to keep data search and analysis working normally. (A per-node disk usage check sketch is provided after this table.)
- |
-
-Abnormal
- |
-The cluster storage capacity usage is unknown. For example, if the status of a cluster is Abnormal due to faults, the storage space status of the cluster will be Abnormal.
- |
-
-
-
-
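The storage capacity status above is derived from per-node disk usage. To check the usage of each node yourself, you can run a standard Elasticsearch cat API query on the Console page of Kibana (the thresholds remain as described in Table 2):
GET _cat/allocation?v
The disk.percent column in the response shows the disk usage of each node.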
-
-
diff --git a/docs/css/umn/css_01_0055.html b/docs/css/umn/css_01_0055.html
deleted file mode 100644
index a6b9a621..00000000
--- a/docs/css/umn/css_01_0055.html
+++ /dev/null
@@ -1,146 +0,0 @@
-
-
-Change History
-
-
Released On
- |
-Description
- |
-
-
-2024-02-07
- |
-Deleted:
-
-Added:
-New examples in section Optimization of Other Parameters.
- |
-
-2023-12-15
- |
-Delete the following sections:
-- Using the OpenDistro Alarm Plugin
- (Optional) Service Authorization
- Logstash
- Intelligent O&M
- |
-
-2023-9-25
- |
-Added the following sections:
-
- |
-
-2023-03-28
- |
-- Optimized the content structure of the following sections:
- What Is Cloud Search Service?
- Related Services
- Clusters in Security Mode
- - Updated the procedure description in:
-
- Added:
-
- Deleted the following sections:
- What Is Kibana?
- What is Cerebro?
- Suggestions on Using Elasticsearch
- Viewing Monitoring Metrics
- Creating Alarm Rules
-
- |
-
-2022-09-14
- |
-Updated: Index Backup and Restoration
- |
-
-2022-07-28
- |
-Added the cluster version 7.10.2.
- |
-
-2022-06-30
- |
-Added the description about cluster version 7.9.3.
-Supported the VPC endpoint service: Accessing a Cluster Using a VPC Endpoint
-Optimized:
-
- |
-
-2020-08-30
- |
-- Added: cluster version 7.6.2.
- Added:
-
- |
-
-2019-03-15
- |
-
- |
-
-2019-01-26
- |
-Accepted in OTC-3.2/Agile-01.2019.
- |
-
-2019-01-17
- |
-
- |
-
-2018-12-17
- |
-
- |
-
-2018-12-03
- |
-Updated the screenshots in section "Why Does My ECS Fail to Connect to a Cluster?".
- |
-
-2018-11-02
- |
-Updated descriptions in the following sections based on software function changes:
-Index Backup and Restoration
- |
-
-2018-10-19
- |
-Added parameter descriptions and modified the description of operations in the following section:
-Index Backup and Restoration
- |
-
-2018-10-12
- |
-Added the description about the automatic snapshot deletion time in the following section:
-Index Backup and Restoration
- |
-
-2018-10-08
- |
-Accepted in OTC-3.2.
- |
-
-2018-09-28
- |
-
- |
-
-2018-09-14
- |
-Added Cluster List Overview.
- |
-
-2018-08-20
- |
-
- |
-
-2018-07-31
- |
-This issue is the first official release.
- |
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0056.html b/docs/css/umn/css_01_0056.html
deleted file mode 100644
index b73a9d9a..00000000
--- a/docs/css/umn/css_01_0056.html
+++ /dev/null
@@ -1,64 +0,0 @@
-
-
-Cluster List Overview
-The cluster list displays all CSS clusters. If there are a large number of clusters, these clusters will be displayed on multiple pages. You can view clusters of all statuses from the cluster list.
-
Clusters are listed in chronological order by default in the cluster list, with the most recent cluster displayed at the top. Table 1 shows the cluster parameters.
-
In the upper right corner of the cluster list, you can enter a cluster name or cluster ID and click the search icon to search for a cluster. You can also click the refresh icon in the upper right corner to refresh the cluster list, or click the export icon to download the cluster list.
-
-
Table 1 Cluster list parameter descriptionParameter
- |
-Description
- |
-
-
-Name/ID
- |
-Name and ID of a cluster. You can click a cluster name to switch to the Basic Information page. The cluster ID is automatically generated by the system and uniquely identifies a cluster.
- |
-
-Cluster Status
- |
-Status of a cluster. For details about the cluster status, see Viewing the Cluster Runtime Status and Storage Capacity Status.
- |
-
-Task Status
- |
-Status of a task, such as cluster restart, cluster capacity expansion, cluster backup, and cluster restoration.
- |
-
-Version
- |
-Elasticsearch version of the cluster.
- |
-
-Created
- |
-Time when the cluster is created.
- |
-
-Enterprise Project
- |
-Enterprise project that a cluster belongs to.
- |
-
-Private Network Address
- |
-Private network address and port number of the cluster. You can use these parameters to access the cluster. If the cluster has multiple nodes, the private network addresses and port numbers of all nodes are displayed.
- |
-
-Billing Mode
- |
-Billing mode of a cluster.
- |
-
-Operation
- |
-Operations that can be performed on a cluster, including accessing Kibana, checking metrics, restarting a cluster, and deleting a cluster. If an operation is not allowed, the button is gray.
- |
-
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0058.html b/docs/css/umn/css_01_0058.html
deleted file mode 100644
index aa08e1b3..00000000
--- a/docs/css/umn/css_01_0058.html
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
-Binding an Enterprise Project
-You can create enterprise projects based on your organizational structure. Then you can manage resources across different regions by enterprise project, add users and user groups to enterprise projects, and grant different permissions to the users and user groups. This section describes how to bind a CSS cluster to an enterprise project and how to modify an enterprise project.
-
Prerequisites
Before binding an enterprise project, you have created an enterprise project.
-
-
Binding an Enterprise Project
When creating a cluster, you can bind an existing enterprise project to the cluster, or click View Enterprise Project to go to the enterprise project management console and create a new project or view existing projects.
-
-
Modifying an Enterprise Project
For a cluster that has been created, you can modify its enterprise project based on the site requirements.
-
- Log in to the CSS management console.
- In the navigation pane on the left, select a cluster type. The cluster management page is displayed.
- In the cluster list on the displayed page, click the target cluster name to switch to the Cluster Information page.
- On the Cluster Information page, click the enterprise project name on the right of Enterprise Project. The project management page is displayed.
- On the Resources tab page, select the region of the current cluster, and select CSS for Service. In this case, the corresponding CSS cluster is displayed in the resource list.
- Select the cluster whose enterprise project you want to modify and click Remove.
- On the Remove Resource page, specify Mode and select Destination Enterprise Project, and click OK.
- After the resource is removed, you can view the modified enterprise project information on the Clusters page.
-
-
-
-
diff --git a/docs/css/umn/css_01_0060.html b/docs/css/umn/css_01_0060.html
deleted file mode 100644
index 9c671e36..00000000
--- a/docs/css/umn/css_01_0060.html
+++ /dev/null
@@ -1,88 +0,0 @@
-
-
-Managing Failed Tasks
-In the Failed Tasks dialog box, you can view the failed tasks related to a cluster, such as failures to create, restart, scale out, back up, or restore a cluster. In addition, you can view the failure cause of each task and choose to delete one or all failed tasks.
-
Viewing Failed Tasks
- Log in to the CSS management console.
- Click Clusters to switch to the Clusters page. Click the digit next to Failed Tasks to switch to the Failed Tasks dialog box.
Figure 1 Clicking the digit next to Failed Tasks
- - In the Failed Tasks dialog box, view all failed tasks of the current account. The following information about the failed tasks is displayed: Name/ID, Task Status, and Failure Time.
- Click the question mark in the Task Status column to view the failure cause of a task. You are advised to troubleshoot faults based on failure causes. For details about failure causes, see Error Code.
Figure 2 Viewing the failure cause of a task
-
-
-
Deleting a Failed Task
You can delete one or all failed tasks at a time.
-
- To delete a failed task, perform the following operations: Locate the row that contains the target task and click Delete in the Operation column. In the displayed dialog box, confirm the task you want to delete and click Yes.
- To delete all failed tasks, perform the following operations: In the Failed Tasks dialog box, click Delete All. In the displayed dialog box, confirm the information about all failed tasks and click Yes.
-
Figure 3 Deleting a failed task
-
-
Error Code
-
Table 1 Failure causesError Code
- |
-Failure Cause
- |
-Solution
- |
-
-
-CSS.6000
- |
-Failed to create the cluster because of an internal error. Please try again later. If the problem persists, contact customer service.
- |
-Please try again later or contact customer service.
- |
-
-CSS.6001
- |
-Failed to scale out the cluster because of an internal error. Please try again later. If the problem persists, contact customer service.
- |
-
-CSS.6002
- |
-Failed to restart the cluster because of an internal error. Please try again later. If the problem persists, contact customer service.
- |
-
-CSS.6003
- |
-Failed to restore the cluster because of an internal error. Please try again later. If the problem persists, contact customer service.
- |
-
-CSS.6004
- |
-Failed to create the node because of ECS exceptions (<ECS error code>). Please try again later. If the problem persists, contact customer service.
- NOTE: <ECS error code> indicates the error information reported by ECS. For details about the cause and solution, see ECS Error Code Description.
-
- |
-
-CSS.6005
- |
-Failed to initialize the service because of an internal error. Please try again later. If the problem persists, contact customer service.
- |
-
-CSS.6007
- |
-Failed to create the snapshot because of an internal error. Please try again later. If the problem persists, contact customer service.
- |
-
-CSS.6008
- |
-Failed to create the snapshot because the OBS bucket you select does not exist or has been deleted.
- |
-Modify the OBS bucket.
- |
-
-CSS.6009
- |
-Failed to restore the snapshot because the OBS bucket you select does not exist or has been deleted.
- |
-
-CSS.6010
- |
-Failed to restore the snapshot because the OBS object does not exist or has been deleted.
- |
-
-
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0061.html b/docs/css/umn/css_01_0061.html
deleted file mode 100644
index dc70004e..00000000
--- a/docs/css/umn/css_01_0061.html
+++ /dev/null
@@ -1,325 +0,0 @@
-
-
-Using the Open Distro SQL Plugin
-For Elasticsearch 6.5.4 and later versions, Open Distro for Elasticsearch SQL lets you write queries in SQL rather than in the Elasticsearch query domain-specific language (DSL).
-
If you are already familiar with SQL and do not want to learn query DSL, this feature is a great option.
-
Basic Operations
- Kibana (recommended)
- Log in to Kibana and send requests using request parameters or a request body to the _opendistro/_sql URI on the Dev Tools page.
POST _opendistro/_sql
-{
- "query": "SELECT * FROM my-index LIMIT 50"
-}
- - By default, the result is returned in the JSON structure. If you want the result to be returned in the CSV format, run the following command:
POST _opendistro/_sql?format=csv
-{
- "query": "SELECT * FROM my-index LIMIT 50"
-}
-When data is returned in the CSV format, each row corresponds to a document and each column corresponds to a field.
-
- - cURL commands
You can also run cURL commands in ECS to execute SQL statements.
-curl -XPOST https://localhost:9200/_opendistro/_sql -u username:password -k -d '{"query": "SELECT * FROM kibana_sample_data_flights LIMIT 10"}' -H 'Content-Type: application/json'
-
-
-
-
Supported Operations
Open Distro for Elasticsearch supports the following SQL operations: statements, conditions, aggregations, include and exclude fields, common functions, joins, and show.
-
- Statements
-
Table 1 StatementsStatement
- |
-Example
- |
-
-
-Select
- |
-SELECT * FROM my-index
- |
-
-Delete
- |
-DELETE FROM my-index WHERE _id=1
- |
-
-Where
- |
-SELECT * FROM my-index WHERE ['field']='value'
- |
-
-Order by
- |
-SELECT * FROM my-index ORDER BY _id asc
- |
-
-Group by
- |
-SELECT * FROM my-index GROUP BY range(age, 20,30,39)
- |
-
-Limit
- |
-SELECT * FROM my-index LIMIT 50 (default is 200)
- |
-
-Union
- |
-SELECT * FROM my-index1 UNION SELECT * FROM my-index2
- |
-
-Minus
- |
-SELECT * FROM my-index1 MINUS SELECT * FROM my-index2
- |
-
-
-
-
-
As with any complex query, large UNION and MINUS statements can strain or even crash your cluster.
-
-
-
- Conditions
-
Table 2 ConditionsCondition
- |
-Example
- |
-
-
-Like
- |
-SELECT * FROM my-index WHERE name LIKE 'j%'
- |
-
-And
- |
-SELECT * FROM my-index WHERE name LIKE 'j%' AND age > 21
- |
-
-Or
- |
-SELECT * FROM my-index WHERE name LIKE 'j%' OR age > 21
- |
-
-Count distinct
- |
-SELECT count(distinct age) FROM my-index
- |
-
-In
- |
-SELECT * FROM my-index WHERE name IN ('alejandro', 'carolina')
- |
-
-Not
- |
-SELECT * FROM my-index WHERE name NOT IN ('jane')
- |
-
-Between
- |
-SELECT * FROM my-index WHERE age BETWEEN 20 AND 30
- |
-
-Aliases
- |
-SELECT avg(age) AS Average_Age FROM my-index
- |
-
-Date
- |
-SELECT * FROM my-index WHERE birthday='1990-11-15'
- |
-
-Null
- |
-SELECT * FROM my-index WHERE name IS NULL
- |
-
-
-
-
- - Aggregations
-
Table 3 AggregationsAggregation
- |
-Example
- |
-
-
-avg()
- |
-SELECT avg(age) FROM my-index
- |
-
-count()
- |
-SELECT count(age) FROM my-index
- |
-
-max()
- |
-SELECT max(age) AS Highest_Age FROM my-index
- |
-
-min()
- |
-SELECT min(age) AS Lowest_Age FROM my-index
- |
-
-sum()
- |
-SELECT sum(age) AS Age_Sum FROM my-index
- |
-
-
-
-
-
-
- Include and exclude fields
-
Table 4 Include and exclude fieldsPattern
- |
-Example
- |
-
-
-include()
- |
-SELECT include('a*'), exclude('age') FROM my-index
- |
-
-exclude()
- |
-SELECT exclude('*name') FROM my-index
- |
-
-
-
-
-
-
- Functions
-
Table 5 FunctionsFunction
- |
-Example
- |
-
-
-floor
- |
-SELECT floor(number) AS Rounded_Down FROM my-index
- |
-
-trim
- |
-SELECT trim(name) FROM my-index
- |
-
-log
- |
-SELECT log(number) FROM my-index
- |
-
-log10
- |
-SELECT log10(number) FROM my-index
- |
-
-substring
- |
-SELECT substring(name, 2,5) FROM my-index
- |
-
-round
- |
-SELECT round(number) FROM my-index
- |
-
-sqrt
- |
-SELECT sqrt(number) FROM my-index
- |
-
-concat_ws
- |
-SELECT concat_ws(' ', age, height) AS combined FROM my-index
- |
-
-/
- |
-SELECT number / 100 FROM my-index
- |
-
-%
- |
-SELECT number % 100 FROM my-index
- |
-
-date_format
- |
-SELECT date_format(date, 'Y') FROM my-index
- |
-
-
-
-
-
You must enable fielddata in the document mapping for most string functions to work properly.
-
-
-
-
-
-
Joins
Open Distro for Elasticsearch SQL supports inner joins, left outer joins and cross joins. Joins have the following constraints:
-
- You can only join two indexes.
-
- You must use an alias for an index (for example, people p).
-
- In an ON clause, you can only use the AND conditions.
-
-
- You cannot use GROUP BY or ORDER BY to obtain results.
-
- LIMIT with OFFSET (for example, LIMIT 25 OFFSET 25) is not supported.
-
-
JDBC Driver
The Java Database Connectivity (JDBC) driver allows you to integrate Open Distro for Elasticsearch with your business intelligence (BI) applications.
-
For details about how to download and use JAR files, see GitHub Repositories.
-
-
-
-
diff --git a/docs/css/umn/css_01_0070.html b/docs/css/umn/css_01_0070.html
deleted file mode 100644
index a3cb0e92..00000000
--- a/docs/css/umn/css_01_0070.html
+++ /dev/null
@@ -1,13 +0,0 @@
-
-
-Permissions Management
-
-
-
diff --git a/docs/css/umn/css_01_0072.html b/docs/css/umn/css_01_0072.html
deleted file mode 100644
index e0da4e5c..00000000
--- a/docs/css/umn/css_01_0072.html
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-Creating a User and Granting Permissions
-This section describes how to use a group to grant permissions to a user. Figure 1 shows the process for granting permissions.
-
CSS has two types of user permissions: CSS administrator permission and read-only permission.
-
Prerequisites
Before assigning permissions to user groups, you have learned about the system policies listed in Permissions Management.
-
-
Process Flow
Figure 1 Process of granting CSS permissions
-
- Create a user group and assign permissions.
Create a user group on the IAM console, and assign the CSS permission to the group.
- - Create an IAM user and add it to a user group.
Create a user on the IAM console and add the user to the group created in 1. Create a user group and assign permissions.
- - Log in and verify permissions.
In the authorized region, perform the following operations:
-- Choose Service List > Cloud Search Service. Then click Create Cluster on the CSS console. If the cluster cannot be bought (assuming that the current permissions include only CSS ReadOnlyAccess), the CSS ReadOnlyAccess policy has already taken effect.
- Choose another service from Service List. If a message appears indicating that you have insufficient permissions to access the service (assuming that the current policy contains only CSS ReadOnlyAccess), the CSS ReadOnlyAccess policy is in effect.
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0075.html b/docs/css/umn/css_01_0075.html
deleted file mode 100644
index 1ebc9434..00000000
--- a/docs/css/umn/css_01_0075.html
+++ /dev/null
@@ -1,57 +0,0 @@
-
-
-Managing Tags
-Tags are cluster identifiers. Adding tags to clusters can help you identify and manage your cluster resources.
-
You can add tags to a cluster when creating the cluster or add them on the details page of the created cluster.
-
Managing Tags of a New Cluster
- Log in to the CSS management console.
- Click Create Cluster in the upper right corner. The Create Cluster page is displayed.
- On the Create Cluster page, set Advanced Settings to Custom. Add tags for a cluster.
You can select a predefined tag and set Tag value for the tag. You can click View Predefined Tag to switch to the TMS management console and view existing tags.
-You can also create new tags by specifying Tag key and Tag value.
-You can add a maximum of 10 tags for a CSS cluster. If the entered tag is incorrect, you can click Delete on the right of the tag to delete the tag.
-
-Table 1 Naming rules for a tag key and valueParameter
- |
-Description
- |
-
-
-Tag key
- |
-Must be unique in a cluster.
-The value cannot contain more than 64 characters.
-It can contain only numbers, letters, and the following special characters: _.:=+-@
-The value cannot start or end with a space.
-Cannot be left blank.
- |
-
-Tag value
- |
-The value cannot contain more than 64 characters.
-It can contain only numbers, letters, and the following special characters: _.:=+-@
-The value cannot start or end with a space.
-Cannot be left blank.
- |
-
-
-
-
-
-
-
Managing Tags of Existing Clusters
You can modify, delete, or add tags for a cluster.
-
- Log in to the CSS management console.
- On the Clusters page, click the name of a cluster for which you want to manage tags.
The Basic Information page is displayed.
- - In the navigation pane on the left, choose the Tags tab. You can add, modify, or delete tags.
- View
On the Tags page, you can view details about tags of the cluster, including the number of tags and the key and value of each tag.
- - Add
Click Add in the upper left corner. In the displayed Add Tag dialog box, enter the key and value of the tag to be added, and click OK.
- - Modify
You can only change the value of an existing tag.
-In the Operation column of a tag, click Edit. In the displayed Edit Tag page, enter a new tag value and click OK.
- - Delete
In the Operation column of a tag, click Delete. After confirmation, click Yes on the displayed Delete Tag page.
-
-
-
-
Searching for Clusters by Tag
- Log in to the CSS management console.
- On the Clusters page, click Search by Tag in the upper right corner of the cluster list.
- Select or enter the tag key and tag value you want to search for, and click Add to add the tag to the search text box.
You can select a tag key or tag value from their drop-down lists. The system returns a list of clusters that exactly match the tag key or tag value. If you specify multiple tags, only the clusters that match all the tags will be displayed.
-You can add a maximum of 10 tags at one time.
- - Click Search.
The system searches for the target cluster by tag key and value.
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0076.html b/docs/css/umn/css_01_0076.html
deleted file mode 100644
index e33e4b44..00000000
--- a/docs/css/umn/css_01_0076.html
+++ /dev/null
@@ -1,57 +0,0 @@
-
-
-Accessing a Cluster from a Public Network
-You can access a security cluster (Elasticsearch clusters in version 6.5.4 or later support the security mode) that has the HTTPS access enabled through the public IP address provided by the system.
-
By default, CSS uses a shared load balancer for public network access. You can use a dedicated load balancer to improve performance. For details about its configuration, see Connecting to a Dedicated Load Balancer.
-
If public network access is enabled for CSS, then EIP and bandwidth resources will be used and billed.
-
-
Configuring Public Network Access
- Log in to the CSS management console.
- On the Create Cluster page, enable Security Mode. Set the administrator password and enable HTTPS access.
- Select Automatically assign for Public IP Address and set related parameters.
-
Table 1 Public network access parametersParameter
- |
-Description
- |
-
-
-Bandwidth
- |
-Bandwidth for accessing Kibana with the public IP address
- |
-
-Access Control
- |
-If you disable this function, all IP addresses can access the cluster through the public IP address. If you enable access control, only IP addresses in the whitelist can access the cluster through the public IP address.
- |
-
-Whitelist
- |
-IP address or IP address range allowed to access a cluster. Use commas (,) to separate multiple addresses. This parameter can be configured only when Access Control is enabled.
- |
-
-
-
-
-
-
-
Managing Public Network Access
You can configure, modify, or view the public network access of a cluster, or disassociate the public IP address from a cluster.
-
- Log in to the CSS management console.
- On the Clusters page, click the name of the target cluster. On the Basic Information page that is displayed, manage the public network access configurations.
- Configuring public network access
If you enabled HTTPS but did not configure public network access during security cluster creation, you can configure it on the Basic Information page after the cluster is created.
-Click Associate next to Public IP Address, set the access bandwidth, and click OK.
-If the association fails, wait for several minutes and try again.
- - Modifying public network access
For a cluster for which you have configured public network access, you can click Edit next to Bandwidth to modify the bandwidth, or you can click Set next to Access Control to set the access control function and the whitelist for access.
- - Viewing public network access
On the Basic Information page, you can view the public IP address associated with the current cluster.
- - Disassociating a public IP address from a cluster
To disassociate the public IP address, click Disassociate next to Public IP Address.
-
-
-
-
Accessing a Cluster Through the Public IP Address
After configuring the public IP address, you can use it to access the cluster.
-
For example, run the following cURL command to view the index information in the cluster. In this example, the public access IP address of one node in the cluster is 10.62.179.32 and the port number is 9200.
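The following is a sketch of such a command for a security cluster with HTTPS enabled (replace username and password with the cluster administrator credentials; -k skips certificate verification):
curl -u username:password -k 'https://10.62.179.32:9200/_cat/indices'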
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0077.html b/docs/css/umn/css_01_0077.html
deleted file mode 100644
index 40d95727..00000000
--- a/docs/css/umn/css_01_0077.html
+++ /dev/null
@@ -1,109 +0,0 @@
-
-
-Managing Logs
-CSS provides log backup and search functions to help you locate faults. You can back up cluster logs to OBS buckets and download required log files to analyze and locate faults.
-
Log Query
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, click the name of the target cluster. The cluster information page is displayed.
- In the navigation pane on the left, choose Log Management.
- Query logs on the log management page.
Select the node, log type, and log level you want to query, and then click the search icon. The query result is displayed.
-When you search for logs, the latest 10,000 logs are matched. A maximum of 100 logs are displayed.
-
-
-
Enabling Log Backup
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, click the name of the target cluster. The cluster information page is displayed.
- Click the Logs tab and toggle on the Log Management switch.
- In the Edit Log Backup Configuration dialog box, set the parameters.
In the displayed dialog box, OBS Bucket and IAM Agency are automatically created for log backup. You can change the default value by referring to Table 1.
-If the Log Management function has been enabled for the cluster, you can click the edit icon on the right of Log Backup Configuration and modify the configuration in the displayed Edit Log Backup Configuration dialog box. For details, see Table 1.
-
-Table 1 Parameters for configuring log backupParameter
- |
-Description
- |
-Remarks
- |
-
-
-OBS Bucket
- |
-Select an OBS bucket from the drop-down list for storing logs. You can also click Create Bucket on the right to create an OBS bucket.
- |
-The OBS bucket and the cluster must be in the same region.
- NOTE: To let an IAM user access an OBS bucket, you need to grant the GetBucketStoragePolicy, GetBucketLocation, ListBucket, and ListAllMyBuckets permissions to the user.
-
- |
-
-Backup Path
- |
-Storage path of logs in the OBS bucket
- |
-The backup path configuration rules are as follows: - The backup path cannot contain the following characters: \:*?"<>|
- The backup path cannot start with a slash (/).
- The backup path cannot start or end with a period (.).
- The total length of the backup path cannot exceed 1,023 characters.
-
- |
-
-IAM Agency
- |
-IAM agency authorized by the current account for CSS to access or maintain data stored in the OBS bucket. You can also click Create IAM Agency on the right to create an IAM agency.
- |
-The IAM agency must meet the following requirements:
-- Agency Type must be Cloud service.
- Set Cloud Service to CSS.
- Mandatory policies: Tenant Administrator
- |
-
-
-
-
- - Back up logs.
- Automatically backing up logs
Click the icon on the right of Auto Backup to enable the auto backup function.
-After the automatic backup function is enabled, set the backup start time in the Configure Auto Backup dialog box. When the scheduled time arrives, the system will back up logs automatically.
-After the auto backup function is enabled, you can click the edit icon on the right of the parameter to change the backup start time.
- - Manually backing up logs
On the Log Backup tab page, click Back Up. On the displayed page, click Yes to start backup.
-If Task Status in the log backup list is Successful, the backup is successful.
-
All logs in the cluster are copied to a specified OBS path. You can view or download log files from the path of the OBS bucket.
-
-
- - Search for logs.
On the Log Search page, select the target node, log type, and log level, and click the search icon. The search results are displayed.
-When you search for logs, the latest 10,000 logs are matched. A maximum of 100 logs are displayed.
-
-
-
Viewing Logs
After backing up logs, you can click Backup Path to go to the OBS console and view the logs.
-
Backed up logs mainly include deprecation logs, run logs, index slow logs, and search slow logs. Table 2 lists the storage types of the OBS bucket.
-
-
Table 2 Log typesLog Name
- |
-Description
- |
-
-
-clustername_deprecation.log
- |
-Deprecation log
- |
-
-clustername_index_indexing_slowlog.log
- |
-Index slow log
- |
-
-clustername_index_search_slowlog.log
- |
-Search slow log
- |
-
-clustername.log
- |
-Elasticsearch run log
- |
-
-clustername_access.log
- |
-Access log
- |
-
-clustername_audit.log
- |
-Audit log
- |
-
-
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0078.html b/docs/css/umn/css_01_0078.html
deleted file mode 100644
index 22681384..00000000
--- a/docs/css/umn/css_01_0078.html
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-Viewing the Default Plugin List
-CSS clusters have default plugins. You can view the default plugin information on the console or Kibana.
-
Viewing Plugins on the Console
- Log in to the CSS management console.
- In the navigation pane, choose Clusters. Click the target cluster name and go to the Basic Information page of the cluster.
- Click the Plugins tab.
- On the Default page, view default plugins supported by the current version.
-
-
Viewing Plugins on the Kibana
- Log in to the CSS management console.
- In the navigation pane, choose Clusters. Locate the target cluster and click Access Kibana in the Operation column to log in to Kibana.
- Non-security cluster: The Kibana console is displayed.
- Security cluster: Enter the username and password on the login page and click Log In to go to the Kibana console. The default username is admin and the password is the administrator password you specified during cluster creation.
- - Go to Dev Tools and run the following command to view the cluster plugin information:
GET _cat/plugins?v
-The following is an example of the response body:
-name component version
-css-test-ess-esn-1-1 analysis-dynamic-synonym 7.6.2-xxxx-ei-css-v1.0.1
-css-test-ess-esn-1-1 analysis-icu 7.6.2-xxxx-ei-css-v1.1.6
-css-test-ess-esn-1-1 analysis-ik 7.6.2-xxxx-ei-css-v1.0.1
-......
-name indicates the cluster node name, component indicates the plugin name, and version indicates the plugin version.
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0079.html b/docs/css/umn/css_01_0079.html
deleted file mode 100644
index c62228c5..00000000
--- a/docs/css/umn/css_01_0079.html
+++ /dev/null
@@ -1,52 +0,0 @@
-
-
-Hot and Cold Data Node Switchover
-CSS provides you with cold data nodes. You can store data that requires query response in seconds on high-performance nodes and store data that requires query response in minutes on cold data nodes with large capacity and low specifications.
-
- When creating a cluster, you need to configure nodes as data nodes. When you enable the cold data node function, data nodes become hot nodes.
- You can enable the cold data node, master node, and client node functions at the same time.
- You can increase nodes and expand storage capacity of cold data nodes. The maximum storage capacity is determined by the node specifications. Local disks do not support storage capacity expansion.
-
-
Hot and Cold Data Node Switchover
If you enable cold data nodes when creating a cluster, the cold data nodes are labeled with cold. Other data nodes become hot nodes and are labeled with hot. You can specify indexes to allocate data to cold or hot nodes.
-
You can configure a template to store indices on the specified cold or hot node.
-
-To do this, log in to the Kibana console of the cluster and modify the template so that indexes whose names start with myindex are stored on cold data nodes. After the template is modified, the myindex* data is stored on the cold data nodes.
-
- For the 5.x version, run the following command to create a template:
PUT _template/test
-{
- "order": 1,
- "template": "myindex*",
- "settings": {
- "index": {
- "refresh_interval": "30s",
- "number_of_shards": "3",
- "number_of_replicas": "1",
- "routing.allocation.require.box_type": "cold"
- }
- }
-}
- - For 6.x or later versions, run the following command to create a template:
PUT _template/test
-{
- "order": 1,
- "index_patterns": "myindex*",
- "settings": {
- "refresh_interval": "30s",
- "number_of_shards": "3",
- "number_of_replicas": "1",
- "routing.allocation.require.box_type": "cold"
- }
-}
-
-
You can perform operations on the created index.
-
PUT myindex/_settings
- {
- "index.routing.allocation.require.box_type": "cold"
- }
-
You can cancel the configurations of hot and cold data nodes.
-
PUT myindex/_settings
-{
- "index.routing.allocation.require.box_type": null
- }
-
-
-
-
diff --git a/docs/css/umn/css_01_0080.html b/docs/css/umn/css_01_0080.html
deleted file mode 100644
index 77107849..00000000
--- a/docs/css/umn/css_01_0080.html
+++ /dev/null
@@ -1,111 +0,0 @@
-
-
-Configuring YML Parameters
-You can modify the elasticsearch.yml file.
-
Modifying Parameter Configurations
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, click the name of the target cluster. The cluster information page is displayed.
- Click Parameter Configurations and click Edit to modify module parameters as required.
-
Table 1 Module parametersModule Name
- |
-Parameter
- |
-Description
- |
-
-
-Cross-domain Access
- |
-http.cors.allow-credentials
- |
-Whether to return the Access-Control-Allow-Credentials of the header during cross-domain access
-Value: true or false
-Default value: false
- |
-
-http.cors.allow-origin
- |
-Origin IP address allowed for cross-domain access, for example, 122.122.122.122:9200
- |
-
-http.cors.max-age
- |
-Cache duration of the browser. The cache is automatically cleared after the time range you specify.
-Unit: s
-Default value: 1,728,000
- |
-
-http.cors.allow-headers
- |
-Headers allowed for cross-domain access, including X-Requested-With, Content-Type, and Content-Length. Use commas (,) and spaces to separate headers.
- |
-
-http.cors.enabled
- |
-Whether to allow cross-domain access
-Value: true or false
-Default value: false
- |
-
-http.cors.allow-methods
- |
-Methods allowed for cross-domain access, including OPTIONS, HEAD, GET, POST, PUT, and DELETE. Use commas (,) and spaces to separate methods.
- |
-
-Reindexing
- |
-reindex.remote.whitelist
- |
-Configured for migrating data from the current cluster to the target cluster through the reindex API. The example value is 122.122.122.122:9200.
- |
-
-Custom Cache
- |
-indices.queries.cache.size
- |
-Cache size in the query phase
-Value range: 1 to 100
-Unit: %
-Default value: 10%
- |
-
-Queue Size in a Thread Pool
- |
-thread_pool.bulk.queue_size
- |
-Queue size in the bulk thread pool. The value is an integer. You need to customize this parameter.
-Default value: 200
- |
-
-thread_pool.write.queue_size
- |
-Queue size in the write thread pool. The value is an integer. You need to customize this parameter.
-Default value: 200
- |
-
-thread_pool.force_merge.size
- |
-Queue size in the force merge thread pool. The value is an integer.
-Default value: 1
- |
-
-Customize
- |
-You can add parameters based on your needs.
- |
-Customized parameters
- NOTE: - Enter multiple values in the format as [value1, value2, value3...].
- Separate values by commas (,) and spaces.
- Colons (:) are not allowed.
-
- |
-
-
-
-
- - After the modification is complete, click Submit.In the displayed Submit Configuration dialog box, select the box indicating "I understand that the modification will take effect after the cluster is restarted." and click Yes.
If the Status is Succeeded in the parameter modification list, the modification has been saved. Up to 20 modification records can be displayed.
- - Return to the cluster list and choose More > Restart in the Operation column to restart the cluster and make the modification take effect.
- You need to restart the cluster after modification, or Configuration unupdated will be displayed in the Task Status column on the Clusters page.
- If Task Status displays Configuration error after you restart the cluster, the modification of the parameter configuration file has failed.
-
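For reference, the following sketch shows how a few of the parameters in Table 1 would look if written directly in elasticsearch.yml. The values are illustrative only; in CSS you change these settings through the console as described above, not by editing the file on the nodes.
http.cors.enabled: true
http.cors.allow-origin: "122.122.122.122:9200"
http.cors.allow-credentials: false
indices.queries.cache.size: 10%
thread_pool.write.queue_size: 200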
-
-
-
-
diff --git a/docs/css/umn/css_01_0082.html b/docs/css/umn/css_01_0082.html
deleted file mode 100644
index efa3dc17..00000000
--- a/docs/css/umn/css_01_0082.html
+++ /dev/null
@@ -1,38 +0,0 @@
-
-
-Accessing a Cluster Using a VPC Endpoint
-If the VPC endpoint service is enabled, you can use a private domain name or node IP address generated by the endpoint to access the cluster. When the VPC endpoint service is enabled, a VPC endpoint will be created by default. You can select Private Domain Name Creation as required. VPC endpoint creation requires specific permissions. For details, see "VPCEP Permissions".
-
VPC Endpoint uses a shared load balancer for intranet access. If your workloads require quick access, you are advised to connect a dedicated load balancer to the cluster. For details, see Connecting to a Dedicated Load Balancer.
-
-Public IP address access and the VPC endpoint service share the same load balancer. If you have configured a public access whitelist, access to the cluster through VPCEP (from both public and private IP addresses) is restricted by that whitelist as well. In this case, add the CIDR block 198.19.128.0/17 to the public access whitelist to allow traffic through VPCEP.
-
-
Enabling the VPC Endpoint Service
- Log in to the CSS management console.
- Click Create Cluster in the upper right corner.
- On the Create Cluster page, set Advanced Settings to Custom. Enable the VPC endpoint service.
- Private Domain Name Creation: If you enable this function, the system automatically creates a private domain name for you, which you can use to access the cluster.
- VPC Endpoint Service Whitelist: You can add an authorized account ID to the VPC endpoint service whitelist. Then you can access the cluster using the domain name or the node IP address.
- You can click Add to add multiple accounts.
- Click Delete in the Operation column to delete the accounts that are not allowed to access the cluster.
-
- If the authorized account ID is set to *, all users are allowed to access the cluster.
- You can view authorized account IDs on the My Credentials page.
-
-
-
-
Managing VPC Endpoint Service
You can enable the VPC endpoint service while creating a cluster, and also enable it by performing the following steps after cluster creation.
-
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, click the name of the target cluster.
- Click the VPC Endpoint Service tab, and turn on the button next to VPC Endpoint Service.
In the displayed dialog box, you can determine whether to enable the private domain name. Click Yes to enable the VPC endpoint service.
-
- - (Optional) Click Modify next to VPC Endpoint Service Whitelist to update the existing whitelist.
- Manage VPC endpoints.
The VPC Endpoint Service page displays all VPC endpoints connected to the current VPC endpoint service.
-Figure 1 Managing VPC endpoints
-Click Accept or Reject in the Operation column to change the node status. If you reject the connection with a VPC endpoint, you cannot access the cluster through the private domain name generated by that VPC endpoint.
-
-
-
Accessing the Cluster Using the Private Domain Name or Node IP Address
- Obtain the private domain name or node IP address.
Log in to the CSS console, click the target cluster name and go to the Cluster Information page. Click the VPC Endpoint Service tab and view the private domain name.
-
- - Run the cURL command to execute the API or call the API by using a program before accessing the cluster. For details about Elasticsearch operations and APIs, see the Elasticsearch Reference.
The ECS must meet the following requirements:
-- Sufficient disk space is allocated for the ECS.
- The ECS and the cluster must be in the same VPC. After enabling the VPC endpoint service, you can access the cluster from the ECS even when the cluster is not in the same VPC as the ECS.
- The security group of the ECS must be the same as that of the cluster.
If this requirement is not met, modify the ECS security group or configure the inbound and outbound rules of the ECS security group to allow the ECS security group to be accessed by all security groups of the cluster. For details, see Configuring Security Group Rules.
- - Configure security group rule settings of the target CSS cluster. Set Protocol to TCP and Port Range to 9200 or a port range including port 9200 for both the outbound and inbound directions.
-- If the cluster you access does not have the security mode enabled, run the following command:
curl 'http://vpcep-7439f7f6-2c66-47d4-b5f3-790db4204b8d.region01.xxxx.com:9200/_cat/indices'
- - If the cluster you access has the security mode enabled, access the cluster using HTTPS and add the username, password and -u to the cURL command.
curl -u username:password -k 'https://vpcep-7439f7f6-2c66-47d4-b5f3-790db4204b8d.region01.xxxx.com:9200/_cat/indices'
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0086.html b/docs/css/umn/css_01_0086.html
deleted file mode 100644
index 585694ea..00000000
--- a/docs/css/umn/css_01_0086.html
+++ /dev/null
@@ -1,83 +0,0 @@
-
-
-CSS Custom Policies
-Custom policies can be created to supplement the system-defined policies of CSS. For the actions supported for custom policies, see section "Permissions Policies and Supported Actions" in the Cloud Search Service API Reference.
-
You can create custom policies in either of the following ways:
-
- Visual editor: Select cloud services, actions, resources, and request conditions. You do not need to have knowledge of the policy syntax.
- JSON: Create a JSON policy or edit based on an existing policy.
-
For details about how to create custom policies, see Creating a Custom Policy. The following section provides examples of common CSS custom policies.
-
IAM permissions and data plane cluster permissions are managed separately. To enable the security capability of the data plane, you need to use the security mode.
-
-
Example Custom Policies
To let an IAM user access an OBS bucket, you need to grant the GetBucketStoragePolicy, GetBucketLocation, ListBucket, and ListAllMyBuckets permissions to the user.
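The following is a sketch of such a custom policy, assuming the standard obs:bucket action names (verify the exact action names in the OBS permission documentation before use):
{
    "Version": "1.1",
    "Statement": [
        {
            "Action": [
                "obs:bucket:GetBucketStoragePolicy",
                "obs:bucket:GetBucketLocation",
                "obs:bucket:ListBucket",
                "obs:bucket:ListAllMyBuckets"
            ],
            "Effect": "Allow"
        }
    ]
}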
-
-
Example 1: Allowing users to create a CSS cluster
{
- "Version": "1.1",
- "Statement": [
- {
- "Action": [
- "css:cluster:create",
- "vpc:securityGroups:get",
- "vpc:securityGroups:create",
- "vpc:securityGroups:delete",
- "vpc:securityGroupRules:get",
- "vpc:securityGroupRules:create",
- "vpc:securityGroupRules:delete",
- "vpc:vpcs:list",
- "vpc:privateIps:list",
- "vpc:ports:get",
- "vpc:ports:create",
- "vpc:ports:update",
- "vpc:ports:delete",
- "vpc:quotas:list",
- "vpc:subnets:get",
- "ecs:cloudServerFlavors:get",
- "ecs:serverInterfaces:use",
- "ecs:cloudServers:addNics",
- "ecs:quotas:get",
- "evs:types:get",
- "evs:quotas:get"
- ],
- "Effect": "Allow"
- }
- ]
-}
-
-
-
Example 2: Denying cluster deletion
-
A policy with only Deny permissions must be used in conjunction with other policies for it to take effect. If the permissions assigned to a user contain both Allow and Deny, the Deny permissions take precedence over the Allow permissions.
-
The following method can be used if you need to assign permissions of the CSS Admin policy to a user but you want to prevent the user from deleting clusters. Create a custom policy for denying cluster deletion, and attach both policies to the group to which the user belongs. Then, the user can perform all operations on CSS except deleting clusters. The following is an example of a deny policy:
-
{
- "Version": "1.1",
- "Statement": [
- {
- "Effect": "Deny",
- "Action": [
- "css:cluster:delete"
- ]
- }
- ]
-}
-
Example 3: Defining permissions for multiple services in a policy
-
A custom policy can contain the actions of multiple services that are of the global or project-level type. The following is an example policy containing actions of multiple services:
-
{
- "Version": "1.1",
- "Statement": [
- {
- "Action": [
- "ecs:cloudServers:resize",
- "ecs:cloudServers:delete",
- "ecs:cloudServers:delete",
- "css:cluster:restart",
- "css:*:get*",
- "css:*:list*"
- ],
- "Effect": "Allow"
- }
- ]
-}
-
-
-
diff --git a/docs/css/umn/css_01_0088.html b/docs/css/umn/css_01_0088.html
deleted file mode 100644
index 754082d5..00000000
--- a/docs/css/umn/css_01_0088.html
+++ /dev/null
@@ -1,86 +0,0 @@
-
-
-Accessing a Cluster from a Kibana Public Network
-For CSS clusters that have security mode enabled, you can enable Kibana public access. After the configuration is complete, an IP address will be provided to access Kibana of this cluster over the Internet.
-
You can configure Kibana public access during cluster creation, or after a cluster in security mode is created.
-
- You can enable Security Mode for clusters of version 6.5.4 and later versions.
- Kibana public access cannot be configured for Elasticsearch clusters created in security mode before this function was rolled out (before June 2020).
- The whitelist for Kibana public network access depends on the ELB whitelist. After you updated the whitelist, the new settings take effect immediately for new connections. For existing persistent connections using the IP addresses that have been removed from the whitelist, the new settings take effect about 1 minute after these connections are stopped.
-
-
Configuring Kibana Public Access When Creating a Cluster
- Log in to the CSS management console.
- Click Create Cluster in the upper right corner. The Create Cluster page is displayed.
- On the Create Cluster page, enable Security Mode.
- Set Advanced Settings to Custom, enable Kibana Public Access, and set parameters.
-
Table 1 Kibana public access parametersParameter
- |
-Description
- |
-
-
-Bandwidth
- |
-Bandwidth for accessing Kibana with the public IP address
-Value range: 1 to 100
-Unit: Mbit/s
- |
-
-Access Control
- |
-If you disable this function, all IP addresses can access Kibana through the public IP address. If you enable this function, only IP addresses or IP address ranges in the whitelist can access Kibana through the public IP address.
- |
-
-Whitelist
- |
-IP address or IP address range allowed to access a cluster. Use commas (,) to separate multiple addresses. This parameter can be configured only when Access Control is enabled.
-You are advised to enable this function.
- |
-
-
-
-
-After the cluster is created, click the cluster name to go to the Basic Information page. On the Kibana Public Access page, you can view the Kibana public IP address.
-
-
-
Configuring Kibana Public Access for an Existing Cluster
You can enable, disable, modify, and view Kibana public access for an existing cluster that has security mode enabled.
-
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, click the name of the target cluster.
- Click the Kibana Public Access tab. Turn on the Kibana Public Access switch to enable the Kibana public access function.
- On the displayed page, set parameters.
-
Table 2 Kibana public access parametersParameter
- |
-Description
- |
-
-
-Bandwidth
- |
-Bandwidth for accessing Kibana with the public IP address
-Value range: 1 to 100
-Unit: Mbit/s
- |
-
-Access Control
- |
-If you disable this function, all IP addresses can access Kibana through the public IP address. If you enable this function, only IP addresses or IP address ranges in the whitelist can access Kibana through the public IP address.
- |
-
-Whitelist
- |
-IP address or IP address range allowed to access a cluster. Use commas (,) to separate multiple addresses. This parameter can be configured only when Access Control is enabled.
-You are advised to enable this function.
- |
-
-
-
-
- - After you set the parameters, click OK.
-
-
Modifying Kibana Public Access
For a cluster with Kibana public access configured, you can modify its bandwidth and access control settings, or disable this function.
-
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, click the name of the target cluster.
- Click the Kibana Public Access tab to modify the Kibana public access configuration.
- Modifying bandwidth
Click Modify on the right of Bandwidth. On the Modify Bandwidth page, modify the bandwidth and click OK.
- - Modifying access control
Click Modify on the right of Access Control. On the Modify Access Control page, set Access Control and Whitelist, and click OK.
- - Disabling Kibana public access
Toggle off the Kibana Public Access switch.
-
-
-
-
Accessing Kibana with the Public IP Address
After configuring Kibana public access, you will obtain a public IP address that you can use to access Kibana of this cluster.
-
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, click the name of the target cluster.
- Click the Kibana Public Access tab to obtain the Kibana public IP address.
- Use this IP address to access Kibana of this cluster through the Internet.
-
-
-
-
diff --git a/docs/css/umn/css_01_0091.html b/docs/css/umn/css_01_0091.html
deleted file mode 100644
index 525b419d..00000000
--- a/docs/css/umn/css_01_0091.html
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
-Managing Indexes
-
-
-
diff --git a/docs/css/umn/css_01_0092.html b/docs/css/umn/css_01_0092.html
deleted file mode 100644
index 66c9076c..00000000
--- a/docs/css/umn/css_01_0092.html
+++ /dev/null
@@ -1,40 +0,0 @@
-
-
-Changing Policies
-You can change any managed index policy. ISM has constraints to ensure that policy changes do not break indexes.
-
If an index is stuck in its current status, never proceeding, and you want to update its policy immediately, make sure that the new policy includes the same status (same name, action, and order) as the old policy. In this case, ISM applies the new policy even if the policy is being executed.
-
If you update the policy without including an identical status, ISM updates the policy only after all actions in the current status finish executing. Alternatively, you can select a specific status in the old policy and have the new policy take effect.
-
To change a policy using Kibana, do the following:
-
- Under Managed Indices, select the indexes to which you want to attach the new policy.
- Click Change policy in the upper right corner. The Choose managed indices page is displayed. Configure parameters required for changing a policy.
-
Table 1 Parameters required for changing a policyParameter
- |
-Description
- |
-
-
-Managed indices
- |
-Select the indexes to which you want to attach the new policy. Multiple indexes can be selected.
- |
-
-State filters
- |
-Select an index status. When a status is selected, the new policy is attached to an index in this status.
- |
-
-New policy
- |
-Select a new policy.
- |
-
-
-
-
- - After configuration is complete, click Change.
-
-
-
diff --git a/docs/css/umn/css_01_0093.html b/docs/css/umn/css_01_0093.html
deleted file mode 100644
index 8ed64253..00000000
--- a/docs/css/umn/css_01_0093.html
+++ /dev/null
@@ -1,37 +0,0 @@
-
-
-Creating and Managing Indexes
-Clusters of version 7.6.2 or later support index state management (ISM). ISM is a plugin that allows you to automate periodic and administrative operations based on changes in the index age, index size, or number of documents. Using the ISM plugin, you can define policies that automatically handle index rollovers or deletions based on your needs.
-
The following procedure uses Elasticsearch 7.6.2 as an example. The Kibana UI varies depending on the Kibana version, but the operations are similar.
-
-
Creating an Index Policy
- Log in to Kibana and choose IM or Index Management on the left. The Index Management page is displayed.
- Click Create policy to create an index policy.
- Enter a policy ID in the Policy ID text box and enter your policy in the Define policy text box (a minimal example policy is sketched after this procedure).
Figure 1 Configuring a policy
- - Click Create.
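For reference, the following is a minimal sketch of a policy you might paste into the Define policy text box. The state names and the 30-day age threshold are illustrative assumptions, not defaults: the policy keeps an index in a hot state and deletes it once the index is older than 30 days.
{
  "policy": {
    "description": "Example: delete indexes older than 30 days",
    "default_state": "hot",
    "states": [
      {
        "name": "hot",
        "actions": [],
        "transitions": [
          { "state_name": "delete", "conditions": { "min_index_age": "30d" } }
        ]
      },
      {
        "name": "delete",
        "actions": [ { "delete": {} } ],
        "transitions": []
      }
    ]
  }
}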
-
-
Attaching a Policy to an Index
You can attach a policy to one or more indexes and add the policy ID to an index template. When you create indexes using that index template pattern, the policy will be attached to all created indexes.
-
- Method 1: Kibana commands
On the Dev Tools page of Kibana, run the following command to associate a policy ID with an index template:
-PUT _template/<template_name>
-{
- "index_patterns": ["index_name-*"],
- "settings": {
- "opendistro.index_state_management.policy_id": "policy_id"
- }
-}
-- <template_name>: Replace it with the name of a created index template.
- policy_id: Replace it with a custom policy ID.
-For details about how to create an index template, see Index Template.
- - Method 2: Kibana console
- On the Index Management page of Kibana, choose Indices.
Figure 2 Choosing Indexes
- - In the Indices list, select the target index to which you want to attach a policy.
- Click Apply policy in the upper right corner.
Figure 3 Adding a policy
- - Select the policy you created from the Policy ID drop-down list.
Figure 4 Selecting a policy
- - Click Apply.
After you attach a policy to an index, ISM creates a job that runs every 5 minutes by default, to execute the policy, check conditions, and convert the index to different statuses.
-
-
-
-
Managing Index Policies
- Click Managed Indices.
- If you want to change the policy, click Change policy. For details, see Changing Policies.
- To delete a policy, select your policy, and click Remove policy.
- To retry a policy, select your policy, and click Retry policy.
-
For details, see Index State Management.
-
-
-
-
diff --git a/docs/css/umn/css_01_0094.html b/docs/css/umn/css_01_0094.html
deleted file mode 100644
index e0407ff0..00000000
--- a/docs/css/umn/css_01_0094.html
+++ /dev/null
@@ -1,260 +0,0 @@
-
-
-Creating an Elasticsearch Cluster in Non-Security Mode
-This section describes how to create an Elasticsearch cluster in non-security mode.
-
Procedure
- Log in to the CSS management console.
- On the Dashboard page, click Create Cluster in the upper right corner. The Create page is displayed.
Alternatively, choose Clusters > Elasticsearch in the navigation tree on the left. Click Create Cluster in the upper right corner. The Create page is displayed.
- - Specify Region and AZ.
-
Table 1 Region and AZ parametersParameter
- |
-Description
- |
-
-
-Region
- |
-Select a region for the cluster from the drop-down list on the right. Currently, only eu-de and eu-nl are supported.
- |
-
-AZ
- |
-Select AZs associated with the cluster region.
-You can select a maximum of three AZs. For details, see Deploying a Cross-AZ Cluster.
- |
-
-
-
-
- - Configure basic cluster information.
-
Table 2 Basic parametersParameter
- |
-Description
- |
-
-
-Version
- |
-Select a cluster version from the drop-down list box.
- |
-
-Name
- |
-Cluster name, which contains 4 to 32 characters. Only letters, numbers, hyphens (-), and underscores (_) are allowed and the value must start with a letter.
- NOTE: After a cluster is created, you can modify the cluster name as required. Click the name of the cluster to be modified. On the displayed Basic Information page, click the edit icon next to the cluster name. After the modification is completed, click the confirm icon to save the modification. If you want to cancel the modification, click the cancel icon.
-
- |
-
-
-
-
- - Configure cluster specifications.
-
Table 3 Parameter descriptionParameter
- |
-Description
- |
-
-
-Nodes
- |
-Number of nodes in a cluster. Select a number from 1 to 32. You are advised to configure three or more nodes to ensure high availability of the cluster.
-- If neither a master node nor client node is enabled, the nodes specified by this parameter are used to serve as both the master node and client node. Nodes provide the cluster management, data storage, cluster access, and data analysis functions. To ensure data stability in the cluster, it is recommended that you set this parameter to a value no less than 3.
- If only the master node function is enabled, nodes specified by this parameter are used to store data and provide functions of client nodes.
- If both the master and client node functions are enabled, the nodes specified by this parameter are only used for storing data.
- If only the client node function is enabled, nodes specified by this parameter are used to store data and provide functions of the master node.
- |
-
-CPU Architecture
- |
-Currently, only x86 is supported. The available architecture is determined by the actual regional environment.
- |
-
-Node Specifications
- |
-Specifications of nodes in a cluster. You can select a specified specification based on your needs. Each cluster supports only one specification.
-After you select a flavor, the CPU and memory corresponding to the current specification are displayed below the parameter. For example, if you select css.medium.8, then 1 vCPUs | 8 GB will be displayed, indicating that the node flavor you select contains one vCPU and 8 GB memory.
- |
-
-Node Storage Type
- |
-In the current version, the following options are available: Common I/O, High I/O, and Ultra-high I/O.
- |
-
-Node Storage Capacity
- |
-Storage space. Its value varies with node specifications.
-The node storage capacity must be a multiple of 20.
- |
-
-Disk Encryption
- |
-If you select this option, the nodes in the cluster you create will use encrypted EVS disks to protect data. By default, this option is not selected. Note that you cannot modify this setting after the cluster is created. Therefore, exercise caution when performing the setting.
-After you select this option, you need to select an available key from the Key Name drop-down list. If no key is available, click Create/View Key to go to the KMS management console and create or modify a key. For details, see Creating a CMK.
-Enabling disk encryption has no impact on your operations on a cluster (such as accessing the cluster and importing data to the cluster). However, after you enable disk encryption, operation performance deteriorates by about 10%.
- NOTE: - If the cluster is in the Available status and the key used for disk encryption is in the Pending deletion or disable status or has been deleted after a cluster is created, cluster scale-out is not allowed. However, other operations on the cluster, such as restarting the cluster, creating snapshots, restoring the cluster, and importing data to the cluster are not affected. In addition, this key cannot be used for cluster creation in the future.
- After a cluster is created, do not delete the key used by the cluster. Otherwise, the cluster will become unavailable.
- You cannot use default master keys whose aliases end with /default in KMS to encrypt snapshots.
-
- |
-
-Master node
- |
-The master node manages all nodes in the cluster. If more than 20 nodes are required to store and analyze a large amount of data, you are advised to enable the master node to ensure cluster stability. Otherwise, you are advised to set only the Nodes parameter and use the nodes as both master and client nodes.
-After enabling the master node, specify Node Specifications, Nodes, and Node Storage Type. The value of Nodes must be an odd number equal to or greater than 3. Up to nine nodes are supported. The value of Node Storage Capacity is fixed. You can select a storage type based on your needs.
- |
-
-Client node
- |
-The client node allows clients to access clusters and analyze data. If more than 20 nodes are required to store and analyze a large amount of data, you are advised to enable the client node to ensure cluster stability. Otherwise, you are advised to set only the Nodes parameter and use the nodes as both master and client nodes.
-After enabling the client node, specify Node Specifications, Nodes and Node Storage Type. The value of Nodes ranges from 1 to 32. The value of Node Storage Capacity is fixed. You can select a storage type based on your needs.
- |
-
-Cold data node
- |
-The cold data node is used to store historical data, for which query responses can be returned in minutes. If you do not require a quick query response, store historical data on cold data nodes to reduce costs.
-After enabling the cold data node function, configure Node Specifications, Nodes, Node Storage Type, and Node Storage Capacity. The value of Nodes ranges from 1 to 32. Select Node Storage Type and Node Storage Capacity as required.
-After the cold data node is enabled, CSS automatically adds cold and hot tags to related nodes.
- |
-
-
-
-
-Figure 1 Configuring host specifications
- - Set the enterprise project.
When creating a CSS cluster, you can bind an enterprise project to the cluster if you have enabled the enterprise project function. You can select an enterprise project created by the current user from the drop-down list on the right or click View Project Management to go to the Enterprise Project Management console and create a new project or view existing projects.
- - Set network specifications of the cluster.
-
Table 4 Parameter descriptionParameter
- |
-Description
- |
-
-
-VPC
- |
-A VPC is a secure, isolated, and logical network environment.
-Select the target VPC. Click View VPC to enter the VPC management console and view the created VPC names and IDs. If no VPCs are available, create one.
- NOTE: The VPC must have a CIDR block configured. Otherwise, cluster creation will fail. By default, a VPC contains a CIDR block.
-
- |
-
-Subnet
- |
-A subnet provides dedicated network resources that are isolated from other networks, improving network security.
-Select the target subnet. You can access the VPC management console to view the names and IDs of the existing subnets in the VPC.
- |
-
-Security Group
- |
-A security group implements access control for ECSs that have the same security protection requirements in a VPC. To view more details about the security group, click View Security Group.
- NOTE: - For cluster access purposes, ensure that the security group allows access to port 9200.
- If your cluster version is 7.6.2 or later, ensure that the security group allows all the ports used for communication between nodes. If such settings cannot be configured, ensure that at least access to port 9300 is allowed.
- After port 9300 is enabled, if the cluster disk usage is high, delete expired data to release disk storage space.
-
- |
-
-Security Mode
- |
-Security mode is disabled.
- |
-
-
-
-
-Figure 2 Configuring network specifications
- - Click Next: Configure Advanced Settings. Configure the automatic snapshot creation and other functions.
- Configure Cluster Snapshot. Set basic configuration and snapshot configuration.
The cluster snapshot function is enabled by default. You can also disable this function as required. To store automatic snapshots in OBS, an agency will be created to access OBS. Additional cost will be incurred if snapshots are stored in standard storage.
-
-Table 5 Cluster snapshot parameterParameter
- |
-Description
- |
-
-
-OBS bucket
- |
-Select an OBS bucket for storing snapshots from the drop-down list box. You can also click Create Bucket on the right to create an OBS bucket. For details, see Creating a Bucket.
-The created or existing OBS bucket must meet the following requirements:
-- Storage Class is Standard or Warm.
- Region must be the same as that of the created cluster.
- |
-
-Backup Path
- |
-Storage path of the snapshot in the OBS bucket.
-The backup path configuration rules are as follows: - The backup path cannot contain the following characters: \:*?"<>|
- The backup path cannot start with a slash (/).
- The backup path cannot start or end with a period (.).
- The backup path cannot contain more than 1,023 characters.
-
- |
-
-IAM Agency
- |
-IAM agency that the current account authorizes CSS to assume in order to access or maintain the data stored in the OBS bucket. You can also click Create IAM Agency on the right to create an IAM agency. For details, see Creating an Agency.
-The created or existing IAM agency must meet the following requirements:
-- Agency Type must be Cloud service.
- Set Cloud Service to CSS.
- The agency must have the Tenant Administrator permission for the OBS(S3) project in OBS(S3).
- |
-
-Snapshot Encryption
- |
-Indicates whether to enable the snapshot encryption function. Enabling the snapshot encryption function ensures the security of your snapshot data.
-After the snapshot encryption function is enabled, select a key from the Key Name drop-down list. If no key is available, click Create/View Key to switch to the KMS management console to create or modify a key. For details, see Creating a CMK.
-- You cannot use default master keys whose aliases end with /default in KMS to encrypt snapshots.
- If a snapshot has been stored in the OBS bucket, you cannot modify the parameters used for encrypting the snapshot.
- If the key used for encryption is in the Pending deletion or Disabled state, you cannot perform backup and restoration operations on the cluster. Specifically, you cannot create new snapshots for the cluster or use existing snapshots to restore clusters. In this case, switch to the KMS management console and change the status of the target key to Enabled so that backup and restoration operations are allowed on the cluster.
- If you delete the key used for encryption, you cannot perform backup and restoration operations on the cluster, and the deleted key cannot be recovered. Therefore, exercise caution when deleting a key. If the key is deleted or is in the Pending deletion or Disabled state, automatic snapshot creation is still triggered based on the configured snapshot policy, but all automatic snapshot creation tasks will fail and are displayed in the failed task list in the Failed Tasks dialog box. In this scenario, you are advised to disable automatic snapshot creation.
- |
-
-
-
-
-
-Table 6 Automatic snapshot creation parameterParameter
- |
-Description
- |
-
-
-Snapshot Name Prefix
- |
-The snapshot name prefix contains 1 to 32 characters and must start with a lowercase letter. Only lowercase letters, digits, hyphens (-), and underscores (_) are allowed. A snapshot name consists of a snapshot name prefix and a timestamp, for example, snapshot-1566921603720.
- |
-
-Time Zone
- |
-Time zone for the backup time, which cannot be changed. Specify the backup start time based on this time zone.
- |
-
-Backup Start Time
- |
-The time when the backup starts automatically every day. You can specify this parameter only in full hours, for example, 00:00 or 01:00. The value ranges from 00:00 to 23:00. Select a time from the drop-down list.
- |
-
-Retention Period (days)
- |
-The number of days that snapshots are retained in the OBS bucket. The value ranges from 1 to 90. You can specify this parameter as required. The system automatically deletes expired snapshots every hour at half past the hour.
- |
-
-
-
-
-Figure 3 Setting parameters for automatic snapshot creation
- - Configure advanced settings for the cluster.
- Default: The VPC Endpoint Service, Kibana Public Access, and Tag functions are disabled by default. You can manually enable these functions after the cluster is created.
- Custom: You can enable the VPC Endpoint Service and Tag functions as required.
-
-Table 7 Parameters for advanced settingsParameter
- |
-Description
- |
-
-
-VPC Endpoint Service
- |
-After enabling this function, you can obtain a private domain name for accessing the cluster in the same VPC. For details, see Accessing a Cluster Using a VPC Endpoint.
- |
-
-Kibana Public Access
- |
-Kibana public access is not supported for clusters in non-security mode, so Kibana of such clusters cannot be accessed through the Internet.
- |
-
-Tag
- |
-Adding tags to clusters can help you identify and manage your cluster resources. You can customize tags or use tags predefined by Tag Management Service (TMS). For details, see Managing Tags.
- |
-
-
-
-
-
- - Click Next: Confirm. Check the configuration and click Next to create a cluster.
- Click Back to Cluster List to switch to the Clusters page. The cluster you created is listed on the displayed page and its status is Creating. If the cluster is successfully created, its status will change to Available.
If the cluster creation fails, create the cluster again.
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0107.html b/docs/css/umn/css_01_0107.html
deleted file mode 100644
index def99a26..00000000
--- a/docs/css/umn/css_01_0107.html
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-Kibana Platform
-
-
-
diff --git a/docs/css/umn/css_01_0108.html b/docs/css/umn/css_01_0108.html
deleted file mode 100644
index c74c3ced..00000000
--- a/docs/css/umn/css_01_0108.html
+++ /dev/null
@@ -1,15 +0,0 @@
-
-
-Logging In to Kibana
-After creating a CSS cluster, you can log in to Kibana through the console or public network.
-
Procedure
Logging in to the console
- Log in to the CSS management console.
- On the Clusters page, locate the target cluster and click Access Kibana in the Operation column to go to the Kibana login page.
- Non-security cluster: The Kibana console is displayed.
- Security cluster: Enter the username and password on the login page and click Log In to go to the Kibana console. The default username is admin and the password is the one specified during cluster creation.
- - After the login is successful, you can access the Elasticsearch cluster through Kibana.
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0109.html b/docs/css/umn/css_01_0109.html
deleted file mode 100644
index c0d71daa..00000000
--- a/docs/css/umn/css_01_0109.html
+++ /dev/null
@@ -1,83 +0,0 @@
-
-
-Creating a User and Granting Permissions by Using Kibana
-CSS uses the opendistro_security plug-in to provide security cluster capabilities. The opendistro_security plug-in is built based on the RBAC model. RBAC involves three core concepts: user, action, and role. RBAC simplifies the relationship between users and actions, simplifies permission management, and facilitates permission expansion and maintenance. The following figure shows the relationship between the three.
-
Figure 1 User, action, and role
-
-
Table 1 ParametersParameter
- |
-Description
- |
-
-
-User
- |
-A user can send operation requests to Elasticsearch clusters. The user has credentials such as username and password, and zero or multiple backend roles and custom attributes.
- |
-
-Role
- |
-A role is a combination of permissions and action groups, including operation permissions on clusters, indexes, documents, or fields.
- |
-
-Permission
- |
-A single permission, for example, creating an index (indices:admin/create).
- |
-
-Role mapping
- |
-A user is assigned a role after successful authentication. Role mapping maps a role to a user (or a backend role). For example, the mapping from kibana_user (role) to jdoe (user) means that John Doe obtains all permissions of kibana_user after authentication. Similarly, the mapping from all_access (role) to admin (backend role) means that any user with the backend role admin (from the LDAP/Active Directory server) has all the permissions of the all_access role after authentication. You can map a role to multiple users or backend roles.
- |
-
-Action group
- |
-A group of permissions. For example, the predefined SEARCH action group allows roles to use the _search and _msearch APIs.
- |
-
-
-
-
-
In addition to the RBAC model, Elasticsearch has an important concept called tenant. RBAC is used to manage user authorization, and tenants are used for information sharing across tenants. In a tenant space, IAM users can share information such as dashboard data and index patterns.
-
This section describes how to use Kibana to create a user and grant permissions to the user. Kibana can be used to create users and grant permissions only when the security mode is enabled for the cluster.
-
- The Kibana UI varies depending on the Kibana version, but their operations are similar. This section takes Kibana 7.6.2 as an example to describe the procedure.
- You can customize the username, role name, and tenant name in Kibana.
-
-
-
Logging in to Kibana
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
Enter the administrator username and password to log in to Kibana.
- Username: admin (default administrator account name)
- Password: Enter the administrator password you set when creating the cluster in security mode.
-
Figure 2 Login page
-
-
-
-
Creating a User
Log in to Kibana and create a user on the Security page.
-
- After a successful login, choose Security in the navigation tree on the left of the Kibana operation page. The Security page is displayed.
Figure 3 Accessing the Security page
- - Choose Authentication Backends > Internal Users Database.
Figure 4 Adding a user (1)
- - On the Internal Users Database page, click the add button. The page for adding user information is displayed.
Figure 5 Adding a user (2)
- - On the user creation page, specify Username, Password, and Repeat password, and click Submit.
-
The user will be displayed in the user list.
-
-
Creating a Role and Granting Permissions
Create a role and grant permissions to the role.
-
- Click Roles.
Figure 6 Adding a role
- - On the Open Distro Security Roles page, click the add button.
- - On the Overview tab page, set the role name.
Figure 7 Entering a role name
- - On the Cluster Permissions tab page, set CSS cluster permissions. Set cluster permissions based on service requirements. If this parameter is not specified for a role, the role has no cluster-level permissions.
- Permissions: Action Groups: You can click Add Action Group to set cluster permissions. For example, if you select the read permission for a cluster, you can only view information such as the cluster status and cluster nodes.
- Permissions: Single Permissions: Select Show Advanced and click Add Single Permission to set more refined permissions for the cluster. For example, if this parameter is set to indices:data/read, you can only read specified indexes.
-Figure 8 Cluster Permissions tab page
- - Configure index permissions on the Index Permissions page.
- Index patterns: Set this parameter to the name of the index whose permission needs to be configured. For example, my_store.
Use different names for the index and the user.
-
- - Permissions: Action Groups: Click Add Action Group and set the permission as required. For example, select the read-only permission Search.
- - On the Tenant Permissions page, set role permissions based on service requirements.
- Global permissions: Click Add Field to set the kibana read and write permissions of a role, for example, kibana_all_read or kibana_all_write.
- Tenant permissions: Click Add tenant pattern to add a tenant pattern and set the kibana_all_read or kibana_all_write permission for the new tenant pattern.
Figure 9 Tenant Permissions tab
-
-
- - Click Save Role Definition and you can view the configured role.
-
-
Configuring a Role for a User
After creating a role and granting permissions to the role, you need to map the role to a user so that the user can obtain the permissions of the mapped role.
-
- Click Role Mappings. On the displayed Role Mappings page, map the roles.
Figure 10 Role mapping
- - On the Role Mappings page, click the add button to select a role and add users.
- Role: Select the name of the role to be mapped.
- Users: Click Add User and enter the name of the user whose role is mapped.
-Figure 11 Users and roles
- - Click Submit.
- Verify that the configuration takes effect in Kibana.
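-The same configuration can also be scripted instead of clicked through. Security-mode clusters use the opendistro_security plug-in, which exposes a REST API that the admin user can call from Kibana Dev Tools. The following is only a minimal sketch based on the open-source opendistro_security API; whether these endpoints are reachable on your cluster is an assumption, and the user jdoe, the password, the role my_store_role, and the index my_store are illustrative names, not values defined by this guide.
-PUT _opendistro/_security/api/internalusers/jdoe
-{
-  "password": "Example@Password123"
-}
-
-PUT _opendistro/_security/api/roles/my_store_role
-{
-  "cluster_permissions": ["cluster_composite_ops_ro"],
-  "index_permissions": [
-    {
-      "index_patterns": ["my_store"],
-      "allowed_actions": ["read"]
-    }
-  ]
-}
-
-PUT _opendistro/_security/api/rolesmapping/my_store_role
-{
-  "users": ["jdoe"]
-}
-The three requests mirror the UI steps above: create an internal user, create a role with cluster and index permissions, and map the role to the user.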
-
-
-
-
diff --git a/docs/css/umn/css_01_0111.html b/docs/css/umn/css_01_0111.html
deleted file mode 100644
index 26d6fbe9..00000000
--- a/docs/css/umn/css_01_0111.html
+++ /dev/null
@@ -1,31 +0,0 @@
-
-
-Enhanced Cluster Features
-
-
-
diff --git a/docs/css/umn/css_01_0112.html b/docs/css/umn/css_01_0112.html
deleted file mode 100644
index 2bf60f19..00000000
--- a/docs/css/umn/css_01_0112.html
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-Storage-Compute Decoupling
-
-
-
diff --git a/docs/css/umn/css_01_0113.html b/docs/css/umn/css_01_0113.html
deleted file mode 100644
index 503b47d7..00000000
--- a/docs/css/umn/css_01_0113.html
+++ /dev/null
@@ -1,15 +0,0 @@
-
-
-Context
-You can store hot data on SSD to achieve the optimal query performance, and store historical data in OBS to reduce data storage costs.
-
Application Scenarios
A large volume of data is written to and stored in SSDs. If historical data is no longer updated (that is, it has become cold data) and its QPS decreases, you can call CSS APIs to dump it from SSDs to OBS buckets, as sketched below. This operation freezes the corresponding indexes, decoupling compute from storage.
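-For reference, the dump is triggered with the freeze API covered in Freezing an Index; my_index below is only an example index name:
-POST my_index/_freeze_low_cost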
-
-
Constraints
- Currently, only Elasticsearch clusters of the versions 7.6.2 and 7.10.2 support decoupled storage and computing.
- The storage-compute decoupling feature depends on OBS. Therefore, you must comply with the restrictions on OBS bandwidth and QPS. If these restrictions are violated, the performance of queries on OBS will deteriorate. For example, the speed of restoring shards and querying data will become slow.
-
-
-
-
diff --git a/docs/css/umn/css_01_0114.html b/docs/css/umn/css_01_0114.html
deleted file mode 100644
index f2553af4..00000000
--- a/docs/css/umn/css_01_0114.html
+++ /dev/null
@@ -1,521 +0,0 @@
-
-
-Freezing an Index
-Precautions
- - Before freezing an index, ensure that no data is being written to it. The index is set to read-only before it is frozen, and any subsequent writes to it will fail.
- After an index is frozen,
- It becomes read-only.
- The index data will be dumped to OBS. This process occupies network bandwidth.
- The query latency of a dumped index will increase. Complex aggregations and queries that read a large volume of data take notably longer.
- It cannot be unfrozen. That is, a read-only index cannot be changed to writable.
- After the freezing is complete, the index data in your local disks will be deleted.
-
-
-
Procedure
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- Click Dev Tools in the navigation tree on the left.
- Run the following command to freeze a specified index and dump it to OBS:
POST ${index_name}/_freeze_low_cost
-
-Table 1 Parameter descriptionParameter
- |
-Description
- |
-
-
-index_name
- |
-Name of the index to be frozen.
- |
-
-
-
-
-Information similar to the following is displayed:
-{
- "freeze_uuid": "pdsRgUtSTymVDWR_HoTGFw"
-}
-
-Table 2 Response parameterParameter
- |
-Description
- |
-
-
-freeze_uuid
- |
-After an index freezing request is submitted, an asynchronous job will be started. The request returns the asynchronous job ID, which can be used to query the progress of the asynchronous job.
- |
-
-
-
-
-
After an index freezing request is submitted, data cannot be written to the index. During the index freezing, query requests are not affected. After the freezing is complete, the index is closed and then opened. During this period, the index cannot be queried, and the cluster may be in the red status for a short time. The index is restored after being opened.
-
- - Run the following command to check the freezing task progress:
GET _freeze_low_cost_progress/${freeze_uuid}
-
-Table 3 Parameter descriptionParameter
- |
-Description
- |
-
-
-freeze_uuid
- |
-Asynchronous task ID, which is obtained in step 4.
- |
-
-
-
-
-Information similar to the following is displayed:
-{
-
- "stage" : "STARTED",
- "shards_stats" : {
- "INIT" : 0,
- "FAILURE" : 0,
- "DONE" : 0,
- "STARTED" : 3,
- "ABORTED" : 0
- },
- "indices" : {
- "data1" : [
- {
- "uuid" : "7OS-G1-tRke2jHZPlckexg",
- "index" : {
- "name" : "data1",
- "index_id" : "4b5PHXJITLaS6AurImfQ9A",
- "shard" : 2
- },
- "start_ms" : 1611972010852,
- "end_ms" : -1,
- "total_time" : "10.5s",
- "total_time_in_millis" : 10505,
- "stage" : "STARTED",
- "failure" : null,
- "size" : {
- "total_bytes" : 3211446689,
- "finished_bytes" : 222491269,
- "percent" : "6.0%"
- },
- "file" : {
- "total_files" : 271,
- "finished_files" : 12,
- "percent" : "4.0%"
- },
- "rate_limit" : {
- "paused_times" : 1,
- "paused_nanos" : 946460970
- }
- },
- {
- "uuid" : "7OS-G1-tRke2jHZPlckexg",
- "index" : {
- "name" : "data1",
- "index_id" : "4b5PHXJITLaS6AurImfQ9A",
- "shard" : 0
- },
- "start_ms" : 1611972010998,
- "end_ms" : -1,
- "total_time" : "10.3s",
- "total_time_in_millis" : 10359,
- "stage" : "STARTED",
- "failure" : null,
- "size" : {
- "total_bytes" : 3221418186,
- "finished_bytes" : 272347118,
- "percent" : "8.0%"
- },
- "file" : {
- "total_files" : 372,
- "finished_files" : 16,
- "percent" : "4.0%"
- },
- "rate_limit" : {
- "paused_times" : 5,
- "paused_nanos" : 8269016764
- }
- },
- {
- "uuid" : "7OS-G1-tRke2jHZPlckexg",
- "index" : {
- "name" : "data1",
- "index_id" : "4b5PHXJITLaS6AurImfQ9A",
- "shard" : 1
- },
- "start_ms" : 1611972011021,
- "end_ms" : -1,
- "total_time" : "10.3s",
- "total_time_in_millis" : 10336,
- "stage" : "STARTED",
- "failure" : null,
- "size" : {
- "total_bytes" : 3220787498,
- "finished_bytes" : 305789614,
- "percent" : "9.0%"
- },
- "file" : {
- "total_files" : 323,
- "finished_files" : 14,
- "percent" : "4.0%"
- },
- "rate_limit" : {
- "paused_times" : 3,
- "paused_nanos" : 6057933087
- }
- }
- ]
- }
-}
-
-Table 4 Response parametersParameter
- |
-Description
- |
-
-
-stage
- |
-Status. Its value can be:
-- INIT: The instance has just started or is being initialized.
- FAILURE: failed
- DONE: complete
- STARTED: started
- ABORTED: Canceled. This field is reserved.
- |
-
-shards_stats
- |
-Numbers of shards in each state.
- |
-
-indices
- |
-Index status details.
- |
-
-
-
-
-
-Table 5 Return values of indicesParameter
- |
-Description
- |
-
-
-uuid
- |
-UUID of the freezing operation
- |
-
-index
- |
-Index and shard information
- |
-
-start_ms
- |
-Start time
- |
-
-end_ms
- |
-End time. If the task has not ended yet, -1 is displayed.
- |
-
-total_time
- |
-Time spent
- |
-
-total_time_in_millis
- |
-Time spent, in milliseconds
- |
-
-stage
- |
-Status of the current shard.
- |
-
-failure
- |
-Failure cause. If no failure occurs, null is displayed.
- |
-
-size.total_bytes
- |
-Size of files to be frozen, in bytes
- |
-
-size.finished_bytes
- |
-Frozen bytes
- |
-
-size.percent
- |
-Percentage of frozen bytes
- |
-
-file.total_files
- |
-Number of files to be frozen
- |
-
-file.finished_files
- |
-Number of frozen files
- |
-
-file.percent
- |
-Percentage of frozen files
- |
-
-rate_limit.paused_times
- |
-Number of times that freezing is suspended due to rate limit
- |
-
-rate_limit.paused_nanos
- |
-Duration of freezing task suspension due to rate limit, in nanoseconds
- |
-
-
-
-
-The following parameters are added to a frozen index. For details, see Table 6.
-
-Table 6 Frozen index parametersParameter
- |
-Description
- |
-
-
-index.frozen_low_cost
- |
-Whether an index is frozen. The value is true.
- |
-
-index.blocks.write
- |
-Whether data writing is denied in a frozen index. The value is true.
- |
-
-index.store.type
- |
-Storage type of an index. The value is obs.
- |
-
-
-
-
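-For example, you can confirm these settings on a frozen index with a standard Elasticsearch settings query (data1 is the example index from the response above):
-GET data1/_settings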
- - After an index is frozen, its data will be cached. For details about the cache, see Configuring Cache. Run the following command to check the current cache status:
GET _frozen_stats
-GET _frozen_stats/${node_id}
-
-Table 7 Parameter descriptionParameter
- |
-Description
- |
-
-
-node_id
- |
-Node ID, which can be used to obtain the cache status of a node.
- |
-
-
-
-
-Information similar to the following is displayed:
-{
- "_nodes" : {
- "total" : 3,
- "successful" : 3,
- "failed" : 0
- },
- "cluster_name" : "css-zzz1",
- "nodes" : {
- "7uwKO38RRoaON37YsXhCYw" : {
- "name" : "css-zzz1-ess-esn-2-1",
- "transport_address" : "10.0.0.247:9300",
- "host" : "10.0.0.247",
- "ip" : "10.0.0.247",
- "block_cache" : {
- "default" : {
- "type" : "memory",
- "block_cache_capacity" : 8192,
- "block_cache_blocksize" : 8192,
- "block_cache_size" : 12,
- "block_cache_hit" : 14,
- "block_cache_miss" : 0,
- "block_cache_eviction" : 0,
- "block_cache_store_fail" : 0
- }
- },
- "obs_stats" : {
- "list" : {
- "obs_list_count" : 17,
- "obs_list_ms" : 265,
- "obs_list_avg_ms" : 15
- },
- "get_meta" : {
- "obs_get_meta_count" : 79,
- "obs_get_meta_ms" : 183,
- "obs_get_meta_avg_ms" : 2
- },
- "get_obj" : {
- "obs_get_obj_count" : 12,
- "obs_get_obj_ms" : 123,
- "obs_get_obj_avg_ms" : 10
- },
- "put_obj" : {
- "obs_put_obj_count" : 12,
- "obs_put_obj_ms" : 2451,
- "obs_put_obj_avg_ms" : 204
- },
- "obs_op_total" : {
- "obs_op_total_ms" : 3022,
- "obs_op_total_count" : 120,
- "obs_op_avg_ms" : 25
- }
- },
- "reader_cache" : {
- "hit_count" : 0,
- "miss_count" : 1,
- "load_success_count" : 1,
- "load_exception_count" : 0,
- "total_load_time" : 291194714,
- "eviction_count" : 0
- }
- },
- "73EDpEqoQES749umJqxOzQ" : {
- "name" : "css-zzz1-ess-esn-3-1",
- "transport_address" : "10.0.0.201:9300",
- "host" : "10.0.0.201",
- "ip" : "10.0.0.201",
- "block_cache" : {
- "default" : {
- "type" : "memory",
- "block_cache_capacity" : 8192,
- "block_cache_blocksize" : 8192,
- "block_cache_size" : 12,
- "block_cache_hit" : 14,
- "block_cache_miss" : 0,
- "block_cache_eviction" : 0,
- "block_cache_store_fail" : 0
- }
- },
- "obs_stats" : {
- "list" : {
- "obs_list_count" : 17,
- "obs_list_ms" : 309,
- "obs_list_avg_ms" : 18
- },
- "get_meta" : {
- "obs_get_meta_count" : 79,
- "obs_get_meta_ms" : 216,
- "obs_get_meta_avg_ms" : 2
- },
- "get_obj" : {
- "obs_get_obj_count" : 12,
- "obs_get_obj_ms" : 140,
- "obs_get_obj_avg_ms" : 11
- },
- "put_obj" : {
- "obs_put_obj_count" : 12,
- "obs_put_obj_ms" : 1081,
- "obs_put_obj_avg_ms" : 90
- },
- "obs_op_total" : {
- "obs_op_total_ms" : 1746,
- "obs_op_total_count" : 120,
- "obs_op_avg_ms" : 14
- }
- },
- "reader_cache" : {
- "hit_count" : 0,
- "miss_count" : 1,
- "load_success_count" : 1,
- "load_exception_count" : 0,
- "total_load_time" : 367179751,
- "eviction_count" : 0
- }
- },
- "EF8WoLCUQbqJl1Pkqo9-OA" : {
- "name" : "css-zzz1-ess-esn-1-1",
- "transport_address" : "10.0.0.18:9300",
- "host" : "10.0.0.18",
- "ip" : "10.0.0.18",
- "block_cache" : {
- "default" : {
- "type" : "memory",
- "block_cache_capacity" : 8192,
- "block_cache_blocksize" : 8192,
- "block_cache_size" : 12,
- "block_cache_hit" : 14,
- "block_cache_miss" : 0,
- "block_cache_eviction" : 0,
- "block_cache_store_fail" : 0
- }
- },
- "obs_stats" : {
- "list" : {
- "obs_list_count" : 17,
- "obs_list_ms" : 220,
- "obs_list_avg_ms" : 12
- },
- "get_meta" : {
- "obs_get_meta_count" : 79,
- "obs_get_meta_ms" : 139,
- "obs_get_meta_avg_ms" : 1
- },
- "get_obj" : {
- "obs_get_obj_count" : 12,
- "obs_get_obj_ms" : 82,
- "obs_get_obj_avg_ms" : 6
- },
- "put_obj" : {
- "obs_put_obj_count" : 12,
- "obs_put_obj_ms" : 879,
- "obs_put_obj_avg_ms" : 73
- },
- "obs_op_total" : {
- "obs_op_total_ms" : 1320,
- "obs_op_total_count" : 120,
- "obs_op_avg_ms" : 11
- }
- },
- "reader_cache" : {
- "hit_count" : 0,
- "miss_count" : 1,
- "load_success_count" : 1,
- "load_exception_count" : 0,
- "total_load_time" : 235706838,
- "eviction_count" : 0
- }
- }
- }
-}
-
- - Run the following command to reset the cache status:
POST _frozen_stats/reset
-Information similar to the following is displayed:
-{
- "_nodes" : {
- "total" : 1,
- "successful" : 1,
- "failed" : 0
- },
- "cluster_name" : "Es-0325-007_01",
- "nodes" : {
- "mqTdk2YRSPyOSXfesREFSg" : {
- "result" : "ok"
- }
- }
-}
-
This command is used for debugging performance issues. After resetting the statistics, you can run the statistics query again to observe the cache behavior. You do not need to run this command during normal service running.
-
- - Run the following command to check all the frozen indexes:
GET _cat/freeze_indices
-Information similar to the following is displayed:
-green open data2 0bNtxWDtRbOSkS4JYaUgMQ 3 0 5 0 7.9kb 7.9kb
-green open data3 oYMLvw31QnyasqUNuyP6RA 3 0 51 0 23.5kb 23.5kb
-
The parameters and return values of this command are the same as those of _cat/indices of Elasticsearch.
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0116.html b/docs/css/umn/css_01_0116.html
deleted file mode 100644
index 61f404c7..00000000
--- a/docs/css/umn/css_01_0116.html
+++ /dev/null
@@ -1,139 +0,0 @@
-
-
-Configuring Cache
-After data is dumped to OBS, some data is cached to reduce access to OBS and improve Elasticsearch query performance. Data that is requested for the first time is obtained from OBS. The obtained data is cached in the memory. In subsequent queries, the system searches for data in the cache first. Data can be cached in memory or files.
-
Elasticsearch accesses different files in different modes. The cache system supports multi-level cache and uses blocks of different sizes to cache different files. For example, a large number of small blocks are used to cache .fdx and .tip files, and a small number of large blocks are used to cache .fdt files.
-
-
Table 1 Cache configurationsParameter
- |
-Type
- |
-Description
- |
-
-
-low_cost.obs.blockcache.names
- |
-Array
- |
-The cache system supports multi-level cache for data of different access granularities. This configuration lists the names of all caches. If this parameter is not set, the system has a cache named default. To customize the configuration, ensure there is a cache named default.
-Default value: default
- |
-
-low_cost.obs.blockcache.<NAME>.type
- |
-ENUM
- |
-Cache type, which can be memory or file.
-If it is set to memory, certain memory will be occupied. If it is set to file, cache will be stored in disks. You are advised to use ultra-high I/O disks to improve cache performance.
-Default value: memory
- |
-
-low_cost.obs.blockcache.<NAME>.blockshift
- |
-Integer
- |
-Size of each block in the cache. Its value is the power-of-two exponent of the block size, that is, the block size is 2^blockshift bytes. For example, if this parameter is set to 16, the block size is 2^16 bytes, that is, 65536 bytes (64 KB).
-Default value: 13 (8 KB)
- |
-
-low_cost.obs.blockcache.<NAME>.bank.count
- |
-Integer
- |
-Number of cache partitions.
-Default value: 1
- |
-
-low_cost.obs.blockcache.<NAME>.number.blocks.perbank
- |
-Integer
- |
-Number of blocks included in each cache partition.
-Default value: 8192
- |
-
-low_cost.obs.blockcache.<NAME>.exclude.file.types
- |
-Array
- |
-Extensions of files that are not cached. If the extensions of certain files are neither in the exclude list nor in the include list, they are stored in the default cache.
- |
-
-low_cost.obs.blockcache.<NAME>.file.types
- |
-Array
- |
-Extensions of cached files. If the extensions of certain files are neither in the exclude list nor in the include list, they are stored in the default cache.
- |
-
-
-
-
-
The following is a common cache configuration. It uses two levels of caches, default and large. The default cache uses 64 KB blocks and has a total of 30 x 4096 blocks. It is used to cache files except .fdt files. The large cache uses 2 MB blocks and contains 5 x 1000 blocks. It is used to cache .fdx, .dvd, and .tip files.
-
low_cost.obs.blockcache.names: ["default", "large"]
-low_cost.obs.blockcache.default.type: file
-low_cost.obs.blockcache.default.blockshift: 16
-low_cost.obs.blockcache.default.number.blocks.perbank: 4096
-low_cost.obs.blockcache.default.bank.count: 30
-low_cost.obs.blockcache.default.exclude.file.types: ["fdt"]
-
-low_cost.obs.blockcache.large.type: file
-low_cost.obs.blockcache.large.blockshift: 21
-low_cost.obs.blockcache.large.number.blocks.perbank: 1000
-low_cost.obs.blockcache.large.bank.count: 5
-low_cost.obs.blockcache.large.file.types: ["fdx", "dvd", "tip"]
-
-
-
Table 2 Other parametersParameter
- |
-Type
- |
-Description
- |
-
-
-index.frozen.obs.max_bytes_per_sec
- |
-String
- |
-Maximum rate of uploading files to OBS during freezing. It takes effect immediately after you complete configuration.
-Default value: 150MB
- |
-
-low_cost.obs.index.upload.threshold.use.multipart
- |
-String
- |
-If the file size exceeds the value of this parameter during freezing, the multipart upload function of OBS is used.
-Default value: 1GB
- |
-
-index.frozen.reader.cache.expire.duration.seconds
- |
-Integer
- |
-Timeout duration.
-To reduce the heap memory occupied by frozen indexes, the reader caches data for a period of time after the index shard is started, and stops caching after it times out.
-Default value: 300s
- |
-
-index.frozen.reader.cache.max.size
- |
-Integer
- |
-Maximum cache size.
-Default value: 100
- |
-
-
-
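-The index.frozen.obs.max_bytes_per_sec parameter above is described as taking effect immediately after configuration. Assuming it can be updated like other dynamic index.* settings (an assumption, not something this guide states explicitly), throttling the OBS upload rate for one index would look like the following sketch:
-PUT my_index/_settings
-{
-  "index.frozen.obs.max_bytes_per_sec": "100MB"
-}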
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0117.html b/docs/css/umn/css_01_0117.html
deleted file mode 100644
index e04e6d61..00000000
--- a/docs/css/umn/css_01_0117.html
+++ /dev/null
@@ -1,29 +0,0 @@
-
-
-Vector Retrieval
-
-
-
diff --git a/docs/css/umn/css_01_0118.html b/docs/css/umn/css_01_0118.html
deleted file mode 100644
index 44f81cfc..00000000
--- a/docs/css/umn/css_01_0118.html
+++ /dev/null
@@ -1,22 +0,0 @@
-
-
-Description
-Image recognition and retrieval, video search, and personalized recommendation impose high requirements on the latency and accuracy of high-dimensional vector retrieval. To facilitate large-scale vector search, CSS provides a vector search feature powered by a vector search engine and the Elasticsearch plug-in mechanism.
-
Principles
Vector search works in a way similar to traditional search. To improve vector search performance, we need to:
-
- Narrow down the matched scope
Similar to traditional text search, vector search uses indexes to accelerate retrieval instead of scanning all data. Traditional text search uses inverted indexes to filter out irrelevant documents, whereas vector search builds indexes for vectors so that irrelevant vectors can be skipped, narrowing down the search scope.
- - Reduce the complexity of calculating a single vector
The vector search method can quantize and approximate high dimensional vectors first. By doing this, you can acquire a smaller and more relevant data set. Then more sophisticated algorithms are applied to this smaller data set to perform computation and sorting. This way, complex computation is performed on only part of the vectors, and efficiency is improved.
-
-
Vector search retrieves the k-nearest neighbors (KNN) of a query vector in a given vector data set using a specific distance metric. Generally, CSS focuses on Approximate Nearest Neighbor (ANN) search, because an exact KNN search requires excessive computational resources.
-
-
Functions
The engine integrates a variety of vector indexes, such as brute-force search, Hierarchical Navigable Small World (HNSW) graphs, product quantization, and IVF-HNSW. It also supports multiple similarity calculation methods, such as Euclidean, inner product, cosine, and Hamming. The recall rate and retrieval performance of the engine are better than those of open-source engines. It can meet the requirements for high performance, high precision, low costs, and multi-modal computation.
-
The search engine also supports all the capabilities of the native Elasticsearch, including distribution, multi-replica, error recovery, snapshot, and permission control. The engine is compatible with the native Elasticsearch ecosystem, including the cluster monitoring tool Cerebro, the visualization tool Kibana, and the real-time data ingestion tool Logstash. Several client languages, such as Python, Java, Go, and C++, are supported.
-
-
Constraints
- Only clusters of version 7.6.2 and 7.10.2 support vector search.
- The vector search plug-in performs in-memory computing and requires more memory than common indexes do. You are advised to use memory-optimized computing specifications.
-
-
-
-
diff --git a/docs/css/umn/css_01_0121.html b/docs/css/umn/css_01_0121.html
deleted file mode 100644
index 82184a28..00000000
--- a/docs/css/umn/css_01_0121.html
+++ /dev/null
@@ -1,230 +0,0 @@
-
-
-Creating a Vector Index
-
-
Creating a Vector Index
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- Click Dev Tools in the navigation tree on the left and run the following command to create a vector index.
Create an index named my_index that contains a vector field my_vector and a text field my_label. A graph index is created for the vector field, and Euclidean distance is used to measure similarity.
-PUT my_index
-{
- "settings": {
- "index": {
- "vector": true
- }
- },
- "mappings": {
- "properties": {
- "my_vector": {
- "type": "vector",
- "dimension": 2,
- "indexing": true,
- "algorithm": "GRAPH",
- "metric": "euclidean"
- },
- "my_label": {
- "type": "text"
- }
- }
- }
-}
-
-Table 1 Parameters for creating an indexType
- |
-Parameter
- |
-Description
- |
-
-
-Index settings parameters
- |
-vector
- |
-To use a vector index, set this parameter to true.
- |
-
-Field mappings parameters
-
- |
-type
- |
-Field type, for example, vector.
- |
-
-dimension
- |
-Vector dimension.
-Default value: 768. The dimension cannot be changed after the field is created.
-Value range: [1, 4096]
- |
-
-indexing
- |
-Whether to enable vector index acceleration.
-The value can be: - false: disables vector index acceleration. If this parameter is set to false, vector data is written only to docvalues, and only ScriptScore and Rescore can be used for vector query.
- true: enables vector index acceleration. If this parameter is set to true, an extra vector index is created. The index algorithm is specified by the algorithm field and VectorQuery can be used for data query.
-
-Default value: false
- |
-
-algorithm
- |
-Index algorithm. This parameter is valid only when indexing is set to true.
-The value can be: - FLAT: brute-force algorithm that calculates the distance between the target vector and all vectors in sequence. The algorithm relies on sheer computing power and its recall rate reaches 100%. You can use this algorithm if you require high recall accuracy.
- GRAPH: Hierarchical Navigable Small Worlds (HNSW) algorithm for graph indexes. This algorithm is mainly used in scenarios where high performance and precision are required and the data records of a single shard is fewer than 10 million.
- GRAPH_PQ: combination of the HNSW algorithm and the PQ algorithm. The PQ algorithm reduces the storage overhead of original vectors, so that HNSW can easily search for data among hundreds of millions of records.
- IVF_GRAPH: combination of IVF and HNSW. The entire space is divided into multiple cluster centroids, which makes search much faster but slightly inaccurate. You can use this algorithm if you require high performance when searching for data among hundreds of millions of records.
- IVF_GRAPH_PQ: combination of the PQ algorithm with the IVF or HNSW algorithm to further improve the system capacity and reduce the system overhead. This algorithm is applicable to scenarios where there are more than 1 billion files in shards and high retrieval performance is required.
-
-
- |
-
-Other optional parameters
- |
-If indexing is set to true, CSS provides optional parameters for the vector index to achieve higher query performance or precision. For details, see Table 2.
- |
-
-metric
- |
-Method of calculating the distance between vectors.
-The value can be:
-- euclidean: Euclidean distance
- inner_product: inner product distance
- cosine: cosine distance
- hamming: Hamming distance, which can be used only when dim_type is set to binary.
-Default value: euclidean
- |
-
-dim_type
- |
-Type of the vector dimension value.
-The value can be binary or float (default).
- |
-
-
-
-
-
-Table 2 Optional parametersType
- |
-Parameter
- |
-Description
- |
-
-
-Graph index configuration parameters
- |
-neighbors
- |
-Number of neighbors of each vector in a graph index. The default value is 64. A larger value indicates higher query precision, but also a larger index and slower build and query speed.
-Value range: [10, 255]
- |
-
-shrink
- |
-Cropping coefficient during HNSW build. The default value is 1.0f.
-Value range: (0.1, 10)
- |
-
-scaling
- |
-Scaling ratio of the upper-layer graph nodes during HNSW build. The default value is 50.
-Value range: (0, 128]
- |
-
-efc
- |
-Queue size of the neighboring node during HNSW build. The default value is 200. A larger value indicates a higher precision and slower build speed.
-Value range: (0, 100000]
- |
-
-max_scan_num
- |
-Maximum number of nodes that can be scanned. The default value is 10000. A larger value indicates a higher precision and slower indexing speed.
-Value range: (0, 1000000]
- |
-
-PQ index configuration parameters
- |
-centroid_num
- |
-Number of cluster centroids of each fragment. The default value is 255.
-Value range: (0, 65535]
- |
-
-fragment_num
- |
-Number of fragments. The default value is 0. The plug-in automatically sets the number of fragments based on the vector length.
-Value range: [0, 4096]
- |
-
-
-
-
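-For example, a graph index that explicitly sets some of the optional build parameters above could look like the following sketch. It assumes these parameters are specified in the field mapping next to algorithm and metric; the index name my_graph_index is illustrative, and the values shown are the documented defaults.
-PUT my_graph_index
-{
-  "settings": {
-    "index": {
-      "vector": true
-    }
-  },
-  "mappings": {
-    "properties": {
-      "my_vector": {
-        "type": "vector",
-        "dimension": 2,
-        "indexing": true,
-        "algorithm": "GRAPH",
-        "metric": "euclidean",
-        "neighbors": 64,
-        "efc": 200
-      }
-    }
-  }
-}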
-
-
-
Importing Vector Data
Run the following command to import vector data. When writing vector data to the my_index index, you need to specify the vector field name and vector data.
-
-
- If the input vector data is a Base64 string encoded using little endian:
When writing binary vectors or high dimensional vectors that have a large number of valid bits, the Base64 encoding format is efficient for data transmission and parsing.
POST my_index/_doc
-{
- "my_vector": "AACAPwAAAEA="
-}
-
- - To write a large amount of data, bulk operations are recommended.
POST my_index/_bulk
-{"index": {}}
-{"my_vector": [1.0, 2.0], "my_label": "red"}
-{"index": {}}
-{"my_vector": [2.0, 2.0], "my_label": "green"}
-{"index": {}}
-{"my_vector": [2.0, 3.0], "my_label": "red"}
-
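-After a bulk import, you can force a refresh and verify the document count with standard Elasticsearch APIs; this is only a quick sanity check:
-POST my_index/_refresh
-GET my_index/_count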
-
-
Advanced Cluster Configurations
- When importing data offline, you are advised to set refresh_interval of indexes to -1 to disable automatic index refreshing and improve batch write performance.
- You are advised to set number_of_replicas to 0. After the offline data import is complete, you can modify the parameter value as needed.
- - The parameters of other advanced functions are as follows:
-
Table 3 Cluster parametersParameter
- |
-Description
- |
-
-
-native.cache.circuit_breaker.enabled
- |
-Whether to enable the circuit breaker for off-heap memory.
-Default value: true
- |
-
-native.cache.circuit_breaker.cpu.limit
- |
-Upper limit of off-heap memory usage of the vector index.
-For example, if the overall memory of a host is 128 GB and the heap memory occupies 31 GB, the default upper limit of the off-heap memory usage is 43.65 GB, that is, (128 - 31) x 45%. If the off-heap memory usage exceeds its upper limit, the circuit breaker will be triggered.
-Default value: 45%
- |
-
-native.cache.expire.enabled
- |
-Whether to enable the cache expiration policy. If this parameter is set to true, some cache items that have not been accessed for a long time will be cleared.
-Value: true or false
-Default value: false
- |
-
-native.cache.expire.time
- |
-Expiration time.
-Default value: 24h
- |
-
-native.vector.index_threads
- |
-Number of threads used for creating underlying indexes. Each shard uses multiple threads. Set a relatively small value to avoid resource preemption caused by the build queries of too many threads.
-Default value: 4
- |
-
-
-
-
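-The first two recommendations above (disabling automatic refresh and replicas during offline import) can be applied with a standard index settings update and reverted after the import finishes. A minimal sketch; the values to restore afterwards depend on your own requirements:
-PUT my_index/_settings
-{
-  "index": {
-    "refresh_interval": "-1",
-    "number_of_replicas": 0
-  }
-}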
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0122.html b/docs/css/umn/css_01_0122.html
deleted file mode 100644
index e189316b..00000000
--- a/docs/css/umn/css_01_0122.html
+++ /dev/null
@@ -1,74 +0,0 @@
-
-
-Cluster Planning for Vector Retrieval
-Off-heap memory is used for index construction and query in vector retrieval. Therefore, the required cluster capacity is related to the index type and off-heap memory size. You can estimate the off-heap memory required by full indexing to select proper cluster specifications.
-
There are different methods for estimating the size of off-heap memory required by different types of indexes. The calculation formulas are as follows:
- Graph Index
mem_needs = (dim × dim_size + neighbors × 4) × num + delta
-
If you need to update indexes in real time, also consider the off-heap memory overhead of vector index construction and automatic merging. The actual mem_needs is at least 1.5 to 2 times the estimate above.
-
- - PQ Index
mem_needs = (frag_num × frag_size) × num + delta
- - FLAT and IVF Indexes
mem_needs = dim × dim_size × num + delta
-
-
-
Table 1 Parameter descriptionParameter
- |
-Description
- |
-
-
-dim
- |
-Vector dimensions
- |
-
-neighbors
- |
-Number of neighbors of a graph node. The default value is 64.
- |
-
-dim_size
- |
-Number of bytes required by each dimension. The default value is four bytes in the float type.
- |
-
-num
- |
-Total number of vectors
- |
-
-delta
- |
-Metadata size. This parameter can be left blank.
- |
-
-frag_num
- |
-Number of vector segments during quantization and coding. If this parameter is not specified when an index is created, the value is determined by the vector dimension dim.
-if dim <= 256:
- frag_num = dim / 4
-elif dim <= 512:
- frag_num = dim / 8
-else :
- frag_num = 64
- |
-
-frag_size
- |
-Size of the center point during quantization and coding. The default value is 1. If the value of frag_num is greater than 256, the value of frag_size is 2.
- |
-
-
-
-
-
-
These calculation methods can estimate the size of off-heap memory required by a complete vector index. To determine cluster specifications, you also need to consider the heap memory overhead of each node.
-
Heap memory allocation policy: The size of the heap memory of each node is half of the node physical memory, and the maximum size is 31 GB.
-
For example, if you create a Graph index for the SIFT10M dataset, set dim to 128, dim_size to 4, neighbors to default value 64, and num to 10 million, the off-heap memory required by the Graph index is as follows:
-
mem_needs = (128 × 4 + 64 × 4) × 10,000,000 bytes = 7,680,000,000 bytes ≈ 7.7 GB
-
Considering the overhead of heap memory, a single server with 8 vCPUs and 16 GB memory is recommended. If real-time write or update is required, you need to apply for larger memory.
-
-
-
diff --git a/docs/css/umn/css_01_0123.html b/docs/css/umn/css_01_0123.html
deleted file mode 100644
index e3ee41fd..00000000
--- a/docs/css/umn/css_01_0123.html
+++ /dev/null
@@ -1,297 +0,0 @@
-
-
-Querying Vectors
-Standard Query
Standard vector query syntax is provided for vector fields with vector indexes. The following command returns the n (specified by size/topk) data records closest to the query vector.
-
POST my_index/_search
-{
- "size":2,
- "_source": false,
- "query": {
- "vector": {
- "my_vector": {
- "vector": [1, 1],
- "topk":2
- }
- }
- }
-}
-
-
Table 1 Parameters for standard queryParameter
- |
-Description
- |
-
-
-vector (the first one)
- |
-Indicates that the query type is VectorQuery.
- |
-
-my_vector
- |
-Indicates the name of the vector field you want to query.
- |
-
-vector (the second one)
- |
-Indicates the vector value you want to query, which can be an array or a Base64 string
- |
-
-topk
- |
-Number of most similar results to return. It is generally the same as the value of size.
- |
-
-Other optional parameters
- |
-Optional query parameters. You can adjust the vector index query parameters to achieve higher query performance or precision. For details, see Table 2.
- |
-
-
-
-
-
-
Table 2 Optional query parametersType
- |
-Parameter
- |
-Description
- |
-
-
-Graph index configuration parameters
- |
-ef
- |
-Queue size of the neighboring node during the query. A larger value indicates a higher query precision and slower query speed. The default value is 200.
-Value range: (0, 100000]
- |
-
-max_scan_num
- |
-Maximum number of scanned nodes. A larger value indicates a higher query precision and slower query speed. The default value is 10000.
-Value range: (0, 1000000]
- |
-
-IVF index configuration parameters
- |
-nprobe
- |
-Number of center points. A larger value indicates a higher query precision and slower query speed. The default value is 100.
-Value range: (0, 100000]
- |
-
-
-
-
-
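-For example, a standard query that raises the graph search queue size could look like the following sketch. It assumes the optional parameters in Table 2 are passed inside the vector clause next to vector and topk.
-POST my_index/_search
-{
-  "size": 2,
-  "_source": false,
-  "query": {
-    "vector": {
-      "my_vector": {
-        "vector": [1, 1],
-        "topk": 2,
-        "ef": 500
-      }
-    }
-  }
-}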
-
Compound Query
Vector search can be used together with other Elasticsearch subqueries, such as Boolean query and post-filtering, for compound query.
-
In the following two examples, the top 10 (topk) results closest to the query vector are retrieved first, and the filter retains only the results whose my_label field is red.
-
- Example of a Boolean query
POST my_index/_search
-{
- "size": 10,
- "query": {
- "bool": {
- "must": {
- "vector": {
- "my_vector": {
- "vector": [1, 2],
- "topk": 10
- }
- }
- },
- "filter": {
- "term": { "my_label": "red" }
- }
- }
- }
-}
- - Example of post-filtering
GET my_index/_search
-{
- "size": 10,
- "query": {
- "vector": {
- "my_vector": {
- "vector": [1, 2],
- "topk": 10
- }
- }
- },
- "post_filter": {
- "term": { "my_label": "red" }
- }
-}
-
-
-
ScriptScore Query
You can use script_score to perform nearest neighbor search (NNS) on vectors. The query syntax is provided below.
-
The pre-filtering condition can be any query. script_score traverses only the pre-filtered results, calculates the vector similarity, and sorts and returns the results. The performance of this query depends on the size of the intermediate result set after the pre-filtering. If the pre-filtering condition is set to match_all, brute-force search is performed on all data.
-
POST my_index/_search
- {
- "size":2,
- "query": {
- "script_score": {
- "query": {
- "match_all": {}
- },
- "script": {
- "source": "vector_score",
- "lang": "vector",
- "params": {
- "field": "my_vector",
- "vector": [1.0, 2.0],
- "metric": "euclidean"
- }
- }
- }
- }
- }
-
-
Table 3 script_score parametersParameter
- |
-Description
- |
-
-
-source
- |
-Script description. Its value is vector_score if the vector similarity is used for scoring.
- |
-
-lang
- |
-Script syntax description. Its value is vector.
- |
-
-field
- |
-Vector field name
- |
-
-vector
- |
-Vector data to be queried
- |
-
-metric
- |
-Measurement method, which can be euclidean, inner_product, cosine, and hamming.
-Default value: euclidean
- |
-
-
-
-
-
-
Re-Score Query
If the GRAPH_PQ or IVF_GRAPH_PQ index is used, the query results are sorted based on the asymmetric distance calculated by PQ. CSS supports re-scoring and ranking of query results to improve the recall rate.
-
Assuming that my_index is a PQ index, an example of re-scoring the query results is as follows:
-
GET my_index/_search
- {
- "size": 10,
- "query": {
- "vector": {
- "my_vector": {
- "vector": [1.0, 2.0],
- "topk": 100
- }
- }
- },
- "rescore": {
- "window_size": 100,
- "vector_rescore": {
- "field": "my_vector",
- "vector": [1.0, 2.0],
- "metric": "euclidean"
- }
- }
- }
-
-
Table 4 Rescore parameter descriptionParameter
- |
-Description
- |
-
-
-window_size
- |
-Vector retrieval returns topk search results and ranks the first window_size results.
- |
-
-field
- |
-Vector field name
- |
-
-vector
- |
-Vector data to be queried
- |
-
-metric
- |
-Measurement method, which can be euclidean, inner_product, cosine, and hamming.
-Default value: euclidean
- |
-
-
-
-
-
-
Painless Syntax Extension
CSS extension supports multiple vector distance calculation functions, which can be directly used in customized painless scripts to build flexible re-score formulas.
-
The following is an example:
-
POST my_index/_search
-{
- "size": 10,
- "query": {
- "script_score": {
- "query": {
- "match_all": {}
- },
- "script": {
- "source": "1 / (1 + euclidean(params.vector, doc[params.field]))",
- "params": {
- "field": "my_vector",
- "vector": [1, 2]
- }
- }
- }
- }
-}
-
The following table lists the distance calculation functions supported by the CSS.
-
-
Function Signature
- |
-Description
- |
-
-
-euclidean(Float[], DocValues)
- |
-Euclidean distance function
- |
-
-cosine(Float[], DocValues)
- |
-Cosine similarity function
- |
-
-innerproduct(Float[], DocValues)
- |
-Inner product function
- |
-
-hamming(String, DocValues)
- |
-Hamming distance function. Only vectors whose dim_type is binary are supported. The input query vector must be a Base64-encoded character string.
- |
-
-
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0124.html b/docs/css/umn/css_01_0124.html
deleted file mode 100644
index 7a76a822..00000000
--- a/docs/css/umn/css_01_0124.html
+++ /dev/null
@@ -1,86 +0,0 @@
-
-
-(Optional) Pre-Building and Registering a Center Point Vector
-When you perform the operations in Creating a Vector Index, if the IVF_GRAPH or IVF_GRAPH_PQ index algorithm is selected, you need to pre-build and register a center point vector index first.
-
Context
The vector index acceleration algorithms IVF_GRAPH and IVF_GRAPH_PQ are suitable for ultra-large-scale computing. These two algorithms allow you to narrow down the query range by dividing a vector space into subspaces through clustering or random sampling. Before pre-build, you need to obtain all center point vectors by clustering or random sampling.
-
-Then, use the center point vectors to pre-build a GRAPH or GRAPH_PQ index and register it with the Elasticsearch cluster. All nodes in the cluster can share the index file. Reusing the center point index among shards effectively reduces the training overhead and the number of center point index queries, improving write and query performance.
-
-
Procedure
- On the Clusters page, locate the target cluster, and click Access Kibana in the Operation column.
- Click Dev Tools in the navigation tree on the left.
- Create a center point index table.
- For example, if the created index is named my_dict, number_of_shards of the index must be set to 1. Otherwise, the index cannot be registered.
- If you want to use the IVF_GRAPH index, set algorithm of the center point index to GRAPH.
- If you want to use the IVF_GRAPH_PQ index, set algorithm of the center point index to GRAPH_PQ.
-PUT my_dict
- {
- "settings": {
- "index": {
- "vector": true
- },
- "number_of_shards": 1,
- "number_of_replicas": 0
- },
- "mappings": {
- "properties": {
- "my_vector": {
- "type": "vector",
- "dimension": 2,
- "indexing": true,
- "algorithm": "GRAPH",
- "metric": "euclidean"
- }
- }
- }
- }
- - Write the center point vector to the created index.
Write the center point vector obtained through sampling or clustering into the created my_dict index by referring to Importing Vector Data.
- - Call the registration API.
Register the created my_dict index as a Dict object with a globally unique name (dict_name).
-PUT _vector/register/my_dict
- {
- "dict_name": "my_dict"
- }
- - Create an IVF_GRAPH or IVF_GRAPH_PQ index.
You do not need to specify the dimension and metric information. Simply specify the registered dictionary name.
-PUT my_index
- {
- "settings": {
- "index": {
- "vector": true
- }
- },
- "mappings": {
- "properties": {
- "my_vector": {
- "type": "vector",
- "indexing": true,
- "algorithm": "IVF_GRAPH",
- "dict_name": "my_dict",
- "offload_ivf": false
- }
- }
- }
- }
-
-Table 1 Field mappings parametersParameter
- |
-Description
- |
-
-
-dict_name
- |
-Specifies the name of the center point index that this index depends on. The vector dimension and metric of this index are the same as those of the Dict index.
- |
-
-offload_ivf
- |
-Offloads the IVF inverted index from the underlying vector engine to Elasticsearch. This reduces off-heap memory usage and the overhead of write and merge operations, but query performance deteriorates. You can use the default value.
-Value: true or false
-Default value: false
- |
-
-
-
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0125.html b/docs/css/umn/css_01_0125.html
deleted file mode 100644
index d2863e5e..00000000
--- a/docs/css/umn/css_01_0125.html
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
-Kibana Usage Restrictions
-You can customize the username, role name, and tenant name in Kibana.
-
-
-
diff --git a/docs/css/umn/css_01_0126.html b/docs/css/umn/css_01_0126.html
deleted file mode 100644
index c8053766..00000000
--- a/docs/css/umn/css_01_0126.html
+++ /dev/null
@@ -1,51 +0,0 @@
-
-
-Optimizing the Performance of Vector Retrieval
-Optimizing Write Performance
-
-
Optimizing Query Performance
-
- If the off-heap memory required by the vector index exceeds the circuit breaker limit, index entry swap-in and swap-out occur, which affects the query performance. In this case, you can increase the circuit breaker threshold of off-heap memory.
PUT _cluster/settings
-{
- "persistent": {
- "native.cache.circuit_breaker.cpu.limit": "75%"
- }
-}
- - If the end-to-end latency is greater than the took value in the returned result, you can configure _source to reduce the fdt file size and reduce the fetch overhead.
PUT my_index
-{
- "settings": {
- "index": {
- "vector": "true"
- },
- "index.soft_deletes.enabled": false
- },
- "mappings": {
- "_source": {
- "excludes": ["my_vector"]
- },
- "properties": {
- "my_vector": {
- "type": "vector",
- "dimension": 128,
- "indexing": true,
- "algorithm": "GRAPH",
- "metric": "euclidean"
- }
- }
- }
-}
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0130.html b/docs/css/umn/css_01_0130.html
deleted file mode 100644
index 3751894b..00000000
--- a/docs/css/umn/css_01_0130.html
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
-Managing the Vector Index Cache
-The vector retrieval engine is developed in C++ and uses off-heap memory. You can use the following APIs to manage the index cache.
-
- View cache statistics.
GET /_vector/stats
-In the implementation of the vector plug-in, the vector index is the same as other types of Lucene indexes. Each segment constructs and stores an index file. During query, the index file is loaded to the non-heap memory. The plug-in uses the cache mechanism to manage the non-heap memory. You can use this API to query the non-heap memory usage, number of cache hits, and number of loading times.
- - Preload the vector index.
PUT /_vector/warmup/{index_name}
-You can use this API to preload the vector index specified by index_name to the off-heap memory for query.
- - Clear the cache.
PUT /_vector/clear/cache
-PUT /_vector/clear/cache/index_name
-The caching mechanism limits the non-heap memory usage when vector indexes are used. When the total index size exceeds the cache size limit, index entry swap-in and swap-out occur, which affects the query performance. You can use this API to clear unnecessary index cache to ensure the query performance of hot data indexes.
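-For example, applying these APIs to the my_index index used earlier in this chapter (the index name is illustrative):
-GET /_vector/stats
-PUT /_vector/warmup/my_index
-PUT /_vector/clear/cache/my_index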
-
-
-
-
diff --git a/docs/css/umn/css_01_0131.html b/docs/css/umn/css_01_0131.html
deleted file mode 100644
index 3463e806..00000000
--- a/docs/css/umn/css_01_0131.html
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
-Large Query Isolation
-
-
-
diff --git a/docs/css/umn/css_01_0132.html b/docs/css/umn/css_01_0132.html
deleted file mode 100644
index 7524edae..00000000
--- a/docs/css/umn/css_01_0132.html
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-Context
-The large query isolation feature allows you to manage large queries separately. You can isolate query requests that consume a large amount of memory or take a long time. If the heap memory usage of a node is too high, the interrupt control program is triggered: it selects a large query based on the policies you configured and cancels that query's running tasks.
-
You can also configure a global query timeout duration. Long queries will be intercepted.
-
Currently, only versions 7.6.2 and 7.10.2 support large query isolation.
-
-
-
-
diff --git a/docs/css/umn/css_01_0133.html b/docs/css/umn/css_01_0133.html
deleted file mode 100644
index 75f308a3..00000000
--- a/docs/css/umn/css_01_0133.html
+++ /dev/null
@@ -1,219 +0,0 @@
-
-
-Procedure
-The large query isolation and global timeout features are disabled by default. If you enable them, the configuration will take effect immediately. Perform the following steps to configure the features:
-
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster, and click Access Kibana in the Operation column.
- In the navigation pane of Kibana on the left, choose Dev Tools. Run the following command to enable large query isolation and global timeout features:
PUT _cluster/settings
-{
- "persistent": {
- "search.isolator.enabled": true,
- "search.isolator.time.enabled": true
- }
-}
-Each of the two features has an independent switch, along with the parameters listed below.
-
-Table 1 Parameters for large query isolation and global timeout duration
-Switch
- |
-Parameter
- |
-Description
- |
-
-
-search.isolator.enabled
- |
-search.isolator.memory.task.limit
-search.isolator.time.management
- |
-Thresholds of a shard query task. A query task exceeding one of these thresholds is regarded as a large query task.
- |
-
-search.isolator.memory.pool.limit
-search.isolator.memory.heap.limit
-search.isolator.count.limit
- |
-Resource usage thresholds in the isolation pool. If the resource usage of a query task exceeds one of these thresholds, the task will be intercepted.
- NOTE: search.isolator.memory.heap.limit defines the limit on the heap memory consumed by write, query, and other operations of a node. If the limit is exceeded, large query tasks in the isolation pool will be interrupted.
-
- |
-
-search.isolator.strategy
-search.isolator.strategy.ratio
- |
-Policy for selecting a query task in the isolation pool.
- |
-
-search.isolator.time.enabled
- |
-search.isolator.time.limit
- |
-Global timeout interval of query tasks.
- |
-
-
-
-
- - Configure the large query isolation and global timeout duration separately.
- Configure the thresholds of a shard query task. A query task exceeding one of these thresholds is regarded as a large query task.
PUT _cluster/settings
-{
- "persistent": {
- "search.isolator.memory.task.limit": "50MB",
- "search.isolator.time.management": "10s"
- }
-}
-
-Table 2 Parameter description
-Parameter
- |
-Data Type
- |
-Description
- |
-
-
-search.isolator.memory.task.limit
- |
-String
- |
-Threshold of the memory requested by a query task to perform aggregation or other operations. If the requested memory exceeds the threshold, the task will be isolated and observed.
-Value range: 0b to the maximum heap memory of a node
-Default value: 50MB
- NOTE: You can run the following command to query the current heap memory and the maximum heap memory of a cluster:
- GET _cat/nodes?&h=id,ip,port,r,ramPercent,ramCurrent,heapMax,heapCurrent
-
- |
-
-search.isolator.time.management
- |
-String
- |
-Threshold of the query duration, counted from when cluster resources start being used for the query. If the duration of a query exceeds this threshold, the query is isolated and observed.
-Value range: ≥ 0ms
-Default value: 10s
- |
-
-
-
-
- - Configure the resource usage thresholds in the isolation pool. If the resource usage of a query task exceeds one of these thresholds, the task will be intercepted.
PUT _cluster/settings
-{
- "persistent": {
- "search.isolator.memory.pool.limit": "50%",
- "search.isolator.memory.heap.limit": "90%",
- "search.isolator.count.limit": 1000
- }
-}
-
-Table 3 Parameter description
-Parameter
- |
-Data Type
- |
-Description
- |
-
-
-search.isolator.memory.pool.limit
- |
-String
- |
-Threshold of the heap memory percentage of the current node. If the total memory requested by large query tasks in the isolation pool exceeds the threshold, the interrupt control program will be triggered to cancel one of the tasks.
-Value range: 0.0 to 100.0%
-Default value: 50%
- |
-
-search.isolator.memory.heap.limit
- |
-String
- |
-Heap memory threshold of the current node. If the heap memory of the node exceeds the threshold, the interrupt control program will be triggered to cancel a large query task in the isolation pool.
-Value range: 0.0 to 100.0%
-Default value: 90%
- |
-
-search.isolator.count.limit
- |
-Integer
- |
-Threshold of the number of large query tasks in the current node isolation pool. If the number of observed query tasks exceeds the threshold, the interrupt control program will be triggered to stop accepting new large queries. New large query requests will be directly canceled.
-Value range: 10–50000
-Default value: 1000
- |
-
-
-
-
-
In addition to search.isolator.memory.pool.limit and search.isolator.count.limit parameters, you can configure search.isolator.memory.task.limit and search.isolator.time.management to control the number of query tasks that enter the isolation pool.
-
- - Policy for selecting a query task in the isolation pool.
PUT _cluster/settings
-{
- "persistent": {
- "search.isolator.strategy": "fair",
- "search.isolator.strategy.ratio": "0.5%"
- }
-}
-
-Parameter
- |
-Data Type
- |
-Description
- |
-
-
-search.isolator.strategy
- |
-String
- |
-Policy for selecting large queries when the interrupt control program is triggered. The selected query will be interrupted.
- NOTE: The large query isolation pool is checked every second until the heap memory is within the safe range.
-
-Values: fair, mem-first, or time-first
-- mem-first: The query task that uses the most heap memory in the isolation pool is interrupted.
-- time-first: The query task that has been running for the longest time in the isolation pool is interrupted.
-- fair: If the difference between the heap memory usage of shard queries is smaller than Maximum_heap_memory x search.isolator.strategy.ratio, the query that has been running the longest is interrupted; otherwise, the query that uses the most heap memory is interrupted. (A worked example is given after this table.)
-
-Default value: fair
- |
-
-search.isolator.strategy.ratio
- |
-String
- |
-Threshold of the fair policy. This parameter takes effect only if search.isolator.strategy is set to fair. If the difference between the memory usage of large query tasks does not exceed the threshold, the query that takes the longest time should be interrupted. If the difference between the memory usage of large query tasks exceeds the threshold, the query that uses the most memory is interrupted.
-Value range: 0.0 to 100.0%
-Default value: 1%
- |
-
-
-
-
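-As a worked example of the fair policy (the figures are illustrative): on a node whose maximum heap memory is 31 GB, with search.isolator.strategy.ratio at its default of 1%, the comparison threshold is 31 GB x 1% ≈ 317 MB. If the heap memory used by the candidate shard queries differs by less than roughly 317 MB, the longest-running query is interrupted; otherwise, the query using the most heap memory is interrupted.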
- - Configure the global timeout duration of query tasks.
PUT _cluster/settings
-{
- "persistent": {
- "search.isolator.time.limit": "120s"
- }
-}
-
-Parameter
- |
-Data Type
- |
-Description
- |
-
-
-search.isolator.time.limit
- |
-String
- |
-Global query timeout duration. If this function is enabled, all the query tasks that exceed the specified duration will be canceled.
-Value range: ≥ 0ms
-Default value: 120s
- |
-
-
-
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0134.html b/docs/css/umn/css_01_0134.html
deleted file mode 100644
index c33dbe46..00000000
--- a/docs/css/umn/css_01_0134.html
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-Index Monitoring
-
-
-
diff --git a/docs/css/umn/css_01_0135.html b/docs/css/umn/css_01_0135.html
deleted file mode 100644
index 54c5f0a9..00000000
--- a/docs/css/umn/css_01_0135.html
+++ /dev/null
@@ -1,13 +0,0 @@
-
-
-Context
-CSS monitors various metrics of the running status and change trend of cluster indexes to measure service usage and handle potential risks in a timely manner, ensuring that clusters can run stably.
-
During index monitoring, the stats information about indexes is collected and saved to the monitoring index (monitoring-eye-css-[yyyy-mm-dd]) of the cluster, and retained for one week by default.
-
Currently, only clusters of versions 7.6.2 and 7.10.2 support index monitoring.
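Because the collected statistics are stored in ordinary cluster indexes, they can be inspected directly once index monitoring is enabled. The following is a minimal sketch that uses only standard Elasticsearch APIs against the monitoring index pattern mentioned above (shown for illustration; it is not a CSS-specific API):
GET _cat/indices/monitoring-eye-css-*?v
GET monitoring-eye-css-*/_search?size=1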
-
-
-
diff --git a/docs/css/umn/css_01_0136.html b/docs/css/umn/css_01_0136.html
deleted file mode 100644
index 8b852309..00000000
--- a/docs/css/umn/css_01_0136.html
+++ /dev/null
@@ -1,76 +0,0 @@
-
-
-Enabling Index Monitoring
-- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- Choose Dev Tools in the navigation pane on the left and run the following command to enable index monitoring:
PUT _cluster/settings
-{
- "persistent": {
- "css.monitoring.index.enabled": "true"
- }
-}
- - (Optional) To monitor a specific index, run the following command on the Dev Tools page of Kibana:
PUT _cluster/settings
-{
- "persistent": {
- "css.monitoring.index.enabled": "true",
- "css.monitoring.index.interval": "30s",
- "css.monitoring.index.indices": ["index_name"],
- "css.monitoring.history.duration": "3d"
- }
-}
-
-Table 1 Parameter description
-Parameter
- |
-Data Type
- |
-Description
- |
-
-
-css.monitoring.index.enabled
- |
-Boolean
- |
-Whether to enable index monitoring. If this parameter is set to true, the monitoring will be enabled.
-Default value: false
- |
-
-css.monitoring.index.interval
- |
-Time
- |
-Interval for collecting index monitoring data.
-Minimum value: 1s
-Default value: 10s
- |
-
-css.monitoring.index.indices
- |
-String
- |
-Name of an index to be monitored. By default, all indexes are monitored. You can configure specific indexes or a type of indexes to monitor.
-Example:
-- ""css.monitoring.index.indices": ["index_name"]" indicates only index_name is monitored.
- "css.monitoring.index.indices": ["log_*"] indicates that only indexes starting with log_ are monitored.
- "css.monitoring.index.indices": ["index1", "index2"] indicates that index1 and index2 are monitored.
-Default value: * (indicating that all indexes are monitored)
- |
-
-css.monitoring.history.duration
- |
-Time
- |
-Retention period of monitoring data storage. The default period is a week.
-Minimum value: 1d
-Default value: 7d
- |
-
-
-
-
-
Indexes starting with monitoring-eye-css-* are regarded as monitoring indexes and will not be monitored.
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0137.html b/docs/css/umn/css_01_0137.html
deleted file mode 100644
index abeed18b..00000000
--- a/docs/css/umn/css_01_0137.html
+++ /dev/null
@@ -1,142 +0,0 @@
-
-
-Checking the Index Read and Write Traffic
-You can call an API to query the index read and write traffic within a period of time.
-
Prerequisites
A cluster has been created and index monitoring has been enabled.
-
-
Procedure
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster, and click Access Kibana in the Operation column.
- Choose Dev Tools in the navigation pane on the left and run the following commands to query the index read and write traffic:
- Check read and write traffic of all the indexes.
GET /_cat/monitoring
- - Check read and write traffic of a specific index.
GET /_cat/monitoring/{indexName}
-{indexName} indicates the name of the index whose read and write traffic you want to check.
- - Check the read and write traffic of indexes for different periods.
GET _cat/monitoring?begin=1650099461000
-GET _cat/monitoring?begin=2022-04-16T08:57:41
-GET _cat/monitoring?begin=2022-04-16T08:57:41&end=2022-04-17T08:57:41
-
-Table 1 Parameter description
-Parameter
- |
-Mandatory
- |
-Description
- |
-
-
-begin
- |
-No
- |
-Start time (UTC time) of the monitoring data you want to view.
-Time format: strict_date_optional_time|epoch_millis
-The default start time is five minutes before the current time.
- |
-
-end
- |
-No
- |
-End time (UTC time) of the monitoring data you want to view.
-Time format: strict_date_optional_time|epoch_millis
-The default end time is the current time.
- |
-
-
-
-
-
-
These parameters cannot be used for system indexes, whose names start with a dot (.).
-
-Information similar to the following is displayed:
-index begin end status pri rep init unassign docs.count docs.deleted store.size pri.store.size delete.rate indexing.rate search.rate
-test 2022-03-25T09:46:53.765Z 2022-03-25T09:51:43.767Z yellow 1 1 0 1 9 0 5.9kb 5.9kb 0/s 0/s 0/s
-
-Table 2 Parameters in the returned information
-Parameter
- |
-Description
- |
-
-
-index
- |
-Index name
- |
-
-begin
- |
-Start time of the monitoring data you queried.
- |
-
-end
- |
-End time of the monitoring data you queried.
- |
-
-status
- |
-Index status within the queried monitoring interval.
- |
-
-pri
- |
-The number of index shards within the queried monitoring interval.
- |
-
-rep
- |
-The number of index replicas within the queried monitoring interval.
- |
-
-init
- |
-The number of initializing shards within the queried monitoring interval.
- |
-
-unassign
- |
-The number of unassigned shards within the queried monitoring interval.
- |
-
-docs.count
- |
-The number of documents within the queried monitoring interval.
- |
-
-docs.deleted
- |
-The number of deleted documents within the queried monitoring interval.
- |
-
-store.size
- |
-Index storage size within the queried monitoring interval.
- |
-
-pri.store.size
- |
-Size of the primary index shard within the queried monitoring interval.
- |
-
-delete.rate
- |
-Number of documents deleted per second within the queried monitoring interval.
- |
-
-indexing.rate
- |
-Number of documents written per second within the queried monitoring interval.
- |
-
-search.rate
- |
-Number of queries per second within the queried monitoring interval.
- |
-
-
-
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0138.html b/docs/css/umn/css_01_0138.html
deleted file mode 100644
index b212b6e4..00000000
--- a/docs/css/umn/css_01_0138.html
+++ /dev/null
@@ -1,114 +0,0 @@
-
-
-Checking Index Monitoring Information
-You can check preconfigured index monitoring visualizations on the Dashboard and Visualizations pages of Kibana. You can also customize tables and charts.
-
Prerequisites
A cluster has been created and index monitoring has been enabled.
-
-
Checking Dashboard Charts
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster, and click Access Kibana in the Operation column.
- In the navigation tree on the left, click Dashboard.
- Click [Monitoring] Index Monitoring Dashboard to view the preconfigured dashboard.
Figure 1 Preconfigured dashboard charts
-The preconfigured dashboard displays the number of read and write operations per second in the cluster and the top 10 indexes with the most read and write operations per second.
-
-Table 1 Preconfigured charts
-Chart Name
- |
-Description
- |
-
-
-[monitoring] markdown
- |
-Markdown chart, which briefly describes the dashboard content.
- |
-
-[monitoring] Indexing Rate (/s)
- |
-Number of documents written to a cluster per second.
- |
-
-[monitoring] Search Rate (/s)
- |
-Average number of queries per second in a cluster.
- |
-
-[monitoring] indexing rate of index for top10
- |
-Top 10 indexes with the most documents written per second.
- |
-
-[monitoring] search rate of index for top10
- |
-Top 10 indexes with the most queries per second.
- |
-
-[monitoring] total docs count
- |
-Total number of documents in a cluster.
- |
-
-[monitoring] total docs delete
- |
-Total number of deleted documents in a cluster.
- |
-
-[monitoring] total store size in bytes
- |
-Total storage occupied by documents in a cluster.
- |
-
-[monitoring] indices store_size for top10
- |
-Top 10 indexes that occupy the largest storage space.
- |
-
-[monitoring] indices docs_count for top10
- |
-Top 10 indexes with the largest number of documents.
- |
-
-[monitoring] indexing time in millis of index for top10(ms)
- |
-Top 10 indexes with the longest document write latency in a unit time (ms).
- |
-
-[monitoring] search query time in millis of index for top10(ms)
- |
-Top 10 indexes with the longest index query time in a unit time (ms).
- |
-
-[monitoring] segment count of index for top10
- |
-Top 10 indexes with the largest number of index segments.
- |
-
-[monitoring] segment memory in bytes of index for top10
- |
-Top 10 indexes with the largest heap memory usage of index segments.
- |
-
-
-
-
-
Do not delete the monitoring-eye-css-* index pattern while index monitoring is in use. Otherwise, the monitoring charts cannot be displayed properly.
-
-
-
-
Customizing Visualizations Charts
The index monitoring module periodically stores the index stats information in the monitoring-eye-css-* indexes. You can use the Kibana chart functions to draw customized charts.
-
The following procedure describes how to check the trend of the document quantity in a chart as an example.
-
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- Choose Visualize.
- Click Create visualization and select TSVB.
- Set chart parameters and view the visualizations.
On the Data tab page, index_stats.primaries.docs.count indicates the number of documents in the primary shards. Derivative calculates the difference between adjacent aggregation buckets. Set Unit to 1s so that the rate is displayed as a per-second value. Select Positive only to avoid negative values after a counter reset. To sort statistics by index, set Group by to Terms and By to index_stats.index; statistics will then be grouped by index name.
Figure 2 TSVB page
-
-To view data in different time segments, set the aggregation interval, or the displayed data will be incomplete. On the Panel options tab page, set Interval to 1m or 30m to adjust the interval of timestamp.
-Figure 3 Setting the interval
-
-
-
Importing Index Monitoring Charts
You can import or export charts on Kibana. If the index monitoring charts are not displayed, you can import the charts to Kibana again to load the monitoring view.
-
The following describes how to import a chart to Kibana:
-
- Create the monitoring-kibana.ndjson file by referring to kibana-monitor.
- Log in to Kibana and choose Management > Stack Management > Saved objects.
Figure 4 Selecting saved objects
- - Click Import and upload the monitoring-kibana.ndjson file created in step 1.
Figure 5 Uploading a file
- - After the upload is complete, click Done. The index monitoring chart is successfully imported.
Figure 6 Successfully importing index monitoring charts
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0139.html b/docs/css/umn/css_01_0139.html
deleted file mode 100644
index f6907ffe..00000000
--- a/docs/css/umn/css_01_0139.html
+++ /dev/null
@@ -1,31 +0,0 @@
-
-
-Flow Control 1.0
-
-
-
diff --git a/docs/css/umn/css_01_0140.html b/docs/css/umn/css_01_0140.html
deleted file mode 100644
index fec5adbe..00000000
--- a/docs/css/umn/css_01_0140.html
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
-Context
-Feature Description
CSS can control traffic at the node level. You can configure a blacklist and a whitelist, the maximum number of concurrent HTTP connections, and the maximum number of new HTTP connections for a node. You can also configure the maximum heap memory used by specific request paths, the maximum CPU usage, and one-click access blocking, and collect statistics on node access IP addresses and URIs. Each function has an independent control switch, which is disabled by default. To restore the default values of parameters, set them to null.
-
If flow control is enabled, requests will be blocked at the entry, which relieves cluster pressure in high-concurrency scenarios and avoids unavailability issues.
-
- HTTP/HTTPS flow control:
- You can control client IP address access by setting IP addresses and subnets in the HTTP/HTTPS blacklist or whitelist. If an IP address is in the blacklist, the client is disconnected and all its requests are rejected. Whitelist rules take precedence over blacklist rules: if a client IP address exists in both the blacklist and the whitelist, the client's requests will not be rejected.
- HTTP/HTTPS concurrent connection flow control limits the total number of concurrent HTTP connections to a node.
- HTTP/HTTPS new connection flow control limits the number of new connections established to a node per second.
- - Memory flow control limits request paths based on the node heap memory. You can configure memory flow control whitelist, global memory usage threshold, and heap memory threshold for a single path. Global memory flow control threshold takes precedence over the memory threshold of a single path. Paths in the whitelist will not be blocked in memory flow control.
- You can configure the global path whitelist for flow control as required when you need to use custom plug-ins.
- Request sampling can record the number of access requests from client IP addresses and the request paths of sampled users. Based on the statistics, you can identify the access traffic of client IP addresses and analyze the access traffic of request paths.
- Flow control provides an independent API for viewing traffic statistics and records the number of times the API is triggered. You can evaluate the flow control threshold and analyze the cluster load based on the statistics.
- Access logs record the URLs and bodies of HTTP/HTTPS requests received by nodes within a period of time. You can analyze the current traffic pressure based on the access logs.
- You can configure the node CPU usage threshold to limit the accessed traffic on a single node.
- One-click access blocking can block all the access traffic of a node, excluding the traffic from Kibana and Elasticsearch monitor APIs.
-
-
Constraints
- Currently, only versions 7.6.2 and 7.10.2 support the flow control feature.
- Flow control may affect the performance of some nodes.
-
- If flow control is enabled, user requests that exceed the flow control threshold will be rejected.
- Memory flow control and CPU flow control are based on request paths. The length and number of paths cannot be too large, or the cluster performance will be affected.
-
-
-
-
diff --git a/docs/css/umn/css_01_0141.html b/docs/css/umn/css_01_0141.html
deleted file mode 100644
index cc95eebc..00000000
--- a/docs/css/umn/css_01_0141.html
+++ /dev/null
@@ -1,99 +0,0 @@
-
-
-HTTP/HTTPS Flow Control
-Context
You can run commands in Kibana to enable or disable HTTP/HTTPS flow control for your cluster. The command parameters are as follows.
-
-
-Table 1 HTTP/HTTPS flow control parameters
-Parameter
- |
-Type
- |
-Description
- |
-
-
-flowcontrol.http.enabled
- |
-Boolean
- |
-Whether to enable HTTP/HTTPS flow control. This function is disabled by default. Enabling it may affect node access performance.
-Value: true or false
-Default value: false
- |
-
-flowcontrol.http.allow
- |
-List<String>
- |
-IP address whitelist.
-It can contain multiple IP addresses and masks, or an IP address list. Use commas (,) to separate multiple values. Example: xx.xx.xx.xx/24,xx.xx.xx.xx/24, or xx.xx.xx.xx,xx.xx.xx.xx.
-The default value is null.
- |
-
-flowcontrol.http.deny
- |
-List<String>
- |
-IP address blacklist.
-Multiple IP addresses and masks or an IP address list can be configured. Use commas (,) to separate multiple IP addresses and masks.
-The default value is null.
- |
-
-flowcontrol.http.concurrent
- |
-Integer
- |
-Maximum concurrent HTTP/HTTPS connections.
-Default value: Number of available cores on a node x 400
- |
-
-flowcontrol.http.newconnect
- |
-Integer
- |
-Maximum new connections that can be created for HTTP/HTTPS requests per second.
-Default value: Number of available cores on a node x 200
- |
-
-flowcontrol.http.warmup_period
- |
-Integer
- |
-Time required for the new HTTP/HTTPS connection rate to ramp up to its maximum. For example, if flowcontrol.http.newconnect is set to 100 and flowcontrol.http.warmup_period is set to 5000 ms, the allowed rate of new connections increases gradually and reaches 100 connections per second after 5 seconds.
-Value range: 0–10000
-Unit: ms
-Default value: 0
- |
-
-
-
-
-
-
Procedure
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run commands to enable or disable HTTP/HTTPS flow control.
- Enabling HTTP/HTTPS flow control for a node
PUT /_cluster/settings
-{
- "persistent": {
- "flowcontrol.http.enabled": true,
- "flowcontrol.http.allow": ["192.168.0.1/24", "192.168.2.1/24"],
- "flowcontrol.http.deny": "192.168.1.1/24",
- "flowcontrol.http.concurrent": 1000,
- "flowcontrol.http.newconnect": 1000,
- "flowcontrol.http.warmup_period": 0
- }
-}
-
If all parameters are set to null, they will be restored to default values.
-
- - Disabling HTTP/HTTPS flow control for a node
PUT /_cluster/settings
-{
- "persistent": {
- "flowcontrol.http.enabled": false
- }
-}
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0142.html b/docs/css/umn/css_01_0142.html
deleted file mode 100644
index 8d970bf5..00000000
--- a/docs/css/umn/css_01_0142.html
+++ /dev/null
@@ -1,122 +0,0 @@
-
-
-Memory Flow Control
-Context
Elasticsearch provides a circuit breaker, which will terminate requests if the memory usage exceeds its threshold. However, Elasticsearch does not check the heap memory usage when an API is called, and does not allow users to configure the threshold for a single request. In this case, memory usage can only be calculated during request processing, which may lead to frequent circuit breaking and cannot avoid heap memory waste. To solve this problem, CSS checks the heap memory usage when receiving REST requests, blocking excess API requests and protecting nodes. You can configure global memory flow control, or configure the request path and heap memory threshold for a specific request path. Before a request is processed, the system checks the configured heap memory threshold. If the threshold is exceeded, the request path will be blocked.
-
- Memory flow control may affect request processing performance.
- If the memory flow control is enabled, some Kibana search requests may fail.
- If memory flow control is enabled in Elasticsearch 5.5.1, _mget requests will be blocked and Kibana access will be abnormal. You can add _mget requests to the request whitelist to avoid this problem.
-
-
The following table describes memory flow control parameters.
-
-
-Table 1 Memory flow control parameters
-Parameter
- |
-Type
- |
-Description
- |
-
-
-flowcontrol.memory.enabled
- |
-Boolean
- |
-Whether to enable memory flow control. This function is disabled by default. Enabling memory flow control may slightly affect node performance.
-Value: true or false
-Default value: false
- |
-
-flowcontrol.memory.allow_path
- |
-List<String>
- |
-Request path whitelist for memory flow control.
-Whitelisted paths are not blocked by memory flow control. Wildcard characters are supported. By default, query APIs controlled by the cluster are not blocked in memory flow control, which prevents failures to query cluster information when the memory usage reaches the threshold.
-Example:
-- "flowcontrol.memory.allow_path": "/index/_search",
- "flowcontrol.memory.allow_path": "/index*/_search",
- "flowcontrol.memory.allow_path": ["/index/_search", "/index1/_bulk"],
-A maximum of 10 paths can be configured. A path can contain up to 32 characters.
-The default value is null.
- |
-
-flowcontrol.memory.heap_limit
- |
-String
- |
-Maximum global heap memory usage of a node. The value cannot be less than 10% of the heap memory.
-Value range: 10%–100%
-Default value: 90%
- |
-
-flowcontrol.memory.*.filter_path
- |
-String
- |
-Paths under memory flow control.
-The default value is **, indicating all paths. If flowcontrol.memory.heap_limit is configured and flowcontrol.memory.*.filter_path is not, it indicates that all the paths, except those in the whitelist, are under control. The whitelist takes precedence over the single-path rule. If a path is specified in both flowcontrol.memory.allow_path and flowcontrol.memory.*.filter_path, the requests from the path will be allowed.
-For example, if flowcontrol.memory.allow_path and flowcontrol.memory.*.filter_path are both set to abc/_search, then abc/_search will not be under flow control.
-Maximum length: 32 characters
- |
-
-flowcontrol.memory.*.heap_limit
- |
-String
- |
-Heap memory usage threshold of request paths. If the heap memory usage exceeds the threshold, flow control will be triggered.
-Value range: 0–100%
-Default value: 90%
- |
-
-
-
-
-
-
Procedure
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run commands to enable or disable memory flow control.
- Enabling memory flow control
PUT /_cluster/settings
-{
- "persistent": {
- "flowcontrol.memory.enabled": true,
- "flowcontrol.memory.allow_path": "/index/_search",
- "flowcontrol.memory.heap_limit": "85%"
- }
-}
- - Enabling memory flow control for a request path
Configure the heap memory usage threshold for a request path. You can configure the priorities of such threshold rules.
-PUT /_cluster/settings
-{
- "persistent": {
- "flowcontrol.memory.enabled": true,
- "flowcontrol.memory": {
- "flowcontrol_search": {
- "filter_path": "index1/_search",
- "heap_limit": "50%"
- },
- "flowcontrol_bulk": {
- "filter_path": "index*/_bulk",
- "heap_limit": "50%"
- }
- }
- }
-}
- - Deleting the memory flow control configuration of a request path
PUT /_cluster/settings
-{
- "persistent": {
- "flowcontrol.memory.enabled": true,
- "flowcontrol.memory": {
- "flowcontrol_search": {
- "filter_path": null,
- "heap_limit": null
- }
- }
- }
-}
- - Disabling cluster memory flow control
PUT /_cluster/settings
-{
- "persistent": {
- "flowcontrol.memory.enabled": false
- }
-}
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0143.html b/docs/css/umn/css_01_0143.html
deleted file mode 100644
index 62423ca1..00000000
--- a/docs/css/umn/css_01_0143.html
+++ /dev/null
@@ -1,43 +0,0 @@
-
-
-Global Path Whitelist for Flow Control
-Context
The following table describes the global path whitelist parameters for flow control.
-
-
-Table 1 Global path whitelist parameters for flow control
-Parameter
- |
-Type
- |
-Description
- |
-
-
-flowcontrol.path.white_list
- |
-List<String>
- |
-Paths that are not under flow control. These paths are not affected by memory flow control, CPU flow control, or one-click blocking; but are under IP address-based flow control.
-A maximum of 10 paths can be configured. A path can contain up to 32 characters.
-This parameter is left blank by default.
- NOTE: You are advised not to configure this parameter, unless required by plug-ins.
-
- |
-
-
-
-
-
-
Procedure
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation tree on the left, choose Dev Tools. Run the following command to configure the global path whitelist for flow control:
PUT _cluster/settings
-{
- "persistent": {
- "flowcontrol.path.white_list": "xxxx"
- }
-}
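-For example, a whitelist with two entries could look as follows. The paths below are hypothetical plug-in paths used purely for illustration; replace them with the paths your plug-in actually exposes, and set the parameter to null to restore the default.
-PUT _cluster/settings
-{
-  "persistent": {
-    "flowcontrol.path.white_list": ["/_myplugin/search", "/_myplugin/stats"]
-  }
-}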
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0144.html b/docs/css/umn/css_01_0144.html
deleted file mode 100644
index 96add501..00000000
--- a/docs/css/umn/css_01_0144.html
+++ /dev/null
@@ -1,73 +0,0 @@
-
-
-Request Sampling
-Context
Request sampling can record the access IP addresses, the number of accessed nodes, request paths, request URLs, and request bodies, which can be used to obtain the IP addresses and paths of clients that have sent a large number of access requests.
-
The following table describes request sampling parameters.
-
-
-Table 1 Request sampling parameters
-Parameter
- |
-Type
- |
-Description
- |
-
-
-flowcontrol.statics.enabled
- |
-Boolean
- |
-Whether to enable request sampling. Request sampling may affect node performance.
-Value: true or false
-Default value: false
- |
-
-flowcontrol.statics.threshold
- |
-Integer
- |
-Number of recent access requests whose statistics are collected. The value 100 indicates that statistics will be collected on the 100 IP addresses and 100 URLs that are most frequently accessed.
-Minimum value: 10
-Maximum value: 1000
-Default value: 100
- |
-
-flowcontrol.statics.sample_frequency
- |
-Integer
- |
-Path sampling frequency. If this parameter is set to 100, one sample is collected for every 100 requests.
-Minimum value: 50
-Default value: 100
- |
-
-
-
-
-
- The IP address statistics and URL sampling statistics are cached based on their access time. If the cache space reaches the threshold (flowcontrol.statics.threshold), the records of the earliest access will be deleted.
- In URL sampling, an access path is uniquely identified by its URL hash.
-
-
-
Procedure
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run commands to enable or disable sampling.
- Enabling sampling
PUT /_cluster/settings
-{
- "persistent": {
- "flowcontrol.statics.enabled": true,
- "flowcontrol.statics.threshold": 100,
- "flowcontrol.statics.sample_frequency": 50
- }
-}
- - Disabling sampling
PUT /_cluster/settings
-{
- "persistent": {
- "flowcontrol.statics.enabled": false
- }
-}
-
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0145.html b/docs/css/umn/css_01_0145.html
deleted file mode 100644
index ce1a10c3..00000000
--- a/docs/css/umn/css_01_0145.html
+++ /dev/null
@@ -1,176 +0,0 @@
-
-
-Flow Control
-Flow control statistics can be viewed via an independent API.
-
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run the commands to query traffic control information.
-
Example response:
-{
- "_nodes" : {
- "total" : 1,
- "successful" : 1,
- "failed" : 0
- },
- "cluster_name" : "css-flowcontroller",
- "nodes" : {
- "ElBRNCMbTj6L1C-Wke-Dnw" : {
- "name" : "css-flowcontroller-ess-esn-1-1",
- "host" : "10.0.0.133",
- "timestamp" : 1613979513747,
- "flow_control" : {
- "transport" : {
- "concurrent_req" : 0,
- "rejected_concurrent" : 0,
- "rejected_new" : 0,
- "rejected_deny" : 0
- },
- "http" : {
- "concurrent_req" : 0,
- "rejected_concurrent" : 0,
- "rejected_new" : 0,
- "rejected_deny" : 0
- },
- "memory" : {
- "memory_allow" : 41,
- "memory_rejected" : 0
- },
- "cpu": {
- "rejected_cpu" : 0
- },
- "ip_address" : [
- {
- "ip" : "/10.0.0.198",
- "count" : 453
- },
- {
- "ip" : "/198.19.49.1",
- "count" : 42
- }
- ],
- "url_sample" : [
- {
- "url" : "/*/_search?pretty=true",
- "method" : "GET",
- "remote_address" : "/10.0.0.198:16763",
- "count" : 1
- }
- ]
- }
- }
- }
-}
-In the response, the information of each node is listed separately. The http field records the numbers of concurrent and new connections. The memory field records memory flow control statistics. The ip_address field records the client IP addresses that have accessed the node most recently. The url_sample field records the request URLs sampled most recently. The cpu field records CPU flow control statistics.
-
-Table 1 Response parameters
-Parameter
- |
-Description
- |
-
-
-concurrent_req
- |
-Number of TCP connections of a node, which is recorded no matter whether flow control is enabled. This value is similar to the value of current_open of the GET /_nodes/stats/http API but is smaller, because whitelisted IP addresses and internal node IP addresses are not counted.
- |
-
-rejected_concurrent
- |
-Number of concurrent connections rejected during HTTP flow control. This value is not cleared when HTTP flow control is disabled.
- |
-
-rejected_new
- |
-Number of new connections rejected during HTTP flow control. This value is not cleared when HTTP flow control is disabled.
- |
-
-rejected_deny
- |
-Number of requests rejected based on the blacklist during HTTP flow control. This value is not cleared when HTTP flow control is disabled.
- |
-
-memory_allow
- |
-Number of allowed requests during memory flow control. This parameter takes effect when memory flow control is enabled, and its value is not cleared after memory flow control is disabled. The requests from the paths in the allow_path whitelist are not recorded. If allow_path is set to **, no requests are recorded.
- |
-
-memory_rejected
- |
-Number of rejected requests during memory flow control. This parameter takes effect when memory flow control is enabled, and its value is not cleared after memory flow control is disabled. The requests from the paths in the allow_path whitelist are not recorded. If allow_path is set to **, no requests are recorded.
- |
-
-rejected_cpu
- |
-Number of requests rejected when the CPU flow control threshold is exceeded. This parameter takes effect when CPU flow control is enabled, and its value is not cleared after CPU flow control is disabled.
- |
-
-ip_address
- |
-IP addresses and the number of requests. For details, see Table 2.
- |
-
-url_sample
- |
-Request path samples. URLs are sampled based on the configured time window and sampling interval. For details, see Table 3.
- |
-
-
-
-
-
-Table 2 ip_address
-Parameter
- |
-Description
- |
-
-
-ip
- |
-Source IP address for accessing the node.
- |
-
-count
- |
-Number of access requests from an IP address.
- |
-
-
-
-
-
-Table 3 url_sample
-Parameter
- |
-Description
- |
-
-
-url
- |
-Request URL
- |
-
-method
- |
-Method corresponding to the request path
- |
-
-remote_address
- |
-Source IP address and port number of the request
- |
-
-count
- |
-How many times a path is sampled
- |
-
-
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0146.html b/docs/css/umn/css_01_0146.html
deleted file mode 100644
index 982ab8de..00000000
--- a/docs/css/umn/css_01_0146.html
+++ /dev/null
@@ -1,170 +0,0 @@
-
-
-Access Logs
-Context
You can check access logs in either of the following ways:
-
- Enable and check access logs via an independent API. Configure the API parameters to record the access log time and size. The access log content is returned through a REST API.
- Print access logs. Your access logs are printed as files in backend logs.
-
Enabling the access log function may affect cluster performance.
-
The following table describes access log parameters.
-
-
-Table 1 Access log parameters
-Parameter
- |
-Type
- |
-Description
- |
-
-
-duration_limit
- |
-String
- |
-Length of time during which access requests are recorded in the access log.
-Value range: 10 to 120
-Unit: s
-Default value: 30
- |
-
-capacity_limit
- |
-String
- |
-Size of an access log. After access logging is enabled, the size of recorded requests is checked. If the size exceeds the value of this parameter, the access logging stops.
-Value range: 1 to 5
-Unit: MB
-Default value: 1
- |
-
-
-
-
-
Access logging stops if either duration_limit or capacity_limit reaches the threshold.
-
-
-
Procedure
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run commands to enable or disable access logs.
- Enabling access logs for all nodes in a cluster
PUT /_access_log?duration_limit=30s&capacity_limit=1mb
- - Enabling access logs for a node in a cluster
PUT /_access_log/{nodeId}?duration_limit=30s&capacity_limit=1mb
-{nodeId} indicates the ID of the node where you want to enable access logs.
-
- - Use APIs to check access logs.
-
Example response:
{
- "_nodes" : {
- "total" : 1,
- "successful" : 1,
- "failed" : 0
- },
- "cluster_name" : "css-flowcontroller",
- "nodes" : {
- "8x-ZHu-wTemBQwpcGivFKg" : {
- "name" : "css-flowcontroller-ess-esn-1-1",
- "host" : "10.0.0.98",
- "count" : 2,
- "access" : [
- {
- "time" : "2021-02-23 02:09:50",
- "remote_address" : "/10.0.0.98:28191",
- "url" : "/_access/security/log?pretty",
- "method" : "GET",
- "content" : ""
- },
- {
- "time" : "2021-02-23 02:09:52",
- "remote_address" : "/10.0.0.98:28193",
- "url" : "/_access/security/log?pretty",
- "method" : "GET",
- "content" : ""
- }
- ]
- }
- }
-}
-
-
-Table 2 Response parameters
-Parameter
- |
-Description
- |
-
-
-name
- |
-Node name
- |
-
-host
- |
-Node IP address
- |
-
-count
- |
-Number of node access requests in a statistical period
- |
-
-access
- |
-Details about node access requests in a statistical period. For details, see Table 3.
- |
-
-
-
-
-
-
-Table 3 access
-Parameter
- |
-Description
- |
-
-
-time
- |
-Request time
- |
-
-remote_address
- |
-Source IP address and port number of the request
- |
-
-url
- |
-Original URL of the request
- |
-
-method
- |
-Method corresponding to the request path
- |
-
-content
- |
-Request content
- |
-
-
-
-
- - Enable or disable the access log function.
All user access operations can be logged. By default, logs are recorded in the access_log.log file in the background. The maximum size of a log file is 250 MB, and there can be a maximum of five log files. You can back up access log files to OBS.
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0147.html b/docs/css/umn/css_01_0147.html
deleted file mode 100644
index cb51bdf2..00000000
--- a/docs/css/umn/css_01_0147.html
+++ /dev/null
@@ -1,92 +0,0 @@
-
-
-CPU Flow Control
-Context
CPU flow control can be implemented based on the CPU usage of a node.
-
You can configure the CPU usage threshold of a node to prevent the node from breaking down due to heavy traffic. You can determine the CPU usage threshold based on the traffic threshold. If the CPU usage of a node exceeds the configured threshold, CPU flow control discards excess node requests to protect the cluster. Traffic within the node or passing through Elasticsearch monitoring APIs is not affected.
-
The following table describes CPU flow control parameters.
-
-
-Table 1 CPU flow control parameters
-Parameter
- |
-Type
- |
-Description
- |
-
-
-flowcontrol.cpu.enabled
- |
-Boolean
- |
-Whether to enable CPU flow control. If this function is enabled, the node access performance may be affected.
-Value: true or false
-Default value: false
- |
-
-flowcontrol.cpu.percent_limit
- |
-Integer
- |
-Maximum CPU usage of a node.
-Value range: 0–100
-Default value: 90
- |
-
-flowcontrol.cpu.allow_path
- |
-List
- |
-Path whitelist for CPU flow control. The paths specified in the allow_path whitelist are not under CPU flow control.
-The default value is null.
-A path can contain up to 32 characters. A maximum of 10 request paths can be configured. Wildcard characters are supported. For example, if this parameter is set to auto_*/_search, all the search requests of the indexes prefixed with auto_ are not under the flow control.
- |
-
-flowcontrol.cpu.*.filter_path
- |
-String
- |
-Paths under CPU flow control.
-Maximum length: 32 characters
-Example:
-"flowcontrol.cpu.search.filter_path": "/index/_search",
-"flowcontrol.cpu.search.limit": 60,
-The default value is **, indicating all paths. If limit is configured and filter_path is not, it indicates that all the paths, except those in the whitelist, are under control. The whitelist takes precedence over the single-path rule. If a path is specified in both allow_path and filter_path, the requests from the path will be allowed.
-For example, if filter_path and allow_path are both set to abc/_search, abc/_search will not be under flow control.
- |
-
-flowcontrol.cpu.*.limit
- |
-Integer
- |
-CPU threshold of request paths. If the CPU usage exceeds the threshold, flow control will be triggered.
-Value range: 0–100
-Default value: 90
- |
-
-
-
-
-
-
Procedure
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run commands to enable or disable CPU flow control.
- Enabling CPU flow control
PUT /_cluster/settings
-{
- "persistent": {
- "flowcontrol.cpu.enabled": true,
- "flowcontrol.cpu.percent_limit": 80,
- "flowcontrol.cpu.allow_path": ["index/_search"]
- }
-}
- - Disabling CPU flow control
PUT /_cluster/settings
-{
- "persistent": {
- "flowcontrol.cpu.enabled": false
- }
-}
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0148.html b/docs/css/umn/css_01_0148.html
deleted file mode 100644
index aa921334..00000000
--- a/docs/css/umn/css_01_0148.html
+++ /dev/null
@@ -1,25 +0,0 @@
-
-
-One-click Traffic Blocking
-You can block all traffic in one click, except the traffic that passes through O&M APIs, to handle unexpected traffic bursts and quickly recover your cluster.
-
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run commands to enable or disable one-click traffic blocking.
-
-
-
-
diff --git a/docs/css/umn/css_01_0149.html b/docs/css/umn/css_01_0149.html
deleted file mode 100644
index ae9956df..00000000
--- a/docs/css/umn/css_01_0149.html
+++ /dev/null
@@ -1,32 +0,0 @@
-
-
-Changing the Elasticsearch Cluster Form
-
-
-
diff --git a/docs/css/umn/css_01_0150.html b/docs/css/umn/css_01_0150.html
deleted file mode 100644
index 243ad27f..00000000
--- a/docs/css/umn/css_01_0150.html
+++ /dev/null
@@ -1,29 +0,0 @@
-
-
-Overview
-You can scale in or out a cluster and change cluster specifications. In this way, you can improve cluster efficiency and reduce O&M costs.
-
Scaling Out a Cluster
- If a data node (ess) processes many data write and query requests and responds slowly, you can expand its storage capacity to improve efficiency. If some nodes become unavailable due to excessive data volume or misoperations, you can add new nodes to ensure cluster availability.
- Cold data nodes (ess-cold) are used to share the workload of data nodes. To prevent cold data loss, you can expand the storage capacity of the cold data node or add new ones.
-
-
Changing Specifications
- If the allocation of new indexes or shards takes too long or node coordination and scheduling are inefficient, you can change the master node (ess-master) specifications.
- If too many tasks need to be distributed or too many results have been aggregated, you can change the client node (ess-client) specifications.
- If the speed of data writing and query decreases suddenly, you can change the data node (ess) specifications.
- If cold data query becomes slow, you can change the cold node (ess-cold) specifications.
-
-
Scaling in a Cluster
- If a cluster can process existing data without fully using its resources, you can scale in the cluster to reduce costs.
-
-
Removing Specified Nodes
- If a cluster can process existing data without fully using its nodes, you can remove one or more specified nodes from the cluster to reduce costs.
-
-
Replacing a Specified Node
- If a node in the cluster is faulty, you can create a new node with the same specifications to replace it.
-
-
Adding Master/Client Nodes
- If the workloads on the data plane of a cluster increase, you can dynamically scale the cluster by adding master/client nodes.
-
-
Changing the Security Mode
-
After a cluster is created, its security mode can be changed using the following methods:
- Change a non-security cluster to a security cluster that uses HTTP or HTTPS protocol.
- Change a security cluster that uses HTTP or HTTPS protocol to a non-security cluster.
- Change the protocol of a security cluster.
-
-
Changing AZs
-
You can Add AZ or Migrate AZ.
- Add AZ: Add one or two AZs to a single-AZ cluster, or add an AZ to a dual-AZ cluster to improve cluster availability.
- Migrate AZ: Completely migrate data from the current AZ to another AZ that has sufficient resources.
-
-
-
-
diff --git a/docs/css/umn/css_01_0151.html b/docs/css/umn/css_01_0151.html
deleted file mode 100644
index 74a57c7c..00000000
--- a/docs/css/umn/css_01_0151.html
+++ /dev/null
@@ -1,85 +0,0 @@
-
-
-Scaling Out a Cluster
-If the workloads on the data plane of a cluster change, you can scale out the cluster by increasing the number or capacity of its nodes. Services are not interrupted during cluster scale-out.
-
Prerequisites
- The target cluster is available and has no tasks in progress.
- The target cluster has sufficient quotas available.
-
-
Constraints
- The Node Specifications cannot be modified during scale-out. You can modify Node Specifications by referring to Changing Specifications.
- If you change the number and storage capacity of a specified type of node, nodes in other types will not be changed.
- The quota of nodes in different types varies. For details, see Table 1.
-
-Table 1 Number of nodes in different types
-Node Type
- |
-Number
- |
-
-
-ess
- |
-ess: 1-32
- |
-
-ess, ess-master
- |
-ess: 1-200
-ess-master: an odd number ranging from 3 to 9
- |
-
-ess, ess-client
- |
-ess: 1-32
-ess-client: 1-32
- |
-
-ess, ess-cold
- |
-ess: 1-32
-ess-cold: 1-32
- |
-
-ess, ess-master, ess-client
- |
-ess: 1-200
-ess-master: an odd number ranging from 3 to 9
-ess-client: 1-32
- |
-
-ess, ess-master, ess-cold
- |
-ess: 1-200
-ess-master: an odd number ranging from 3 to 9
-ess-cold: 1-32
- |
-
-ess, ess-client, ess-cold
- |
-ess: 1-32
-ess-client: 1-32
-ess-cold: 1-32
- |
-
-ess, ess-master, ess-client, ess-cold
- |
-ess: 1-200
-ess-master: an odd number ranging from 3 to 9
-ess-client: 1-32
-ess-cold: 1-32
- |
-
-Details about the four node types:
- ess: the default node type, which is mandatory for cluster creation. The other three node types are optional.
- ess-master: master node
- ess-client: client node
- ess-cold: cold data node
-
- |
-
-
-
-
-
-
-
Procedure
- Log in to the CSS management console.
- In the navigation pane, choose a cluster type. The cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- On the Modify Configuration page, choose the Scale Cluster tab and click Scale out to set parameters.
- Action: Select Scale out.
- Resources: The amount of resources after the change.
- Nodes: The number of nodes and the node storage capacity of the default data nodes.
- For the number of nodes allowed in each node type, see Table 1.
- The value range of node storage capacity depends on the Node Specifications. The value must be a multiple of 20.
-
- - Click Next.
- Confirm the information and click Submit.
- Click Back to Cluster List to switch to the Clusters page. The Task Status is Scaling out. When Cluster Status changes to Available, the cluster has been successfully scaled out.
-
-
-
-
diff --git a/docs/css/umn/css_01_0152.html b/docs/css/umn/css_01_0152.html
deleted file mode 100644
index ccdccc87..00000000
--- a/docs/css/umn/css_01_0152.html
+++ /dev/null
@@ -1,20 +0,0 @@
-
-
-Changing Specifications
-If the workloads on the data plane of a cluster change, you can change its node specifications as needed.
-
Prerequisites
- The target cluster is available and has no tasks in progress.
- The target cluster has sufficient quotas available.
- When changing the node specifications, ensure that all service data has copies so the services will not be interrupted.
Run the GET _cat/indices?v command in Kibana. If the returned rep value is greater than 0, the data has copies. If the returned rep value is 0, the data has no copies. In this case, create a snapshot for the cluster by referring to Manually Creating a Snapshot, or add index replicas (a sketch using the standard Elasticsearch API is given after this list).
- - If the data volume is large, it may take long to modify the node specifications. You are advised to modify specifications during off-peak hours.
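-The following is a minimal sketch, not a CSS-specific procedure: it uses the standard Elasticsearch index settings API to add one replica to an index whose rep value is 0 (my_index is a placeholder name). Adding a replica increases storage usage, so check the available disk capacity first.
-PUT my_index/_settings
-{
-  "index": {
-    "number_of_replicas": 1
-  }
-}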
-
-
Constraints
- The number of nodes and the capacity of node storage cannot be changed. You can add nodes and increase the node storage capacity by referring to Scaling Out a Cluster. For details about how to reduce the number of nodes, see Scaling in a Cluster.
- After decreasing cluster specifications, the cluster performance will deteriorate and service capabilities will be affected. Exercise caution when performing this operation.
- If a cluster has multiple node types, you can change the specifications of only one type at a time. After the change, nodes in other types still maintain their original specifications.
- Kibana is unavailable during specification change.
- During the specification modification, the nodes are stopped and restarted in sequence. It is a rolling process.
-
-
Procedure
- Log in to the CSS management console.
- In the navigation pane, choose a cluster type. The cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- On the Modify Configuration page, choose the Scale Cluster tab and click Change Specifications to set parameters.
- Action: select Change specifications.
- Resources: The changed amount of resources.
- Nodes: Specifications of the default data nodes. Select the required specifications from the Node Specifications drop-down list and select the nodes whose specifications you want to change.
- If a cluster has master nodes, client nodes, or cold data nodes, you can change their specifications.
- - Click Next.
- Confirm the information and click Submit.
- In the displayed Verify Index Copy dialog box, select Verify index copies if needed, and click OK.
- If you selected Verify index copies and the cluster has no master node, indexes must have at least one copy and the cluster must have at least three nodes.
- If you selected Verify index copies and the cluster has master nodes, indexes must have at least one copy.
- - Click Back to Cluster List to switch to the Clusters page. The Cluster Status is Configuration modified. When Cluster Status changes to Available, the cluster specifications have been successfully modified.
-
-
-
-
diff --git a/docs/css/umn/css_01_0153.html b/docs/css/umn/css_01_0153.html
deleted file mode 100644
index 4e59d3a6..00000000
--- a/docs/css/umn/css_01_0153.html
+++ /dev/null
@@ -1,87 +0,0 @@
-
-
-Scaling in a Cluster
-If a cluster can process existing data without fully using its resources, you can scale in the cluster to reduce costs. Services are not interrupted during cluster scale-in.
-
Prerequisites
The target cluster is available and has no tasks in progress.
-
-
Constraints
- Only the number of nodes can be modified during cluster scale-in. The node specifications and node storage capacity cannot be modified. For details about how to change node specifications and node storage capacity, see Changing Specifications and Scaling Out a Cluster, respectively.
- If you change the number and storage capacity of a specified type of node, nodes in other types will not be changed.
- Ensure that the disk usage after scale-in is less than 80% and each AZ of each node type has at least one node.
- When scaling in a cluster, the data on the nodes to be removed is migrated to other nodes. The timeout threshold for data migration is five hours. If data migration is not complete within 5 hours, the cluster scale-in fails. You are advised to perform the scale-in in multiple batches when the cluster holds a large amount of data.
- For a cluster without master nodes, the number of remaining data nodes (including cold data nodes and other types of nodes) after scale-in must be greater than half of the original node number, and greater than the maximum number of index replicas.
- For a cluster with master nodes, the number of removed master nodes in a scale-in must be fewer than half of the original master node number. After scale-in, there has to be an odd number of master nodes, and there has to be at least three of them.
- A cluster with two nodes cannot be scaled in. You can create a cluster using a single node instead.
- The quota of nodes in different types varies. For details, see Table 1.
-
-Table 1 Number of nodes in different types
-Node Type
- |
-Number
- |
-
-
-ess
- |
-ess: 1-32
- |
-
-ess, ess-master
- |
-ess: 1-200
-ess-master: an odd number ranging from 3 to 9
- |
-
-ess, ess-client
- |
-ess: 1-32
-ess-client: 1-32
- |
-
-ess, ess-cold
- |
-ess: 1-32
-ess-cold: 1-32
- |
-
-ess, ess-master, ess-client
- |
-ess: 1-200
-ess-master: an odd number ranging from 3 to 9
-ess-client: 1-32
- |
-
-ess, ess-master, ess-cold
- |
-ess: 1-200
-ess-master: an odd number ranging from 3 to 9
-ess-cold: 1-32
- |
-
-ess, ess-client, ess-cold
- |
-ess: 1-32
-ess-client: 1-32
-ess-cold: 1-32
- |
-
-ess, ess-master, ess-client, ess-cold
- |
-ess: 1-200
-ess-master: an odd number ranging from 3 to 9
-ess-client: 1-32
-ess-cold: 1-32
- |
-
-Details about the four node types: - ess: the default node type that is mandatory for cluster creation. The other three node types are optional.
- ess-master: master node
- ess-client: client node
- ess-cold: cold data node
-
- |
-
-
-
-
-
-
-
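-Before submitting a scale-in request, you can check the current per-node disk usage and the maximum replica count of your indexes in Kibana Dev Tools. This is a minimal sketch using the standard Elasticsearch _cat APIs:
-# Per-node disk usage (disk.percent should remain below 80% after the scale-in)
-GET _cat/allocation?v
-# Replica count per index (the number of remaining data nodes must exceed the largest rep value)
-GET _cat/indices?v&h=index,rep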
Procedure
- Log in to the CSS management console.
- In the navigation pane, choose a cluster type. The cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- On the Modify Configuration page, choose the Scale Cluster tab and click Scale in to set parameters.
-
- Click Next.
- Confirm the information and click Submit.
- Click Back to Cluster List to switch to the Clusters page. The Task Status is Scaling in. When Cluster Status changes to Available, the cluster has been successfully scaled in.
-
-
-
-
diff --git a/docs/css/umn/css_01_0154.html b/docs/css/umn/css_01_0154.html
deleted file mode 100644
index 64782c11..00000000
--- a/docs/css/umn/css_01_0154.html
+++ /dev/null
@@ -1,22 +0,0 @@
-
-
-Removing Specified Nodes
-If a cluster can process existing data without fully using its nodes, you can remove one or more specified nodes from the cluster to reduce costs. Services will not be interrupted during the removal of specified nodes.
-
Prerequisites
The target cluster is available and has no tasks in progress.
-
-
Constraints
- Ensure that the disk usage after scale-in is less than 80% and each AZ of each node type has at least one node.
- In a cross-AZ cluster, the difference between the numbers of the same type nodes in different AZs cannot exceed 1.
- For a cluster without master nodes, the number of data nodes and cold data nodes removed in a scale-in must be fewer than half of the original number of data nodes and cold data nodes. In addition, the number of data nodes and cold data nodes remaining after the scale-in must be greater than the maximum number of index replicas. (To check which shards and replicas a node holds, see the example after this list.)
- For a cluster with master nodes, the number of removed master nodes in a scale-in must be fewer than half of the original master node number. After scale-in, there has to be an odd number of master nodes, and there has to be at least three of them.
-
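-Before removing a node, you can check which shards it currently hosts and whether the affected indexes have replicas. This is a minimal sketch using the standard Elasticsearch _cat APIs in Kibana Dev Tools:
-# Shard distribution per node
-GET _cat/shards?v&h=index,shard,prirep,state,node
-# Replica count per index
-GET _cat/indices?v&h=index,rep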
-
Procedure
- Log in to the CSS management console.
- In the navigation pane, choose a cluster type. The cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- On the Modify Configuration page, click the Scale In tab.
- On the Scale In tab page, set the following parameters:
-
- Whether to perform data migration: If this option is selected, data migration is performed. If the target node contains disabled indexes or indexes that have no replicas, this option must be selected.
- In the data node table, select the node to be scaled in.
- - Click Next.
- Confirm the information and click Submit.
- Click Back to Cluster List to switch to the Clusters page. The Task Status is Scaling in. When Cluster Status changes to Available, the cluster has been successfully scaled in.
-
-
-
-
diff --git a/docs/css/umn/css_01_0155.html b/docs/css/umn/css_01_0155.html
deleted file mode 100644
index 5c24c334..00000000
--- a/docs/css/umn/css_01_0155.html
+++ /dev/null
@@ -1,79 +0,0 @@
-
-
-Configuring Cluster Monitoring
-You can use Cloud Eye to monitor the created clusters. After configuring the cluster monitoring, you can log in to the Cloud Eye management console to view cluster metrics.
-
The procedure for configuring cluster monitoring:
-
- Creating Alarm Rules: Customize alarm rules for the monitoring metrics. Once a metric exceeds the threshold, the system will notify you by sending emails or HTTP/HTTPS requests.
- Configuring Monitoring Metrics: Configure monitoring metrics for a cluster or a node in the cluster.
- Viewing Monitoring Metrics: View the statistics of the monitoring metrics in specific periods.
-
Prerequisites
- The cluster is in the Available or Processing status.
- The cluster has been running properly for more than 10 minutes.
-
-
Recommended Monitoring Metrics
- Cluster CPU and JVM usage. You are advised to configure the following monitoring metrics: average JVM heap usage, maximum JVM heap usage, average CPU usage, and maximum CPU usage.
- Cluster write and query latency and throughput. You are advised to configure the following monitoring metrics: average index latency, average index rate, average search latency, and average QPS.
- Cluster write and query queues and rejected tasks. You are advised to configure the following monitoring metrics: tasks in write queue, tasks in search queue, rejected tasks in write queue, and rejected tasks in search queue. (To query the underlying statistics directly from the cluster, see the example after this list.)
-
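-If you also want to sample the underlying figures directly from the cluster, the standard Elasticsearch node stats API exposes the JVM, CPU, and thread pool statistics that these metrics are based on. A minimal sketch for Kibana Dev Tools:
-# JVM heap and CPU usage per node
-GET _nodes/stats/jvm,os
-# Write and search queue sizes and rejected task counts per node
-GET _nodes/stats/thread_pool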
-
Creating Alarm Rules
- Log in to the Cloud Eye console.
- In the navigation pane on the left, choose Alarm Management > Alarm Rules.
- In the Resource Type column, select Cloud Search Service as criteria to search for alarm rules that meet the requirements.
If no alarm rules are available, create one by referring to the "Creating an Alarm Rule" section. For details about how to set Resource Type and Dimension, see Table 1.
-
-Table 1 Alarm rule configuration parameters
-Parameter | Description | Remark
-Resource Type | Type of the resource that the alarm rule is created for | Select Cloud Search Service.
-Dimension | Metric dimension of the selected resource type | CSS supports two dimensions. Select a dimension as required. CSS Clusters: alarm rules are specified by cluster. CSS Clusters - CSS Instances: alarm rules are specified by node in a cluster.
-
-
-
-
-
-
-
Configuring Monitoring Metrics
- Create a monitoring panel by referring to the "Creating a Dashboard" section. If an available monitoring panel has been created, skip this step.
- Add CSS monitoring graphs by referring to the "Adding a Graph" section.
For details about how to set Resource Type and Dimension, see Table 2.
-
-Table 2 Graph configuration parameters
-Parameter | Description | Remark
-Resource Type | Type of the resource to be monitored | Select Cloud Search Service.
-Dimension | Metric dimension | CSS supports two dimensions. Select a dimension as required. CSS Clusters: monitoring is performed by cluster. CSS Clusters - CSS Instances: monitoring is performed by node in a cluster.
-
-
-
-
-
-
-
Viewing Monitoring Metrics
- Log in to the CSS management console.
- Choose Clusters. Locate the target cluster and choose More > View Metric in the Operation column.
- Select a time range.
- View the monitoring metrics.
-
-
-
-
diff --git a/docs/css/umn/css_01_0156.html b/docs/css/umn/css_01_0156.html
deleted file mode 100644
index 198ede90..00000000
--- a/docs/css/umn/css_01_0156.html
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
-Replacing a Specified Node
-If a node in the cluster is faulty, you can create a new node with the same specifications to replace it.
-
Prerequisites
The target cluster is available and has no tasks in progress.
-
-
Constraints
- Only one node can be replaced at a time.
- The ID, IP address, specifications, and AZ of the new node will be the same as those of the original one.
- The configurations you modified manually will not be retained after node replacement. For example, if you have manually added a return route to the original node, you need to add it to the new node again after the node replacement is complete.
- If the node you want to replace is a data node (ess) or cold data node (ess-cold), pay attention to the following precautions:
- Before a data node or cold data node is replaced, its data needs to be migrated to other nodes. To properly store the data, ensure the maximum sum of replicas and primary shards of an index is smaller than the total number of data nodes (ess and ess-cold nodes) in the cluster. The node replacement duration depends heavily on the migration speed.
- If the cluster version is earlier than 7.6.2, the cluster cannot contain closed indexes. Otherwise, its data nodes or cold data nodes cannot be replaced.
- The AZ of the node to be replaced must have two or more data nodes (including ess and ess-cold).
- If the cluster of the node to be replaced does not have a master node (ess-master), the number of available data nodes (including ess and ess-cold) in the cluster must be greater than or equal to 3.
- The preceding precautions do not apply if you are replacing a master node (ess-master) or client node (ess-client).
- Precautions 1 to 4 do not apply if you are replacing a faulty node, regardless of its type. Faulty nodes are not included in the _cat/nodes output (see the example after this list).
-
-
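-To check which nodes the cluster currently recognizes (a faulty node is typically missing from the output), you can run the following standard Elasticsearch request in Kibana Dev Tools:
-GET _cat/nodes?v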
-
Procedure
- Log in to the CSS management console.
- In the navigation pane, choose a cluster type. The cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- On the Modify Configuration page, click the Replace Node tab.
- On the Replace Node tab page, set the following parameters:
-
- Whether to perform data migration: If this option is selected, data migration is performed. If the target node has disabled indexes or indexes that have no replicas, this option must be selected.
- Select the node to be replaced in the data node table.
- - Click Submit.
- Click Back to Cluster List to switch to the Clusters page. The Task Status is Upgrading. When Cluster Status changes to Available, the node has been successfully replaced.
-
-
-
-
diff --git a/docs/css/umn/css_01_0157.html b/docs/css/umn/css_01_0157.html
deleted file mode 100644
index 8949f861..00000000
--- a/docs/css/umn/css_01_0157.html
+++ /dev/null
@@ -1,43 +0,0 @@
-
-
-Adding Master/Client Nodes
-If workloads on the data plane of a cluster increase, you can add master or client nodes as needed. Services are not interrupted while they are added.
-
Prerequisites
The target cluster is available and has no tasks in progress.
-
-
Constraints
- If a cluster already has master and client nodes, the Add Master/Client Node tab is not displayed on the Modify Configuration page. In this case, you need to add the master or client nodes by referring to Scaling Out a Cluster.
- When you add master or client nodes, the number of nodes that can be configured varies depending on the node type. For details, see Table 1.
-
Table 1 Number of nodes in different types
-Node Type | Number
-Master node | An odd number ranging from 3 to 9
-Client node | 1 to 32
-
-
-
-
-
-
-
Procedure
- Log in to the CSS management console.
- In the navigation pane, choose a cluster type. The cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- On the Modify Configuration page, choose the Add Master/Client Node tab.
- Select the target node type and set the node specifications, quantity, and storage.
- Master and client nodes cannot be added at the same time.
- If a cluster already has a master or client node, you can only add nodes of the other type.
-Figure 1 Adding a master or client node
- - Click Next.
- Confirm the information and click Submit.
Return to the cluster list page. The Task Status of the cluster is Scaling out.
-- If you added a master node and Cluster Status changed to Available, the master node has been successfully added.
If the cluster version is earlier than 7.x, when the Cluster Status changes to Available, you need to restart all data nodes and cold data nodes in the cluster to make the new node take effect. Before the restart, the cluster may be unavailable. For details, see Restarting a Cluster.
-
- - If you added a client node and Cluster Status changed to Available, the client node has been added. You can restart data nodes and cold data nodes to shut down Cerebro and Kibana processes on the nodes.
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0158.html b/docs/css/umn/css_01_0158.html
deleted file mode 100644
index b86f3431..00000000
--- a/docs/css/umn/css_01_0158.html
+++ /dev/null
@@ -1,82 +0,0 @@
-
-
-Changing the Security Mode
-After a cluster is created, its security mode can be changed in the following ways: switching from the non-security mode to the security mode, switching from the security mode to the non-security mode, or switching the protocol (HTTP/HTTPS) of a security cluster.
-
-
Context
You can create clusters in multiple security modes. For details about the differences between security modes, see Table 1.
-
Table 1 Cluster security modes
-Security Mode | Scenario | Advantage | Disadvantage
-Non-Security Mode | Intranet services and test scenarios | Simple and easy to access. | Poor security. Anyone can access such clusters.
-Security Mode + HTTP Protocol | User permissions can be isolated, which is applicable to scenarios sensitive to cluster performance. | Security authentication is required for accessing such clusters, which improves cluster security. Accessing a cluster over HTTP retains the high performance of the cluster. | Such clusters cannot be accessed from the public network.
-Security Mode + HTTPS Protocol | Scenarios that require high security and public network access. | Security authentication is required for accessing such clusters, which improves cluster security. The HTTPS protocol allows such clusters to be accessed from the public network. | The performance of clusters using HTTPS is 20% lower than that of clusters using HTTP.
-
-
-
-
-
-
-
Prerequisites
- You are advised to back up data before changing the cluster security mode.
- The target cluster is available and has no tasks in progress.
-
-
Constraints
- Only clusters (whose version is 6.5.4 or later) created after November 2022 support security mode switching.
- A cluster automatically restarts when its security mode is being changed. Services are interrupted during the restart. The authentication mode for invoking the cluster will change after the restart, and client configurations need to be adjusted accordingly.
- If a Kibana session is already open for the cluster, a session error message will be displayed after you change the cluster security mode. In this case, clear the browser cache and open Kibana again.
-
-
Switching from the Non-Security Mode to Security Mode
You can change a non-security cluster to a security cluster that uses HTTP or HTTPS. After a cluster's security mode is enabled, security authentication is required for accessing the cluster.
-
- Log in to the CSS management console.
- In the navigation pane, choose a cluster type. The cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- Choose the Configure Security Mode tab.
- Enable the security mode. Enter and confirm the administrator password of the cluster.
Figure 1 Enabling the security mode
- - Enable or disable HTTPS Access.
- If you enable HTTPS Access: The HTTPS protocol is used to encrypt cluster communication and you can configure public networks to access the cluster.
- If you disable HTTPS Access: The HTTP protocol is used and you cannot configure public networks to access the cluster.
- - Click Submit. Confirm the information and the cluster list page is displayed.
The Task Status of the cluster is The security mode is changing. When the cluster status changes to Available, the security mode has been successfully changed.
-
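-After the change completes, you can verify that authentication is now required. The commands below are a sketch; <private-network-address> and <password> are placeholders, and the second form applies only if you enabled HTTPS Access (-k skips certificate verification):
-# Security cluster accessed over HTTP
-curl http://<private-network-address>:9200 -u admin:<password>
-# Security cluster accessed over HTTPS
-curl -k https://<private-network-address>:9200 -u admin:<password>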
-
-
Switching from the Security to Non-Security Mode
You can change a security cluster that uses HTTP or HTTPS to a non-security cluster. After a cluster's security mode is disabled, security authentication is no longer required for accessing the cluster.
-
- Clusters in non-security mode can be accessed without security authentication, and HTTP protocol is used to transmit data. Ensure the security of the cluster access environment and do not expose the access interface to the public network.
- During the switchover from the security mode to the non-security mode, the indexes of the original security cluster will be deleted. Back up data before disabling the security mode.
- If a security cluster has been bound to a public IP address, unbind it before changing the security mode.
- If a security cluster has enabled Kibana public network access, disable it before changing the security mode.
-
-
- Log in to the CSS management console.
- In the navigation pane on the left, choose Clusters. On the displayed Clusters page, locate the target cluster and choose More > Modify Configuration in the Operation column.
- Choose the Configure Security Mode tab.
- Disable the security mode.
Figure 2 Disabling the security mode
- - Click Submit. Confirm the information and the cluster list page is displayed.
The Task Status of the cluster is The security mode is changing. When the cluster status changes to Available, the security mode has been successfully changed.
-
-
-
Switching the Protocol of Security Clusters
You can change the protocol of a security cluster.
-
If a security cluster has been bound to a public IP address, you need to unbind it before changing HTTPS protocol to HTTP.
-
-
- Log in to the CSS management console.
- In the navigation pane on the left, choose Clusters. On the displayed Clusters page, locate the target cluster and choose More > Modify Configuration in the Operation column.
- Choose the Configure Security Mode tab.
- Enable or disable HTTPS Access.
Figure 3 Configuring the protocol
-- If you enable HTTPS Access:
HTTPS protocol is used to encrypt cluster communication and you can configure public network access.
- - If you disable HTTPS Access: An alarm message is displayed. Click OK to disable the function.
Cluster communication is no longer encrypted and the public network access function cannot be enabled.
-
- - Click Submit. Confirm the information and the cluster list page is displayed.
The Task Status of the cluster is The security mode is changing. When the cluster status changes to Available, the security mode has been successfully changed.
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0161.html b/docs/css/umn/css_01_0161.html
deleted file mode 100644
index 6c5df62b..00000000
--- a/docs/css/umn/css_01_0161.html
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
-Read/Write Splitting
-
-
-
diff --git a/docs/css/umn/css_01_0162.html b/docs/css/umn/css_01_0162.html
deleted file mode 100644
index db65359d..00000000
--- a/docs/css/umn/css_01_0162.html
+++ /dev/null
@@ -1,13 +0,0 @@
-
-
-Features
-CSS supports read/write splitting. Data written to the primary cluster (Leader) can be automatically synchronized to the secondary cluster (Follower). In this way, data is written to the primary cluster and queried in the secondary cluster. The read and write can be separated to improve the query performance (as shown in the left part of Figure 1). When the primary cluster is unavailable, the secondary cluster can provide data write and query services (as shown in the right part of Figure 1).
-
Figure 1 Two application scenarios of read/write splitting
-
Currently, only clusters of versions 7.6.2 and 7.10.2 support read/write splitting.
-
-
-
diff --git a/docs/css/umn/css_01_0164.html b/docs/css/umn/css_01_0164.html
deleted file mode 100644
index 3c4ae16a..00000000
--- a/docs/css/umn/css_01_0164.html
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
-Instructions
-
-
-
diff --git a/docs/css/umn/css_01_0165.html b/docs/css/umn/css_01_0165.html
deleted file mode 100644
index 154eb206..00000000
--- a/docs/css/umn/css_01_0165.html
+++ /dev/null
@@ -1,32 +0,0 @@
-
-
-Basic Settings
-- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster, and click Access Kibana in the Operation column.
- Click Dev Tools in the navigation tree on the left and perform the following operations:
Configure the primary cluster information.
PUT /_cluster/settings
-{
- "persistent" : {
- "cluster" : {
- "remote.rest" : {
- "leader1" : {
- "seeds" : [
- "http://10.0.0.1:9200",
- "http://10.0.0.2:9200",
- "http://10.0.0.3:9200"
- ] ,
- "username": "elastic",
- "password": "*****"
- }
- }
- }
- }
-}
-
- Secondary clusters must be able to access the REST API (default port: 9200) of the primary cluster.
- The primary cluster name is leader1 and can be changed.
- The value of seeds is the REST address of the primary cluster. Multiple values are supported. If HTTPS access is enabled on the primary cluster, the URI scheme must be changed to https.
- username and password are required only when the security mode is enabled for the primary cluster.
- After the configuration is complete, you can use the GET _remote/rest/info API to obtain the connection status with the primary cluster.
-
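-For example, after the settings take effect, you can run the following request in Dev Tools on the secondary cluster to check connectivity to the primary cluster (the response format is not shown here):
-GET _remote/rest/info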
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0166.html b/docs/css/umn/css_01_0166.html
deleted file mode 100644
index d3f14957..00000000
--- a/docs/css/umn/css_01_0166.html
+++ /dev/null
@@ -1,64 +0,0 @@
-
-
-Synchronizing Specified Indexes
-Synchronize a single index.
-
The request URL and request body parameters are as follows:
-
PUT start_remote_sync
-
-
Table 1 Request body parameters
-Parameter | Description
-remote_cluster | Name of the primary cluster. The default name is leader1. You can change the name by configuring the primary cluster information.
-remote_index | Name of the index to be synchronized in the primary cluster
-local_index | Name of the index being synchronized to the secondary cluster
-settings | Index settings of the index being synchronized
-
-
-
-
-
After the synchronization function is enabled, indexes in the secondary cluster become read-only and are periodically synchronized with indexes in the primary cluster.
-
The following are two examples:
-
- Synchronize a single index from the primary cluster to the secondary cluster.
PUT start_remote_sync
-{
- "remote_cluster": "leader1",
- "remote_index": "data1_leader",
- "local_index": "data1_follower"
-}
- - Synchronize a single index from the primary cluster to the secondary cluster and modify the index configurations.
PUT start_remote_sync
-{
- "remote_cluster": "leader1",
- "remote_index": "data1_leader",
- "local_index": "data1_follower",
- "settings": {
- "number_of_replicas": 4
- }
-}
-
The following index configurations cannot be modified:
-
- number_of_shards
- version.created
- uuid
- creation_date
- soft_deletes.enabled
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0167.html b/docs/css/umn/css_01_0167.html
deleted file mode 100644
index a2c59bc8..00000000
--- a/docs/css/umn/css_01_0167.html
+++ /dev/null
@@ -1,70 +0,0 @@
-
-
-Matching Index Synchronization
-The request URL and request body parameters are as follows:
-
PUT auto_sync/pattern/{pattern_name}
-
-
Table 1 Request body parameters
-Parameter | Description
-remote_cluster | Name of the primary cluster. The default name is leader1. You can change the name by configuring the primary cluster information.
-remote_index_patterns | Pattern of the indexes to be synchronized in the primary cluster. The wildcard (*) is supported.
-local_index_pattern | Pattern of the indexes being synchronized in the secondary cluster. A template variable can be used. For example, if this parameter is set to {{remote_index}}-sync, the index log1 changes to log1-sync after synchronization.
-apply_exist_index | Whether to synchronize existing indexes in the primary cluster. The default value is true.
-settings | Index settings of the indexes being synchronized
-
-
-
-
-
The following are two examples:
-
1. Synchronize a single index from the primary cluster to the secondary cluster.
-
PUT auto_sync/pattern/pattern1
-{
- "remote_cluster": "leader1",
- "remote_index_patterns": "log*",
- "local_index_pattern": "{{remote_index}}-sync",
- "apply_exist_index": true
-}
-
2. Synchronize a single index from the primary cluster to the secondary cluster and modify the index configurations.
-
PUT auto_sync/pattern/pattern1
-{
- "remote_cluster": "leader1",
- "remote_index_patterns": "log*",
- "local_index_pattern": "{{remote_index}}-sync",
- "apply_exist_index": true,
- "settings": {
- "number_of_replicas": 4
- }
-}
-
The following index configurations cannot be modified:
-
- number_of_shards
- version.created
- uuid
- creation_date
- soft_deletes.enabled
-
-
-
-
diff --git a/docs/css/umn/css_01_0168.html b/docs/css/umn/css_01_0168.html
deleted file mode 100644
index fc2ed271..00000000
--- a/docs/css/umn/css_01_0168.html
+++ /dev/null
@@ -1,13 +0,0 @@
-
-
-Stopping Index Synchronization
-You can specify multiple indexes or use a wildcard to match the target indexes and terminate their synchronization tasks. Subsequent modifications to the indexes in the primary cluster will not be synchronized to the secondary cluster. The read-only state of the indexes in the secondary cluster is cancelled, and new data can be written to them.
-
An example request is as follows:
-
PUT log*/stop_remote_sync
-
-
-
diff --git a/docs/css/umn/css_01_0169.html b/docs/css/umn/css_01_0169.html
deleted file mode 100644
index 5d2f396c..00000000
--- a/docs/css/umn/css_01_0169.html
+++ /dev/null
@@ -1,94 +0,0 @@
-
-
-Other Management APIs
-- Querying the created patterns.
This API is used to query the pattern list and query a specified pattern by name.
-An example request is as follows:
-GET auto_sync/pattern
-GET auto_sync/pattern/{pattern_name}
-The following is an example of the response:
-{
- "patterns" : [
- {
- "name" : "pattern1",
- "pattern" : {
- "remote_cluster" : "leader",
- "remote_index_patterns" : [
- "log*"
- ],
- "local_index_pattern" : "{{remote_index}}-sync",
- "settings" : { }
- }
- }
- ]
-}
- - Deleting a created pattern.
This API is used to delete a specified pattern.
-An example request is as follows:
-DELETE auto_sync/pattern/{pattern_name}
- - Obtaining the automatic synchronization status.
This API is used to obtain the synchronization status of matched indexes.
-An example request is as follows:
-GET auto_sync/stats
-The following is an example of the response:
-{
- "success_count" : 3,
- "failed_count" : 0,
- "failed_remote_cluster_state_requests_count" : 0,
- "last_fail_exception" : { },
- "last_fail_remote_cluster_requests_exception" : { }
-}
- - Obtaining the synchronization status of the index that is being synchronized.
An example request is as follows:
-GET {index_name}/sync_stats
-The following is an example of the response:
-{
- "indices" : {
- "data1_follower" : {
- "shards" : {
- "0" : [
- {
- "primary" : false,
- "total_synced_times" : 27,
- "total_empty_times" : 25,
- "total_synced_files" : 4,
- "total_synced_bytes" : 3580,
- "total_paused_nanos" : 0,
- "total_paused_times" : 0,
- "current" : {
- "files_count" : 0,
- "finished_files_count" : 0,
- "bytes" : 0,
- "finished_bytes" : 0
- }
- },
- {
- "primary" : true,
- "total_synced_times" : 28,
- "total_empty_times" : 26,
- "total_synced_files" : 20,
- "total_synced_bytes" : 17547,
- "total_paused_nanos" : 0,
- "total_paused_times" : 0,
- "current" : {
- "files_count" : 0,
- "finished_files_count" : 0,
- "bytes" : 0,
- "finished_bytes" : 0
- }
- }
- ]
- }
- }
- }
-}
- - Changing the synchronization period.
The synchronization period is 30 seconds by default and can be modified.
-An example request is as follows (change the synchronization period to 2 seconds):
-PUT {index_name}/_settings
-{
- "index.remote_sync.sync_interval": "2s"
-}
-
-
-
-
diff --git a/docs/css/umn/css_01_0170.html b/docs/css/umn/css_01_0170.html
deleted file mode 100644
index 0ce38150..00000000
--- a/docs/css/umn/css_01_0170.html
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
-Best Practices
-This section describes how to switch from the primary cluster to the secondary cluster when the primary cluster is faulty.
-
1. If the synchronization of specified indexes has been configured between the primary and secondary clusters:
-
(1) Call the API for stopping index synchronization in the secondary cluster. In this case, the read and write traffic can be switched to the secondary cluster.
-
(2) After the primary cluster recovers, call the index synchronization API to synchronize data from the secondary cluster to the primary cluster.
-
2. If a matching pattern for index synchronization has been established between the primary and secondary clusters:
-
(1) Call the API for deleting the created matching pattern for index synchronization in the secondary cluster.
-
(2) Call the API for stopping index synchronization on the secondary cluster (using * for matching). In this case, the read and write traffic can be switched to the secondary cluster.
-
(3) After the primary cluster recovers, call the index synchronization API to synchronize data from the secondary cluster to the primary cluster.
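-The following sketch lists the calls for the pattern-based scenario above, using the APIs described in the previous sections (pattern1 and the wildcard are examples; adjust them to your own configuration):
-# Step 1 (on the secondary cluster): delete the synchronization pattern
-DELETE auto_sync/pattern/pattern1
-# Step 2 (on the secondary cluster): stop synchronization for all matched indexes so that they become writable
-PUT */stop_remote_sync
-# Step 3 (after the faulty cluster recovers): call start_remote_sync, as shown in Synchronizing Specified Indexes, to copy the data back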
-
-
-
diff --git a/docs/css/umn/css_01_0172.html b/docs/css/umn/css_01_0172.html
deleted file mode 100644
index 72939175..00000000
--- a/docs/css/umn/css_01_0172.html
+++ /dev/null
@@ -1,22 +0,0 @@
-
-
-Enhanced Aggregation
-
-
-
diff --git a/docs/css/umn/css_01_0173.html b/docs/css/umn/css_01_0173.html
deleted file mode 100644
index fb0a89c8..00000000
--- a/docs/css/umn/css_01_0173.html
+++ /dev/null
@@ -1,44 +0,0 @@
-
-
-Features
-Enhanced aggregation is a service-aware optimization feature. It improves the aggregation and analysis performance of observability workloads.
-
Currently, enhanced aggregation is supported only by clusters of version 7.10.2.
-
Working Principles
In large-scale dataset aggregation and analysis scenarios, data grouping and aggregation takes a lot of time. Improving the grouping aggregation capability depends on the following key features:
-
- Sorting key: Data is stored in sequence based on the sorting key.
- Clustering key: It is contained in the sorting key. Data is clustered based on the clustering key.
-
When data is clustered, enhanced aggregation uses vectorization to process data in batches, which improves aggregation performance.
-
-
-Table 1 Feature parameters
-Parameter | Description
-index.search.turbo.enabled | Whether to enable the feature. The default value is true.
-index.sort.field | Sorting key
-index.cluster.field | Clustering key
-
-
-
-
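-To confirm which of these settings are applied on an index (testindex is the example index used in the following sections), you can query its settings in Dev Tools:
-GET testindex/_settings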
-
Features
Based on different service requirements, enhanced aggregation can be used in the following three scenarios: grouping and aggregation of low-cardinality fields, high-cardinality field histogram aggregation, and mixed low-cardinality and high-cardinality fields.
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0174.html b/docs/css/umn/css_01_0174.html
deleted file mode 100644
index d712e0c7..00000000
--- a/docs/css/umn/css_01_0174.html
+++ /dev/null
@@ -1,62 +0,0 @@
-
-
-Grouping and Aggregation of Low-cardinality Fields
-Low-cardinality fields have high data clustering performance when being sorted, which facilitates vectorized optimization. Assume that the following query statement exists:
-
POST testindex/_search
-{
- "size": 0,
- "aggs": {
- "groupby_region": {
- "terms": {
- "field": "region"
- },
- "aggs": {
- "groupby_host": {
- "terms": {
- "field": "host"
- },
- "aggs": {
- "avg_cpu_usage": {
- "avg": {
- "field": "cpu_usage"
- }
- }
- }
- }
- }
- }
- }
-}
-
Assume that the region and host are low-cardinality fields. To use the enhanced aggregation, set the parameters as follows:
-
The clustering key must be a prefix subset of the sorting key.
-
-
// Configure an index
-"settings" : {
- "index" : {
- "search" : {
- "turbo" : {
- "enabled" : "true" // Enable optimization
- }
- },
- "sort" : { // Specify a sorting key
- "field" : [
- "region",
- "host",
- "other"
- ]
- },
- "cluster" : {
- "field" : [ // Specify a clustering key
- "region",
- "host"
- ]
- }
- }
-}
-
-
-
diff --git a/docs/css/umn/css_01_0175.html b/docs/css/umn/css_01_0175.html
deleted file mode 100644
index dfeca19b..00000000
--- a/docs/css/umn/css_01_0175.html
+++ /dev/null
@@ -1,47 +0,0 @@
-
-
-High-cardinality Field Histogram Aggregation
-High-cardinality fields are usually used for histogram grouping and aggregation rather than single-point grouping and aggregation, for example, when collecting log statistics over a certain period. Assume that the following query statement exists:
-
POST testindex/_search?pretty
-{
-  "size": 0,
-  "aggs": {
-    "groupbytime": {
-      "date_histogram": {
-        "field": "timestamp",
-        "calendar_interval": "day"
-      },
-      "aggs": {
-        "avg_score": {
-          "avg": {
-            "field": "score"
-          }
-        }
-      }
-    }
-  }
-}
-
This query groups the field timestamp using a histogram and calculates the average score. timestamp is a typical high-cardinality field. To use the enhanced aggregation for the preceding query, set parameters as follows:
-
// Configure an index
-"settings" : {
- "index" : {
- "search" : {
- "turbo" : {
- "enabled" : "true" // Enable optimization
- }
- },
- "sort" : { // Specify a sorting key
- "field" : [
- "timestamp"
- ]
- }
- }
-}
-
-
-
-
diff --git a/docs/css/umn/css_01_0176.html b/docs/css/umn/css_01_0176.html
deleted file mode 100644
index ed2eff84..00000000
--- a/docs/css/umn/css_01_0176.html
+++ /dev/null
@@ -1,71 +0,0 @@
-
-
-Low-cardinality and High-cardinality Field Mixing
-In the scenario where low-cardinality and high-cardinality fields are mixed, assume that the following query statement exists:
-
POST testindex/_search
-{
- "size": 0,
- "aggs": {
- "groupby_region": {
- "terms": {
- "field": "region"
- },
- "aggs": {
- "groupby_host": {
- "terms": {
- "field": "host"
- },
- "aggs": {
- "groupby_timestamp": {
- "date_histogram": {
- "field": "timestamp",
- "interval": "day"
- },
- "aggs": {
- "avg_score": {
- "avg": {
- "field": "score"
- }
- }
- }
- }
- }
- }
- }
- }
- }
-}
-
Group the low-cardinality fields and create a histogram using the high-cardinality fields. To use the enhanced aggregation for the preceding query, set the parameters as follows:
-
- The clustering key must be a prefix subset of the sorting key.
- The high-cardinality field must be included in the sorting key and must immediately follow the last low-cardinality field.
-
-
// Configure an index
-"settings" : {
- "index" : {
- "search" : {
- "turbo" : {
- "enabled" : "true" // Enable optimization
- }
- },
- "sort" : { // Specify a sorting key
- "field" : [
- "region",
- "host",
- "timestamp",
- "other"
- ]
- },
- "cluster" : {
- "field" : [ // Specify a clustering key
- "region",
- "host"
- ]
- }
- }
-}
-
-
-
diff --git a/docs/css/umn/css_01_0177.html b/docs/css/umn/css_01_0177.html
deleted file mode 100644
index 495fcdf9..00000000
--- a/docs/css/umn/css_01_0177.html
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
-Enhanced Cluster Monitoring
-
-
-
diff --git a/docs/css/umn/css_01_0178.html b/docs/css/umn/css_01_0178.html
deleted file mode 100644
index c2a1cdee..00000000
--- a/docs/css/umn/css_01_0178.html
+++ /dev/null
@@ -1,70 +0,0 @@
-
-
-P99 Latency Monitoring
-Context
The Elasticsearch community only discusses how to monitor the average latency of search requests, which cannot reflect the actual search performance of a cluster. To enhance monitoring, CSS allows you to monitor the P99 latency of search requests in clusters.
-
-
Prerequisites
Currently, only clusters of versions 7.6.2 and 7.10.2 support P99 latency monitoring.
-
-
Obtaining Monitoring Information
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation tree on the left, choose Dev Tools and run the following command to check the P99 latency of the current cluster:
GET /search/stats/percentile
-Example response:
-{
- "overall" : {
- "1.0" : 2.0,
- "5.0" : 2.0,
- "25.0" : 6.5,
- "50.0" : 19.5,
- "75.0" : 111.0,
- "95.0" : 169.0,
- "99.0" : 169.0,
- "max" : 169.0,
- "min" : 2.0
- },
- "last_one_day" : {
- "1.0" : 2.0,
- "5.0" : 2.0,
- "25.0" : 6.5,
- "50.0" : 19.5,
- "75.0" : 111.0,
- "95.0" : 169.0,
- "99.0" : 169.0,
- "max" : 169.0,
- "min" : 2.0
- },
- "latest" : {
- "1.0" : 26.0,
- "5.0" : 26.0,
- "25.0" : 26.0,
- "50.0" : 26.0,
- "75.0" : 26.0,
- "95.0" : 26.0,
- "99.0" : 26.0,
- "max" : 26.0,
- "min" : 26.0
- }
-}
-
- In the response, overall indicates all the statistics that have been collected since the cluster startup, last_one_day indicates the statistics collected in the last day, and latest indicates the statistics that have been collected since the last reset.
- The calculated P99 latency is an estimation. It is more precise than the P50 latency.
- The P99 latency of a cluster is cleared and recalculated if the cluster is restarted.
-
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0179.html b/docs/css/umn/css_01_0179.html
deleted file mode 100644
index 7f1a7361..00000000
--- a/docs/css/umn/css_01_0179.html
+++ /dev/null
@@ -1,63 +0,0 @@
-
-
-HTTP Status Code Monitoring
-Context
When an external system accesses Elasticsearch over HTTP, a response and the corresponding status code are returned. The open-source Elasticsearch server does not collect these status codes, so users cannot monitor the status of Elasticsearch APIs or cluster requests. CSS allows you to monitor the HTTP status codes of clusters.
-
-
Prerequisites
Currently, only clusters of versions 7.6.2 and 7.10.2 support HTTP status code monitoring.
-
-
Obtaining Status Codes
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation tree on the left, choose Dev Tools.
- On the console page of Dev Tools, run commands based on the cluster version.
- For clusters of version 7.6.2, run the following command to obtain the status code statistics:
GET /_nodes/http_stats
-Example response:
-{
- "_nodes" : {
- "total" : 1,
- "successful" : 1,
- "failed" : 0 },
- "cluster_name" : "css-8362",
- "nodes" : {
- "F9IFdQPARaOJI7oL7HOXtQ" : {
- "http_code" : {
- "200" : 114,
- "201" : 5,
- "429" : 0,
- "400" : 7,
- "404" : 0,
- "405" : 0
- }
- }
- }
- }
- - For clusters of version 7.10.2, run the following command to obtain the status code statistics:
GET _nodes/stats/http
-Example response:
-{
-// ...
- "cluster_name" : "css-2985",
- "nodes" : {
-// ...
- "omvR9_W-TsGApraMApREjA" : {
-
-// ...
- "http" : {
- "current_open" : 4,
- "total_opened" : 37,
- "http_code" : {
- "200" : 25,
- "201" : 7,
- "429" : 0,
- "400" : 3,
- "404" : 0,
- "405" : 0
- }
- }
- }
- }
-}
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0181.html b/docs/css/umn/css_01_0181.html
deleted file mode 100644
index d50d498f..00000000
--- a/docs/css/umn/css_01_0181.html
+++ /dev/null
@@ -1,226 +0,0 @@
-
-
-Scenario
-CSS integrates shared load balancers and allows you to bind public network access and enable the VPC Endpoint service. Dedicated load balancers provide more functions and higher performance than shared load balancers. This section describes how to connect a cluster to a dedicated load balancer.
-
Advantages of connecting a cluster to a dedicated load balancer:
- A non-security cluster can also use capabilities of the Elastic Load Balance (ELB) service.
- You can use customized certificates for HTTPS bidirectional authentication.
- Layer-7 traffic monitoring and alarm configuration are supported, allowing you to view the cluster status at any time.
-
-
There are eight service forms for clusters in different security modes to connect to dedicated load balancers. Table 1 describes the load balancer capabilities for the eight service forms. Table 2 describes the configurations for the eight service forms.
-
-You are advised not to connect a load balancer that has an EIP bound to a non-security cluster. Non-security clusters can be accessed over HTTP without security authentication, so a load balancer with an EIP exposes such clusters to the Internet, which may bring security risks.
-
-
-
Table 1 ELB capabilities for different clusters
-Security Mode | Service Form Provided by ELB for External Systems | ELB Load Balancing | ELB Traffic Monitoring | ELB Two-way Authentication
-Non-security | No authentication | Yes | Yes | No
-Non-security | One-way authentication / Two-way authentication | Yes | Yes | Yes
-Security mode + HTTP | Password authentication | Yes | Yes | No
-Security mode + HTTP | One-way authentication + Password authentication / Two-way authentication + Password authentication | Yes | Yes | Yes
-Security mode + HTTPS | One-way authentication + Password authentication / Two-way authentication + Password authentication | Yes | Yes | Yes
-
-
-
-
-
-
Table 2 Configuration for interconnecting different clusters with ELB
-Security Mode | Service Form Provided by ELB for External Systems | ELB Listener: Frontend Protocol | ELB Listener: Port | ELB Listener: SSL Parsing Mode | Backend Server Group: Backend Protocol | Backend Server Group: Health Check Port | Backend Server Group: Health Check Path
-Non-security | No authentication | HTTP | 9200 | No authentication | HTTP | 9200 | /
-Non-security | One-way authentication | HTTPS | 9200 | One-way authentication | HTTP | 9200 | /
-Non-security | Two-way authentication | HTTPS | 9200 | Two-way authentication | HTTP | 9200 | /
-Security mode + HTTP | Password authentication | HTTP | 9200 | No authentication | HTTP | 9200 | /_opendistro/_security/health
-Security mode + HTTP | One-way authentication + Password authentication | HTTPS | 9200 | One-way authentication | HTTP | 9200 | /_opendistro/_security/health
-Security mode + HTTP | Two-way authentication + Password authentication | HTTPS | 9200 | Two-way authentication | HTTP | 9200 | /_opendistro/_security/health
-Security mode + HTTPS | One-way authentication + Password authentication | HTTPS | 9200 | One-way authentication | HTTPS | 9200 | /_opendistro/_security/health
-Security mode + HTTPS | Two-way authentication + Password authentication | HTTPS | 9200 | Two-way authentication | HTTPS | 9200 | /_opendistro/_security/health
-
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0182.html b/docs/css/umn/css_01_0182.html
deleted file mode 100644
index b35142a7..00000000
--- a/docs/css/umn/css_01_0182.html
+++ /dev/null
@@ -1,397 +0,0 @@
-
-
-Connecting to a Dedicated Load Balancer
-This section describes how to connect a CSS cluster to a dedicated load balancer.
-
(Optional) Preparing a Self-signed Certificate
If the target ELB listener uses the HTTP protocol, skip this step.
-
Prepare and upload a self-signed certificate.
-
You are advised to use a certificate purchased in Cloud Certificate Manager (CCM) or issued by an authoritative organization.
-
-
- Log in to a Linux client where the OpenSSL tool and JDK are installed.
- Run the following commands to create a self-signed certificate:
-mkdir ca
-mkdir server
-mkdir client
-
-#Use OpenSSL to create a CA certificate.
-cd ca
-#Create the OpenSSL configuration file ca_cert.conf for the CA certificate.
-cat >ca_cert.conf <<EOF
-[ req ]
-distinguished_name = req_distinguished_name
-prompt = no
-
-[ req_distinguished_name ]
- O = ELB
-EOF
-#Create private key file ca.key for the CA certificate.
-openssl genrsa -out ca.key 2048
-#Create the CSR file ca.csr for the CA certificate.
-openssl req -out ca.csr -key ca.key -new -config ./ca_cert.conf
-#Create a self-signed CA certificate ca.crt.
-openssl x509 -req -in ca.csr -out ca.crt -sha1 -days 5000 -signkey ca.key
-#Convert the CA certificate format to p12.
-openssl pkcs12 -export -clcerts -in ca.crt -inkey ca.key -out ca.p12
-#Convert the CA certificate format to JKS.
-keytool -importkeystore -srckeystore ca.p12 -srcstoretype PKCS12 -deststoretype JKS -destkeystore ca.jks
-
-
-#Use the CA certificate to issue a server certificate.
-cd ../server
-#Create the OpenSSL configuration file server_cert.conf for the server certificate. Change the CN field to the domain name or IP address of the server as required.
-cat >server_cert.conf <<EOF
-[ req ]
-distinguished_name = req_distinguished_name
-prompt = no
-
-[ req_distinguished_name ]
- O = ELB
- CN = 127.0.0.1
-EOF
-#Create the private key file server.key for the server certificate.
-openssl genrsa -out server.key 2048
-#Create the CSR request file server.csr for the server certificate.
-openssl req -out server.csr -key server.key -new -config ./server_cert.conf
-#Use the CA certificate to issue the server certificate server.crt.
-openssl x509 -req -in server.csr -out server.crt -sha1 -CAcreateserial -days 5000 -CA ../ca/ca.crt -CAkey ../ca/ca.key
-#Convert the server certificate format to p12.
-openssl pkcs12 -export -clcerts -in server.crt -inkey server.key -out server.p12
-#Convert the service certificate format to JKS.
-keytool -importkeystore -srckeystore server.p12 -srcstoretype PKCS12 -deststoretype JKS -destkeystore server.jks
-
-
-#Use the CA certificate to issue a client certificate.
-cd ../client
-#Create the OpenSSL configuration file client_cert.conf for the client certificate. Change the CN field to the domain name or IP address of the server as required.
-cat >client_cert.conf <<EOF
-[ req ]
-distinguished_name = req_distinguished_name
-prompt = no
-
-[ req_distinguished_name ]
-O = ELB
-CN = 127.0.0.1
-EOF
-#Create private key client.key for the client certificate.
-openssl genrsa -out client.key 2048
-#Create the CSR file client.csr for the client certificate.
-openssl req -out client.csr -key client.key -new -config ./client_cert.conf
-#Use the CA certificate to issue the client certificate client.crt.
-openssl x509 -req -in client.csr -out client.crt -sha1 -CAcreateserial -days 5000 -CA ../ca/ca.crt -CAkey ../ca/ca.key
-#Convert the client certificate to a p12 file that can be identified by the browser.
-openssl pkcs12 -export -clcerts -in client.crt -inkey client.key -out client.p12
-#Convert the client certificate format to JKS.
-keytool -importkeystore -srckeystore client.p12 -srcstoretype PKCS12 -deststoretype JKS -destkeystore client.jks
-
-
- Upload the self-signed certificate. For details, see the section "Configuring the Server Certificate and Private Key" in the Elastic Load Balance User Guide.
-
-
Creating a Dedicated Load Balancer
- Log in to the ELB management console.
- Create a dedicated load balancer. For details, see Creating a Dedicated Load Balancer. Table 1 describes the parameters required for connecting a CSS cluster with a dedicated load balancer.
-
Table 1 Parameters for interconnecting a CSS cluster with a dedicated load balancer
-Parameter | Description | Example
-Type | Load balancer type. Select Dedicated. | Dedicated
-Billing Mode | Billing mode of the dedicated load balancer. | Pay-per-use
-Region | Region where the CSS cluster is located. | -
-IP as Backend Servers | A CSS cluster can be connected only after the cross-VPC backend is enabled. | Enabled
-Network Type | Type of the network used by the load balancer to provide services for external systems. | Private IPv4 network
-VPC | VPC where the load balancer works. This parameter is mandatory no matter which network type is selected. Select the VPC of the CSS cluster. | -
-Subnet | Subnet where the load balancer is to be created. This parameter is mandatory no matter which network type is selected. Select the subnet of the CSS cluster. | -
-Specifications | You are advised to select Application load balancing (HTTP/HTTPS), which provides better functions and performance. | Application load balancing (HTTP/HTTPS); Small I
-
-
-
-
-
-
-
Interconnecting with a Load Balancer
A cluster in security mode with HTTPS access enabled does not support HTTP protocol authentication. If you need to enable HTTP protocol authentication, disable the security mode of the cluster.
-
Before changing the security mode, disable load balancing. After the security mode is changed, enable load balancing.
-
-
- Log in to the CSS management console.
- On the Clusters page, select the cluster you want to connect to the load balancer and click the cluster name. The cluster basic information page is displayed.
- In the navigation pane, choose Load Balancing. Toggle on the load balancing switch and configure basic load balancing information.
- Load Balancer: Select a created load balancer. You can also click Create Load Balancer to create one.
- Agency: Select an agency name. If no agency is available, click Create Agency to create one. The selected agency must have the ELB Administrator and ELB FullAccess permissions.
Figure 1 Enabling load balancing
-
- - Click OK. The listener configuration page is displayed.
Figure 2 Creating a listener
- - In the Listener Configuration area, click
to configure listener information.Figure 3 Configuring a listener
-
-Table 2 Listener configuration information
-Parameter | Description
-Frontend Protocol | The protocol used by the client and listener to distribute traffic. Select a protocol as required.
-Frontend Port | The port used by the client and listener to distribute traffic, for example, 9200. Specify this parameter as required.
-SSL Authentication | Authentication mode for the client to access the server. Select a parsing mode as required.
-Server Certificate | The server certificate is used for SSL handshake negotiation. The certificate content and private key must be provided. When SSL Authentication is set to Two-way authentication, this parameter is mandatory.
-CA Certificate | Also called the client CA public key certificate, it is used to verify the issuer of a client certificate. When HTTPS two-way authentication is enabled, an HTTPS connection can be established only if the client provides a certificate issued by the specified CA. This parameter is mandatory only when the Frontend Protocol is set to HTTPS.
-
-
-
-
- - (Optional) In the Connection Mode area, you can click Settings next to Access Control to configure the IP addresses or network segments that are allowed to access the system. If you do not set the IP addresses or network segments, all IP addresses are allowed to access the system by default.
Figure 4 Configuring access control
-
-
In the Health Check area, you can view the health check result of each node IP address. The following table describes the health check results.
-Health Check Result | Description
-Normal | The IP address of the node is properly connected.
-Abnormal | The node IP address cannot be connected and the node is unavailable.
-
-
-
-
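-If a node shows Abnormal, you can call the health check path directly on that node from a host in the same VPC to narrow down the problem. This is a sketch; <node-private-ip> is a placeholder, and the paths are the ones listed in Table 2 of the Scenario section:
-# Non-security cluster
-curl http://<node-private-ip>:9200/
-# Security cluster (HTTP backend protocol)
-curl http://<node-private-ip>:9200/_opendistro/_security/health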
-
-
Accessing a Cluster Using the Curl Command
Run the following commands to check whether the dedicated load balancer can be connected to a cluster.
-
-
Table 3 Commands for accessing different clusters
-Security Mode | Service Form Provided by ELB for External Systems | Curl Command for Accessing a Cluster
-Non-security | No authentication | curl http://IP:9200
-Non-security | One-way authentication | curl -k --cert ./client.crt --key ./client.key https://IP:9200
-Non-security | Two-way authentication | curl --cacert ./ca.crt --cert ./client.crt --key ./client.key https://IP:9200
-Security mode + HTTP | Password authentication | curl http://IP:9200 -u user:pwd
-Security mode + HTTP | One-way authentication + Password authentication | curl -k --cert ./client.crt --key ./client.key https://IP:9200 -u user:pwd
-Security mode + HTTP | Two-way authentication + Password authentication | curl --cacert ./ca.crt --cert ./client.crt --key ./client.key https://IP:9200 -u user:pwd
-Security mode + HTTPS | One-way authentication + Password authentication | curl -k --cert ./client.crt --key ./client.key https://IP:9200 -u user:pwd
-Security mode + HTTPS | Two-way authentication + Password authentication | curl --cacert ./ca.crt --cert ./client.crt --key ./client.key https://IP:9200 -u user:pwd
-
-
-
-
-
-
Table 4 Variables
-Variable | Description
-IP | ELB IP address
-user | Username for accessing the CSS cluster
-pwd | Password of the user
-
-
-
-
-
If the Elasticsearch cluster information is returned, the connection is successful. For example, if a security cluster using the HTTPS protocol is connected to a load balancer using two-way authentication, the information shown in Figure 5 is returned.
-
Figure 5 Accessing a cluster
-
-
-
-
diff --git a/docs/css/umn/css_01_0184.html b/docs/css/umn/css_01_0184.html
deleted file mode 100644
index cbaeac20..00000000
--- a/docs/css/umn/css_01_0184.html
+++ /dev/null
@@ -1,20 +0,0 @@
-
-
-(Optional) Interconnecting with a Dedicated Load Balancer
-
-
-
diff --git a/docs/css/umn/css_01_0185.html b/docs/css/umn/css_01_0185.html
deleted file mode 100644
index 09df6921..00000000
--- a/docs/css/umn/css_01_0185.html
+++ /dev/null
@@ -1,169 +0,0 @@
-
-
-Viewing Basic Information About an Elasticsearch Cluster
-On the Cluster Information page, you can view the information about a cluster, including the private network address, public IP address, version, and node.
-
- Log in to the CSS management console.
- Choose Clusters > Elasticsearch. The cluster list is displayed.
- Click a cluster name to go to the Cluster Information page and view the basic information about the cluster.
-
Table 1 Parameters for configuring basic informationType
- |
-Parameter
- |
-Description
- |
-
-
-Cluster Information
- |
-Name
- |
-Cluster name. The name can be customized.
-You can click the edit icon on the right to change the cluster name.
- |
-
-ID
- |
-Unique ID of a cluster, which is automatically generated by the system.
-Each cluster in the same region has a unique ID.
- |
-
-Version
- |
-Cluster version information.
- |
-
-Cluster Status
- |
-Current status of a cluster
- |
-
-Task Status
- |
-Current task status of a cluster. If no task is in progress, -- is displayed.
- |
-
-Created
- |
-Time when a cluster was created
- |
-
-Cluster Storage Capacity (GB)
- |
-Storage capacity of a cluster
- |
-
-Used Cluster Storage (GB)
- |
-Used storage capacity of a cluster
- |
-
-Configuration
- |
-Region
- |
-Region where a cluster is located
- |
-
-AZ
- |
-AZ where a cluster is located
- |
-
-VPC
- |
-VPC to which the cluster belongs
- |
-
-Subnet
- |
-Subnet to which the cluster belongs
- |
-
-Security Group
- |
-Security group to which a cluster belongs.
-To change the security group of a cluster, click Change Security Group on the right.
- NOTICE: Before changing the security group, ensure that the port 9200 required for service access has been enabled. Incorrect security group configuration may cause service access failures. Exercise caution when performing this operation.
-
- |
-
-Security Mode
- |
-Security mode of a cluster.
-- Enabled: The current cluster is a security cluster.
- Disabled: The current cluster is a non-security cluster.
-For details about how to change the security mode of a cluster, see Changing the Security Mode.
- |
-
-Reset Password
- |
-This parameter is displayed only for security clusters.
-Click Reset to change the password of the administrator account admin of the security cluster.
- NOTE: Requirements for administrator passwords:
- - The password can contain 8 to 32 characters.
- The password must contain at least three of the following character types: uppercase letters, lowercase letters, digits, and special characters. The following special characters are supported: ~!@#$%^&*()-_=+\|[{}];:,<.>/?
- Do not use the administrator name, or the administrator name spelled backwards.
- You are advised to change the password periodically.
-
- |
-
-Enterprise Project
- |
-Enterprise project to which a cluster belongs.
-You can click the project name to view the basic information about the enterprise project.
- |
-
-Public IP Address
- |
-Public network access information, which is displayed only for clusters in security mode.
-- For a security cluster with public network access enabled, the configured public network address is displayed. You can use this address to access the security cluster from the public network.
- For a security cluster with public network access disabled, -- is displayed.
-When using a public IP address to access a cluster, you are advised to enable access control and configure an access whitelist to improve cluster security. For details about how to configure the public network access, see Accessing a Cluster from a Public Network.
- |
-
-Access Control
- |
-Whether to set access control for a cluster. This parameter is displayed only for clusters with public network access enabled.
-- Enabled: Only IP addresses in the whitelist can access the cluster through the public network.
- Disabled: Any IP address can access the cluster through the public network.
-Click Set to configure the access control and the whitelist.
- |
-
-Bandwidth
- |
-The bandwidth for public network access. This parameter is displayed only for clusters with public network access enabled.
-Click Edit to change the bandwidth size.
- |
-
-HTTPS Access
- |
-Whether to enable the HTTPS access protocol for a cluster.
-- Disabled: The HTTP protocol is used for cluster access.
- Enabled: The HTTPS protocol is used for cluster access. Only security clusters can enable this function. If HTTPS Access is enabled, you can click Download Certificate to obtain the CER security certificate for accessing the security cluster. Currently, the security certificate cannot be used in the public network environment.
-For details about how to change the access mode of a cluster in security mode, see Switching the Protocol of Security Clusters.
- |
-
-Private Network Address
- |
-Private IP address and port number of a cluster, which can be used to access the cluster. If the cluster has only one node, the IP address and port number of only one node are displayed, for example, 10.62.179.32:9200. If the cluster has multiple nodes, the IP addresses and port numbers of all nodes are displayed, for example, 10.62.179.32:9200,10.62.179.33:9200.
- |
-
-Node
- |
-Node Specifications
- |
-Specifications of nodes in a cluster
- |
-
-Node Storage Type
- |
-Storage capacity and storage type of nodes in a cluster
- |
-
-Nodes
- |
-Number of nodes in a cluster
- |
-
-
-
-
-
-
-
-
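As a quick connectivity check, the private network address and port shown above can be used directly from a host in the same VPC. The sketch below reuses the example address from the table, so it is illustrative only; substitute your own cluster's address, and add the admin credentials if the cluster runs in security mode.

curl "http://10.62.179.32:9200/_cluster/health?pretty"

For a multi-node cluster, any of the listed address:port pairs can be used in the same way.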
diff --git a/docs/css/umn/css_01_0187.html b/docs/css/umn/css_01_0187.html
deleted file mode 100644
index abeef788..00000000
--- a/docs/css/umn/css_01_0187.html
+++ /dev/null
@@ -1,153 +0,0 @@
-
-
-Enhanced Cold Data Query Performance
-Context
When you query data on the Discover page of Kibana for the first time, all data needs to be obtained from OBS because there is no cache. If a large number of documents are returned, it takes a long time to obtain the corresponding time fields and file metadata from OBS. To accelerate queries the first time they run on the Discover page, you can cache data locally.
-
-
Prerequisites
This feature is available in Elasticsearch clusters of versions 7.6.2 and 7.10.2 and Opensearch clusters created after February 2023.
-
-
API for Querying Cold Data from Local Cache
This API can be used to query the cold data from local cache.
-
Example request:
-
GET /_frozen_stats/local_cache
-GET /_frozen_stats/local_cache/{nodeId}
-
Response example:
-
{
- "_nodes" : {
- "total" : 1,
- "successful" : 1,
- "failed" : 0
- },
- "cluster_name" : "elasticsearch",
- "nodes" : {
- "6by3lPy1R3m55Dcq3liK8Q" : {
- "name" : "node-1",
- "transport_address" : "127.0.0.1:9300",
- "host" : "127.0.0.1",
- "ip" : "127.0.0.1",
- "local_cache" : {
- "get_stats" : {
- "get_total_count" : 562, //Total number of times data was retrieved from the local cold data cache.
- "get_hit_count" : 562, //Total number of hits in the local cold data cache.
- "get_miss_count" : 0, //Total number of local cold data cache misses.
- "get_total_ns" : 43849200, //Total duration for retrieving data from the local cold data cache.
- "get_avg_ns" : 78023 //Average duration for retrieving data from the local cold data cache.
- },
- "load_stats" : {
- "load_count" : 2, //Number of times cold data was loaded from the local cache
- "load_total_ms" : 29, //Total duration for loading cold data from the local cache
- "load_avg_ms" : 14, //Average duration for loading cold data from the local cache
- "load_fail_count" : 0, //Number of failure times for loading cold data from the local cache
- "load_overflow_count" : 0 //Number of times the local cold data cache exceeds the cache pool size.
- },
- "reload_stats" : {
- "reload_count" : 0, //Number of times the local cold data cache was regenerated.
- "reload_total_ms" : 0, //Total duration for regenerating the local cold data cache.
- "reload_avg_ms" : 0, //Average duration for regenerating the local cold data cache.
- "reload_fail_count" : 0 //Number of failures in regenerating the local cold data cache.
- },
- "init_stats" : {
- "init_count" : 0, //Number of times the local cold data cache was initialized.
- "init_total_ms" : 0, //Total duration for initializing the local cold data cache.
- "init_avg_ms" : 0, //Average duration for initializing the local cold data cache.
- "init_fail_count" : 0 //Number of failures in initializing the local cold data cache.
- }
- }
- }
- }
- }
-
-
-
Configuring Parameters
-
Configuration Item | Type | Unit | Value Range | Scope | Can Be Dynamically Modified | Description
-low_cost.local_cache.max.capacity | Integer | - | 10 to 5000 (default: 500) | node | Yes | Maximum number of cold data cache objects available on a node. Each shard corresponds to one cache object. NOTE: If the heap memory usage remains high, decrease the value. If the value of load_overflow_count keeps increasing rapidly, increase the value.
-index.low_cost.local_cache.threshold | Integer | % | 0 to 100 (default: 50) | index | Yes | Threshold for enabling the local cache of cold data. NOTE: If the percentage of date fields is less than this value, cold data of the date type is cached locally; otherwise, this parameter is not used. If the date fields of the current index occupy most of its data volume, you are not advised to use this function.
-index.low_cost.local_cache.evict_time | String | Days | 1d to 365d (default: 30d) | index | Yes | Wait time before cold data is deleted from the local cache, counted from index.frozen_date (the time when freezing succeeded). NOTE: For indexes that were frozen in old clusters and do not have index.frozen_date, the index creation time is used instead. You are advised to adjust the deletion time based on disk usage to avoid high disk usage.
-
-
-
-
-
-
Modifying Parameters
- Run the following command to modify low_cost.local_cache.max.capacity:
PUT _cluster/settings
- {
- "persistent": {
- "low_cost.local_cache.max.capacity":1000
- }
- }
- - Run the following command to modify index.low_cost.local_cache.threshold:
PUT es_write_pref2-00000000021/_settings
- {
- "index.low_cost.local_cache.threshold":20
- }
- - Run the following command to modify index.low_cost.local_cache.evict_time:
PUT es_write_pref2-00000000021/_settings
- {
- "index.low_cost.local_cache.evict_time":"7d"
- }
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0188.html b/docs/css/umn/css_01_0188.html
deleted file mode 100644
index 06779de3..00000000
--- a/docs/css/umn/css_01_0188.html
+++ /dev/null
@@ -1,156 +0,0 @@
-
-
-Deploying a Cross-AZ Cluster
-To prevent data loss and minimize the cluster downtime in case of service interruption, CSS supports cross-AZ cluster deployment. When creating a cluster, you can select two or three AZs in the same region. The system will automatically allocate nodes to these AZs.
-
Allocating Nodes
If you select two or three AZs when creating a cluster, CSS automatically enables the cross-AZ HA function and properly allocates nodes to different AZs. Table 1 describes how the nodes are allocated.
-
- When creating a cluster, ensure that the number of selected nodes is no less than the number of AZs. Otherwise, cross-AZ deployment is not supported.
- If you enable master nodes when deploying a cross-AZ cluster, the master nodes will also be distributed to different AZs.
- The node quantity difference between any two AZs is no more than one.
-
-
-
Table 1 Number of nodes and AZ distribution
-Nodes | Single AZ (AZ1) | Two AZs (AZ1 / AZ2) | Three AZs (AZ1 / AZ2 / AZ3)
-1 | 1 | Not supported | Not supported
-2 | 2 | 1 / 1 | Not supported
-3 | 3 | 2 / 1 | 1 / 1 / 1
-4 | 4 | 2 / 2 | 2 / 1 / 1
-... | ... | ... | ...
-
-
-
-
-
-
Setting Replicas
Setting replicas enables clusters to effectively use the HA capability of AZs.
-
- In two-AZ deployment, if one AZ becomes unavailable, the other AZ continues to provide services. In this case, at least one replica is required. Elasticsearch has one replica by default. You can retain the default value if you do not require higher read performance.
- In three-AZ deployment, if one AZ becomes unavailable, the other AZs continue to provide services. In this case, at least one replica is required. Elasticsearch has one replica by default. If you need more replicas to improve the cluster's ability to handle queries, modify settings to change the number of replicas.
You can run the following command to modify the number of index replicas:
-curl -XPUT -H 'Content-Type: application/json' http://ip:9200/{index_name}/_settings -d '{"number_of_replicas":2}'
-Alternatively, run the following command to specify the number of replicas in the template:
-curl -XPUT -H 'Content-Type: application/json' http://ip:9200/_template/templatename -d '{"template": "*", "settings": {"number_of_replicas": 2}}'
-
-
- ip: private network address
- index_name: index name
- number_of_replicas: number of replicas after modification. The value in the preceding command indicates that two replicas are required.
-
-
-
Possible Service Interruptions
The following table describes the possible service interruptions when an AZ of a two- or three-AZ cluster is faulty.
-
-
Table 2 Possible service interruptions
-Two AZs, no dedicated master nodes:
- - If the number of nodes is even and half of the data nodes are in the faulty AZ, one node in the faulty AZ must be replaced before a master node can be elected.
- - If the number of nodes is odd and the faulty AZ contains one more node than the normal AZ, one node in the faulty AZ must be replaced before a master node can be elected. For details about how to replace nodes, contact technical support.
- - If the number of nodes is odd and the faulty AZ contains one less node than the normal AZ, services are not interrupted and a master node can be elected.
-Two AZs, three dedicated master nodes: There is a 50% possibility of service interruption, because two dedicated master nodes are allocated to one AZ and the third to the other AZ.
- - If the interruption happens in the AZ with one dedicated master node, a master node can be elected in the AZ that has two dedicated master nodes.
- - If the interruption happens in the AZ with two dedicated master nodes, the remaining AZ has only one dedicated master node and no master can be elected. Services are interrupted and you need to contact technical support.
-Three AZs, no dedicated master nodes: Generally, service interruption does not occur. However, if you configure four nodes across three AZs, one AZ has two nodes; if that AZ is faulty, services are interrupted. You are therefore not advised to configure four nodes when selecting three AZs.
-Three AZs, three dedicated master nodes: Service interruption does not occur.
-
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0189.html b/docs/css/umn/css_01_0189.html
deleted file mode 100644
index a94e0e49..00000000
--- a/docs/css/umn/css_01_0189.html
+++ /dev/null
@@ -1,36 +0,0 @@
-
-
-Clusters in Security Mode
-When creating an Elasticsearch cluster, you can enable the security mode for it. Identity authentication is required when users access a security cluster. You can also authorize and encrypt security clusters.
-
Identity Verification
To access a security cluster, you need to enter the username and password. The identity verification is required for the following two types of users:
-
- Administrator: The default administrator username is admin, and the password is the one specified during cluster creation.
- Users: Enter the username and password created through Kibana.
-
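As an illustration of the identity verification above, a security cluster can also be reached with any HTTP client by supplying these credentials. The address below is a placeholder for your cluster's private network address, and the password is the one set during cluster creation.

# HTTP access (security cluster with HTTPS access disabled)
curl -u admin:<password> "http://<private-address>:9200/_cluster/health?pretty"
# HTTPS access (security cluster with HTTPS access enabled); install the downloaded
# CER certificate, or use -k to skip verification for a quick test only
curl -k -u admin:<password> "https://<private-address>:9200/_cluster/health?pretty"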
-
Authorization
On the Kibana console, click Security to control user permissions in Elasticsearch clusters. You can configure hierarchical user permissions by cluster, index, document, and field. For details, see Creating a User and Granting Permissions by Using Kibana.
-
You can add or delete users, and map users to different roles for permissions control.
-
Figure 1 Configuring users
-
You can use role mapping to configure roles and map a user, backend role, and host name to a role.
-
Figure 2 Role mapping
-
You can set permissions for each role to access clusters, indexes and documents and assign Kibana tenants different roles.
-
Figure 3 Configuring role permissions
-
You can set action groups, assign the groups to roles, and configure the roles' permission for accessing indexes and documents.
-
Figure 4 Configuring action groups
-
You can view the parameters of authentication and authorization for the current cluster. You can also run the securityadmin command to modify the configuration.
-
Figure 5 Viewing cluster parameters
-
You can also clear the security cache.
-
Figure 6 Clearing the security cache
-
-
Encryption
When key data is transferred between nodes or through the HTTP protocol, SSL/TLS encryption is used to ensure data security.
-
You can perform the preceding functions on Kibana, using .yml files (not recommended), or by calling RESTful APIs. For more information about the security mode, see Security.
-
-
Resetting the Administrator Password
If you want to change the administrator password of a security cluster or you have forgotten the password, reset the password.
-
- On the Clusters page, locate the target cluster whose password you want to reset and click the cluster name. The Cluster Information page is displayed.
- In the Configuration area, click Reset next to Reset Password.
- The password can contain 8 to 32 characters.
- The password must contain at least three of the following character types: uppercase letters, lowercase letters, digits, and special characters. The following special characters are supported: ~!@#$%^&*()-_=+\|[{}];:,<.>/?
- Do not use the administrator name, or the administrator name spelled backwards.
- You are advised to change the password periodically.
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0190.html b/docs/css/umn/css_01_0190.html
deleted file mode 100644
index 83f0abce..00000000
--- a/docs/css/umn/css_01_0190.html
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
-Accessing an Elasticsearch Cluster
-Elasticsearch clusters have built-in Kibana and Cerebro components. You can quickly access an Elasticsearch cluster through Kibana and Cerebro.
-
Access a Cluster Through Kibana
- Log in to the CSS management console.
- On the Clusters page, locate the target cluster and click Access Kibana in the Operation column to go to the Kibana login page.
- Non-security cluster: The Kibana console is displayed.
- Security cluster: Enter the username and password on the login page and click Log In to go to the Kibana console. The default username is admin and the password is the one specified during cluster creation.
- - After the login is successful, you can access clusters through Kibana.
-
-
Accessing a Cluster Through Cerebro
- Log in to the CSS management console.
- On the Clusters page, locate the target cluster and click More > Cerebro in the Operation column to go to the Cerebro login page.
- Non-security cluster: Click the cluster name on the Cerebro login page to go to the Cerebro console.
- Security cluster: Click the cluster name on the Cerebro login page, enter the username and password, and click Authenticate to go to the Cerebro console. The default username is admin and the password is the one specified during cluster creation.
- - After the login is successful, you can access clusters through Cerebro.
-
-
-
-
diff --git a/docs/css/umn/css_01_0191.html b/docs/css/umn/css_01_0191.html
deleted file mode 100644
index 8f65309d..00000000
--- a/docs/css/umn/css_01_0191.html
+++ /dev/null
@@ -1,28 +0,0 @@
-
-
-Flow Control 2.0
-
-
-
diff --git a/docs/css/umn/css_01_0192.html b/docs/css/umn/css_01_0192.html
deleted file mode 100644
index 69bb327c..00000000
--- a/docs/css/umn/css_01_0192.html
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
-Context
-Feature Description
CSS can control traffic at the node level. You can configure the blacklist and whitelist, the maximum concurrent HTTP connections, and the maximum HTTP connections for a node. You can also configure backpressure based on client traffic in the node memory and block access in one click. CSS can also collect statistics on node access IP addresses and URIs. Each function has an independent control switch, which is disabled by default. To restore default values of parameters, set them to null.
-
After the client write traffic backpressure and control is enabled, large requests will be rejected when too much node heap memory has been occupied. This function prevents nodes from being suspended and reduces the risk of node unavailability.
-
- HTTP/HTTPS flow control:
- You can control client IP address access by adding IP addresses and subnets to the HTTP/HTTPS blacklist or whitelist. If an IP address is in the blacklist, the client is disconnected and all its requests are rejected. Whitelist rules take precedence over blacklist rules: if a client IP address exists in both the blacklist and the whitelist, its requests are not rejected.
- HTTP/HTTPS concurrent connection flow control limits the total number of concurrent HTTP connections to a node.
- HTTP/HTTPS new connection flow control limits the number of new HTTP connections that can be established to a node per second.
- - Memory flow control limits write traffic based on the node heap memory. Requests are backpressured to the client, resource recycling is triggered as much as possible, and requests are then accepted based on the available heap memory.
- Request sampling can record the access of client IP addresses and the type of requests from the client. Based on the statistics, you can identify the access traffic of client IP addresses and analyze the client write and query requests.
- One-click access blocking can block all the access traffic of a node, excluding the traffic from Kibana and CSS O&M and monitoring APIs.
- Flow control provides an independent API for viewing traffic statistics and records the number of current client connections and client backpressure connections. You can evaluate the flow control threshold and analyze the cluster loads based on the statistics.
- Access logs record the URLs and bodies of HTTP/HTTPS requests received by nodes within a period of time. You can analyze the current traffic pressure based on the access logs.
-
-
Constraints
- Currently, only versions 7.6.2 and 7.10.2 support the flow control feature.
- Clusters of versions 7.6.2 and 7.10.2 created after January 2023 support only flow control 2.0. Clusters created before January 2023 support only flow control 1.0.
-
-
-
-
diff --git a/docs/css/umn/css_01_0193.html b/docs/css/umn/css_01_0193.html
deleted file mode 100644
index a032ce69..00000000
--- a/docs/css/umn/css_01_0193.html
+++ /dev/null
@@ -1,99 +0,0 @@
-
-
-HTTP/HTTPS Flow Control
-Context
You can run commands in Kibana to enable or disable HTTP/HTTPS flow control for your cluster. The command parameters are as follows.
-
-
Table 1 HTTP/HTTPS flow control parameters
-Parameter | Type | Description
-flowcontrol.http.enabled | Boolean | Whether to enable HTTP/HTTPS flow control. This function is disabled by default, and enabling it may affect node access performance. Value: true or false. Default value: false.
-flowcontrol.http.allow | List<String> | IP address whitelist. It can contain multiple IP addresses and masks, or an IP address list, separated by commas (,), for example, xx.xx.xx.xx/24,xx.xx.xx.xx/24 or xx.xx.xx.xx,xx.xx.xx.xx. The default value is null.
-flowcontrol.http.deny | List<String> | IP address blacklist. Multiple IP addresses and masks or an IP address list can be configured, separated by commas (,). The default value is null.
-flowcontrol.http.concurrent | Integer | Maximum number of concurrent HTTP/HTTPS connections. Default value: number of available cores on a node x 400.
-flowcontrol.http.newconnect | Integer | Maximum number of new HTTP/HTTPS connections that can be created per second. Default value: number of available cores on a node x 200.
-flowcontrol.http.warmup_period | Integer | Time required for the new-connection rate to ramp up to its maximum. For example, if flowcontrol.http.newconnect is 100 and flowcontrol.http.warmup_period is 5000 ms, the allowed rate reaches 100 new connections per second after 5 seconds. Value range: 0-10000. Unit: ms. Default value: 0.
-
-
-
-
-
-
Procedure
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run commands to enable or disable HTTP/HTTPS flow control.
- Enabling HTTP/HTTPS flow control for a node
PUT /_cluster/settings
-{
- "persistent": {
- "flowcontrol.http.enabled": true,
- "flowcontrol.http.allow": ["192.168.0.1/24", "192.168.2.1/24"],
- "flowcontrol.http.deny": "192.168.1.1/24",
- "flowcontrol.http.concurrent": 1000,
- "flowcontrol.http.newconnect": 1000,
- "flowcontrol.http.warmup_period": 0
- }
-}
-
If all parameters are set to null, they will be restored to default values.
-
- - Disabling HTTP/HTTPS flow control for a node
PUT /_cluster/settings
-{
- "persistent": {
- "flowcontrol.http.enabled": false
- }
-}
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0194.html b/docs/css/umn/css_01_0194.html
deleted file mode 100644
index 20bc50b7..00000000
--- a/docs/css/umn/css_01_0194.html
+++ /dev/null
@@ -1,104 +0,0 @@
-
-
-Memory Flow Control
-Context
Elasticsearch provides a circuit breaker, which will terminate requests or return the error code 429 if the memory usage exceeds its threshold. However, the circuit breaker rejects a request only after the node reads the entire request, which occupies heap memory. To prevent a request from being fully received by a node before the request is rejected, you can control the client traffic based on the real-time status of the node heap memory.
-
-
Configuring Parameters
The following table describes memory flow control parameters.
-
-
Table 1 Memory flow control parameters
-Parameter | Type | Description
-flowcontrol.memory.enabled | Boolean | Whether to enable memory flow control. After this function is enabled, the heap memory usage is continuously monitored. Value: true or false. Default value: false.
-flowcontrol.memory.heap_limit | String | Maximum global heap memory usage of a node. If this value is exceeded, write traffic is backpressured. Value range: 10%-100%. Default value: 90%.
-flowcontrol.holding.in_flight_factor | Float | Backpressure release factor, similar in principle to the circuit breaker parameter network.breaker.inflight_requests.overhead. When the memory usage reaches the limit, a larger value means stronger backpressure and more strictly limited write traffic. Value range: >= 0.5. Default value: 1.0.
-flowcontrol.holding.max | TimeValue | Maximum delay of each request. If the delay exceeds this value, backpressure is released or the request connection is closed, depending on flowcontrol.holding.max_strategy. Value range: >= 15s. Default value: 60s.
-flowcontrol.holding.max_strategy | String | Policy applied after the maximum delay is exceeded. keep (default): if the heap memory is still high, continue the backpressure; the server decides when to execute the request based on the real-time memory. soft: the request is executed even if the heap memory is still high; the inFlight circuit breaker decides whether to execute or reject it. hard: if the heap memory is still high, the request is discarded and its client connection is closed.
-flowcontrol.memory.once_free_max | String | Maximum memory that can be released at a time for the suspended request queue. This prevents the whole cluster from stalling because of a transient low-memory state under high pressure. Value range: 1% to 50%. Default value: 10%.
-flowcontrol.memory.nudges_gc | Boolean | Whether to trigger garbage collection to keep writes stable when the write pressure is too high. (The backpressure connection pool is checked every second; write pressure is considered high if all existing connections are blocked and new write requests cannot be released.) Value: true or false. Default value: true.
-
-
-
-
-
- flowcontrol.memory.enabled and flowcontrol.memory.heap_limit are the most important parameters. enabled indicates the memory flow control switch, and heap_limit indicates the heap memory threshold of a node.
- The default value 90% of flowcontrol.memory.heap_limit is a conservative threshold. When the heap memory usage is greater than 90%, the system stops reading large requests that exceed 64 KB from the client until the heap memory decreases. If the heap memory decreases to 85%, the maximum client data that can be read is 5% of the maximum heap memory. If the heap memory usage has been higher than 90% for a long time, client connection requests cannot be read. In this case, the GC algorithm is triggered to perform garbage collection until the heap memory usage is lower than the threshold.
- Generally, you can set the flowcontrol.memory.heap_limit threshold to 80% or less to ensure that the node has certain heap memory for operations besides data writing, such as Elasticsearch query and segment merge.
-
-
-
Procedure
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run commands to enable or disable memory flow control.
- Enable memory flow control
PUT /_cluster/settings
-{
- "persistent": {
- "flowcontrol.memory.enabled": true,
- "flowcontrol.memory.heap_limit": "80%"
- }
-}
- - Disable cluster memory flow control
PUT /_cluster/settings
-{
- "persistent": {
- "flowcontrol.memory.enabled": false
- }
-}
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0195.html b/docs/css/umn/css_01_0195.html
deleted file mode 100644
index 2938d09f..00000000
--- a/docs/css/umn/css_01_0195.html
+++ /dev/null
@@ -1,51 +0,0 @@
-
-
-Request Sampling
-Context
Request sampling can record the access of client IP addresses and the type of requests from the client. Based on the statistics, you can identify the access traffic of client IP addresses and analyze the client write and query requests.
-
-
Table 1 Request statistics parameters
-Parameter | Type | Description
-flowcontrol.log.access.enabled | Boolean | Whether to collect statistics on the IP addresses of clients that accessed the Elasticsearch cluster recently and the number of their requests. Value: true or false. Default value: false.
-flowcontrol.log.access.count | Integer | Number of client IP addresses that accessed a cluster recently. Value range: 0-100. Default value: 10.
-flowcontrol.log.file.enabled | Boolean | Whether to record the log details of each request in the backend log file. Value: true or false. Default value: false.
-
-
-
-
-
- IP address statistics switches control whether to collect request type statistics and whether to enable logging.
- flowcontrol.log.access.enabled controls whether to collect statistics on client requests, including bulk write, search, and msearch requests.
- flowcontrol.log.file.enabled is the log access switch. Request details can be directly recorded in log files for audit analysis.
-
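For illustration, the sampling switches above are cluster settings and can be enabled in Kibana Dev Tools in the same way as the other flow control switches in this chapter. The values below are only an example, not recommended settings.

PUT /_cluster/settings
{
  "persistent": {
    "flowcontrol.log.access.enabled": true,
    "flowcontrol.log.access.count": 20,
    "flowcontrol.log.file.enabled": false
  }
}

To restore a parameter to its default value, set it to null.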
-
-
-
-
diff --git a/docs/css/umn/css_01_0196.html b/docs/css/umn/css_01_0196.html
deleted file mode 100644
index 7712650a..00000000
--- a/docs/css/umn/css_01_0196.html
+++ /dev/null
@@ -1,26 +0,0 @@
-
-
-One-click Traffic Blocking
-You can block all connections in one click, except the connections established through O&M APIs, to handle unexpected traffic bursts and quickly recover your cluster.
-
Procedure
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run commands to enable or disable one-click traffic blocking.
-
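The enable and disable commands did not survive in this copy of the page. As a sketch only, the switch follows the same cluster-settings form as the other flow control parameters in this chapter; the setting name flowcontrol.break.enabled is an assumption here and should be verified against the current CSS documentation.

# Enable one-click traffic blocking (setting name assumed, see note above)
PUT /_cluster/settings
{
  "persistent": {
    "flowcontrol.break.enabled": true
  }
}
# Disable it again
PUT /_cluster/settings
{
  "persistent": {
    "flowcontrol.break.enabled": false
  }
}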
-
-
-
-
diff --git a/docs/css/umn/css_01_0198.html b/docs/css/umn/css_01_0198.html
deleted file mode 100644
index d5bcce2d..00000000
--- a/docs/css/umn/css_01_0198.html
+++ /dev/null
@@ -1,107 +0,0 @@
-
-
-Access Statistics and Traffic Control Information Query
-Access statistics and flow control information can be queried via an independent API.
-
Procedure
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run the commands to query traffic control information.
- Check the traffic control status of all nodes.
GET /_nodes/stats/filter/v2
- - View traffic control details of all nodes.
GET /_nodes/stats/filter/v2?detail
- - View the traffic control status of a specific node.
GET /_nodes/{nodeId}/stats/filter/v2
-{nodeId} indicates the ID of the node you want to check.
-Example response:
-{
- "_nodes" : {
- "total" : 1,
- "successful" : 1,
- "failed" : 0
- },
- "cluster_name" : "css-xxxx",
- "nodes" : {
- "d3qnVIpPTtSoadkV0LQEkA" : {
- "name" : "css-xxxx-ess-esn-1-1",
- "host" : "192.168.x.x",
- "timestamp" : 1672236425112,
- "flow_control" : {
- "http" : {
- "current_connect" : 52,
- "rejected_concurrent" : 0,
- "rejected_rate" : 0,
- "rejected_black" : 0,
- "rejected_breaker" : 0
- },
- "access_items" : [
- {
- "remote_address" : "10.0.0.x",
- "search_count" : 0,
- "bulk_count" : 0,
- "other_count" : 4
- }
- ],
- "holding_requests" : 0
- }
- }
- }
-}
-
-Table 1 Response parameters
-Parameter | Description
-current_connect | Number of HTTP connections of a node, which is recorded even if flow control is disabled. It is equal to the current_open value of the GET /_nodes/stats/http API and includes the node's current client connections.
-rejected_concurrent | Number of concurrent connections rejected during HTTP flow control. This value is not cleared when HTTP flow control is disabled.
-rejected_rate | Number of new connections rejected during HTTP flow control. This value is not cleared when HTTP flow control is disabled.
-rejected_black | Number of requests rejected based on the blacklist during HTTP flow control. This value is not cleared when HTTP flow control is disabled.
-rejected_breaker | Number of new connections rejected after one-click traffic blocking is enabled.
-remote_address | Client IP addresses and the number of requests from each of them.
-search_count | Number of times a client accessed the cluster using _search and _msearch.
-bulk_count | Number of times a client accessed the cluster using _bulk.
-other_count | Number of times a client accessed the cluster using other requests.
-
-
-
-
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0199.html b/docs/css/umn/css_01_0199.html
deleted file mode 100644
index ac53f6b5..00000000
--- a/docs/css/umn/css_01_0199.html
+++ /dev/null
@@ -1,159 +0,0 @@
-
-
-Temporary Access Statistics Logs
-Context
You can check access logs in either of the following ways:
-
- Enable and check access logs via an independent API. Configure the API parameters to record the access log time and size. The access log content is returned through a REST API.
- Print access logs. Your access logs are printed as files in backend logs. To enable this method, set the flowcontrol.log.file.enabled configuration item described in Request Sampling.
-
The following table describes access log parameters.
-
-
Table 1 Access log parameters
-Parameter | Type | Description
-duration_limit | String | Duration recorded in an access log. Value range: 10 to 120. Unit: s. Default value: 30.
-capacity_limit | String | Size of an access log. After access logging is enabled, the size of recorded requests is checked; once it exceeds this value, access logging stops. Value range: 1 to 5. Unit: MB. Default value: 1.
-
-
-
-
-
Access logging stops if either duration_limit or capacity_limit reaches the threshold.
-
-
-
Procedure
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run commands to enable or disable access logs.
- Enable access logs for all nodes in a cluster.
PUT /_access_log?duration_limit=30s&capacity_limit=1mb
- - Enable access logs for a node in a cluster.
PUT /_access_log/{nodeId}?duration_limit=30s&capacity_limit=1mb
-{nodeId} indicates the ID of the node where you want to enable access logs.
-
- - View access logs.
-
- Run the following commands to delete access logs.
-
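The commands for viewing and deleting access logs are missing from this copy of the page. As a sketch, assuming the same _access_log path used to enable logging above (verify against the current CSS documentation):

# View access logs of all nodes, or of a single node
GET /_access_log
GET /_access_log/{nodeId}
# Delete the recorded access logs of all nodes, or of a single node
DELETE /_access_log
DELETE /_access_log/{nodeId}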
-
-
-
-
diff --git a/docs/css/umn/css_01_0200.html b/docs/css/umn/css_01_0200.html
deleted file mode 100644
index 6a56de32..00000000
--- a/docs/css/umn/css_01_0200.html
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
-Flow Control
-
-
-
diff --git a/docs/css/umn/css_01_0201.html b/docs/css/umn/css_01_0201.html
deleted file mode 100644
index e1f93db0..00000000
--- a/docs/css/umn/css_01_0201.html
+++ /dev/null
@@ -1,70 +0,0 @@
-
-
-Changing AZs
-CSS supports cross-AZ deployment. You can add an AZ to obtain more resources or improve cluster availability, and can migrate your current AZ to one with higher specifications. This section describes how to add or migrate your AZs.
-
Description
You can Add AZ or Migrate AZ.
- Add AZ: Add one or two AZs to a single-AZ cluster, or add an AZ to a dual-AZ cluster to improve cluster availability.
- Migrate AZ: Completely migrate data from the current AZ to another AZ that has sufficient resources.
-
-
-
Prerequisites
- Ensure that an AZ with sufficient resources exists.
- The target cluster is available and has no tasks in progress.
- Make sure that no non-standard operations have been performed in the cluster. If you have made non-standard modifications, such as modifying return routes, system parameters, and Kibana configurations, these modifications will be lost after the AZ change and your services may be affected.
-
-
Constraints
- To ensure service continuity, the total number of data nodes and cold data nodes in a cluster must be greater than or equal to 3.
- During the change, nodes are brought offline one by one and then new nodes are created. Ensure that the disk capacity of other nodes can store all the data of the node after a single node is brought offline.
- To prevent replica allocation failures after a node is taken offline during the change, ensure that the shards of every index can still be allocated to the remaining data nodes and cold data nodes. That is, for every index, the number of replicas plus 1 must be less than or equal to the total number of data nodes and cold data nodes in the current cluster. (A quick way to check this is sketched after this list.)
- You are advised to back up data before the change to prevent data loss in case the change fails.
- Before a change completes, some nodes may have been moved to a new AZ. In this case, the AZs before and after the change are both displayed. After the change succeeds, the new AZs and their nodes will be displayed properly.
- When adding AZs, the current AZ must be retained in the change. When adding one or two AZs to a single-AZ cluster, you must change AZs for all nodes at the same time. When adding an AZ to a dual-AZ cluster, you can change AZs for a single type of nodes or all nodes in a cluster at a time. For example, in a cluster using the dual-AZ architecture, you can use the three-AZ architecture for master nodes alone. During HA modification, the nodes with the smallest configurations are modified to rebuild the cluster. After the HA modification is complete, the YML configuration of the nodes that are not modified is also updated. You need to restart the cluster to make the modification take effect.
- When migrating an AZ, you can select only one target AZ. You can migrate AZs for a single type of nodes or all nodes in a cluster at a time. For example, in a cluster with two AZs, you can migrate the AZ of the master node to the other AZ. After adding AZs, you need to restart the cluster to make the modification take effect.
-
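Before submitting the change, the node count and per-index replica settings mentioned in the constraints can be checked in Kibana Dev Tools with standard Elasticsearch APIs; my_index below is a placeholder index name.

# List the cluster's nodes and the current shard distribution
GET _cat/nodes?v
GET _cat/shards?v
# Check the number of replicas configured for an index (placeholder name)
GET my_index/_settings/index.number_of_replicas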
-
Procedure
- Log in to the CSS management console.
- In the navigation pane, choose a cluster type. The cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- Click the Change AZ tab.
- On the Change AZ page, set parameters.
-
Table 1 Parameters for changing AZs
-Operation Type:
- - Add AZ: Add one or two AZs to a single-AZ cluster, or add an AZ to a dual-AZ cluster. During the HA modification, the nodes with the smallest configurations are modified first to rebuild the cluster. After the modification is complete, the YML configuration of the unmodified nodes is also updated, and you need to restart the cluster for the modification to take effect.
- - Migrate AZ: Migrate data from one AZ to another. After the migration is complete, you need to restart the cluster for the modification to take effect.
-Node Type: Select a type of node, or All nodes, to change their AZ. NOTE: When adding one or two AZs to a single-AZ cluster, you can only select All nodes, so the AZs of all nodes are changed at a time.
-Current AZ: Current AZ of the cluster.
-Target AZ: Target AZ. For Add AZ, select up to three AZs, which must include all of your current AZs. For Migrate AZ, select exactly one target AZ, which cannot be one of your current AZs.
-Agency: Select an IAM agency to grant the current account the permission to change AZs. If no agencies are available, click Create IAM Agency to go to the IAM console and create one. NOTE: The selected agency must be authorized with the Tenant Administrator or VPC Administrator policy.
-
-
-
-
- - Click Submit. Determine whether to check for the backup of all indexes and click OK to start the change.
Figure 1 Checking full index snapshots
- - The current AZ change task is displayed in the task list. If the task status is Running, expand the task list and click View Progress to view the progress details.
Figure 2 Viewing the task progress
-If the task status is Failed, you can retry or terminate the task.
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0202.html b/docs/css/umn/css_01_0202.html
deleted file mode 100644
index 515ccc48..00000000
--- a/docs/css/umn/css_01_0202.html
+++ /dev/null
@@ -1,85 +0,0 @@
-
-
-Monitoring OBS Operations
-To clearly display the operations of the storage and compute decoupling plugin in OBS, the real-time OBS rate metric is added to CSS and recorded in the system index.
-
Prerequisite
This feature is available in Elasticsearch clusters of versions 7.6.2 and 7.10.2 and OpenSearch clusters created after March 2023.
-
-
-
GET _frozen_stats/obs_rate API
- Calculation method: The average OBS operation rate in the last 5 seconds is calculated every 5 seconds.
- Example request:
GET _frozen_stats/obs_rate
-GET _frozen_stats/obs_rate/{nodeId}
-{nodeId} indicates the ID of the node whose OBS operation rate you want to query.
- - Example response:
{
- "_nodes" : {
- "total" : 1,
- "successful" : 1,
- "failed" : 0
- },
- "cluster_name" : "elasticsearch",
- "nodes" : {
- "dflDvcSwTJ-fkiIlT2zE3A" : {
- "name" : "node-1",
- "transport_address" : "127.0.0.1:9300",
- "host" : "127.0.0.1",
- "ip" : "127.0.0.1",
- "update_time" : 1671777600482, // Time when the current statistics are updated.
- "obs_rate" : {
- "list_op_rate" : 0.0, // Rate of OBS list operations. Unit: times/s.
- "get_meta_op_rate" : 0.0, // Rate of OBS get meta operations. Unit: times/s.
- "get_obj_op_rate" : 0.0, // Rate of OBS get operations. Unit: times/s.
- "put_op_rate" : 0.0, // Rate of OBS put operations. Unit: times/s.
- "obs_total_op_rate" : 0.0, // Rate of all OBS operations. The unit is times/s.
- "obs_upload_rate" : "0.0 MB/s", // Data upload rate of OBS, in MB/s.
- "obs_download_rate" : "0.0 MB/s" // Data download rate of OBS, in MB/s.
- }
- }
- }
- }
-
-
-
System Index
- System index name: .freeze_obs_rate-YYYY.mm.dd.
- Example: .freeze_obs_rate-2023.01.23
The default retention period of indexes is 30 days.
-
-
-
-
Configuration Item
-
Configuration Item | Type | Scope | Can Be Dynamically Modified | Description
-low_cost.obs_rate_index.evict_time | String | node | Yes | Retention period of the .freeze_obs_rate-YYYY.mm.dd index. Value range: 1d to 365d. Default value: 30d. Unit: day.
-
-
-
-
-
-
For example, run the following command to modify the retention period of the .freeze_obs_rate-YYYY.mm.dd index:
-
PUT _cluster/settings
- {
- "persistent": {
- "low_cost.obs_rate_index.evict_time": "7d"
- }
- }
-
-
-
diff --git a/docs/css/umn/css_01_0207.html b/docs/css/umn/css_01_0207.html
deleted file mode 100644
index fa23cd92..00000000
--- a/docs/css/umn/css_01_0207.html
+++ /dev/null
@@ -1,31 +0,0 @@
-
-
-
- Elasticsearch
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0210.html b/docs/css/umn/css_01_0210.html
deleted file mode 100644
index b175295b..00000000
--- a/docs/css/umn/css_01_0210.html
+++ /dev/null
@@ -1,25 +0,0 @@
-
-
-
- Accessing an Elasticsearch Cluster
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0211.html b/docs/css/umn/css_01_0211.html
deleted file mode 100644
index c6989001..00000000
--- a/docs/css/umn/css_01_0211.html
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
-
- Configuring an Elasticsearch Cluster
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0212.html b/docs/css/umn/css_01_0212.html
deleted file mode 100644
index 593cd69e..00000000
--- a/docs/css/umn/css_01_0212.html
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-
- Managing Plugins
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0227.html b/docs/css/umn/css_01_0227.html
deleted file mode 100644
index 2229d530..00000000
--- a/docs/css/umn/css_01_0227.html
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
-Enhanced Import Performance
-
-
-
diff --git a/docs/css/umn/css_01_0228.html b/docs/css/umn/css_01_0228.html
deleted file mode 100644
index 9fa55630..00000000
--- a/docs/css/umn/css_01_0228.html
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
-Context
-Feature Description
CSS provides an enhanced data import function. It optimizes bulk routing and speeds up index building and word segmentation, improving import performance and reducing bulk rejection. This function applies to clusters that contain a large number of index shards and text indexes and that have a high import throughput.
-
-
Constraints
Currently, only Elasticsearch clusters of version 7.10.2 support the import performance enhancement.
-
-
Prerequisites
An Elasticsearch cluster of version 7.10.2 has been created on the CSS console.
-
-
Precautions
- After the local shard preferential bulk routing optimization and bulk routing optimization are enabled, data writing is not routed based on IDs, and routing-related functions are restricted. For example, ID-based GET requests may fail. The optimization of local shard preferential bulk routing depends on the random distribution of client bulk requests and the balanced distribution of primary shards.
- If index.native_speed_up (the text index acceleration function) is enabled, index_sorting is not supported.
- Prerequisites for enabling index.native_analyzer:
- The index.native_speed_up function has been enabled.
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0229.html b/docs/css/umn/css_01_0229.html
deleted file mode 100644
index 265dcbc0..00000000
--- a/docs/css/umn/css_01_0229.html
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-Instructions
-
-
-
diff --git a/docs/css/umn/css_01_0230.html b/docs/css/umn/css_01_0230.html
deleted file mode 100644
index f148b8fe..00000000
--- a/docs/css/umn/css_01_0230.html
+++ /dev/null
@@ -1,45 +0,0 @@
-
-
-Bulk Route Optimization
-According to the default routing rule of Elasticsearch, data in a bulk request is routed to different shards. When massive data is written and a large number of index shards exist, excessive internal request forwarding may trigger bulk rejection. In a large-scale cluster, the long tail effect causes high bulk request latency.
-
You can specify the index.bulk_routing configuration item to enable bulk route optimization. This function reduces the requests that need to be internally forwarded. For clusters containing a large number of shards, this function can improve write performance and reduce bulk rejection.
-
Procedure
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster, and click Access Kibana in the Operation column.
- In the navigation tree on the left, choose Dev Tools.
- On the Dev Tools page, run the following command:
PUT my_index
-{
- "settings": {
- "index.bulk_routing": "local_pack"
- }
-}
-
-Table 1 Values of index.bulk_routing
-Value | Description
-default | The default routing mechanism of Elasticsearch is used. Records in a bulk request are split and routed independently.
-pack | Data of a single bulk request is randomly routed to the same shard.
-local_pack | Data of a single bulk request is routed to a local shard of the data node that receives the request. If that node does not contain a shard of the target index, the data is randomly routed to another node that does.
-
-
-
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0231.html b/docs/css/umn/css_01_0231.html
deleted file mode 100644
index 287afe48..00000000
--- a/docs/css/umn/css_01_0231.html
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-Bulk Aggregation Optimization
-You can specify the index.aggr_perf_batch_size configuration item to enable or disable batch import optimization. After the batch import function is enabled, documents in bulk requests are written in batches. This function reduces the overhead of memory application, application lock, and other calls, improving data import performance.
-
The value range of index.aggr_perf_batch_size is [1, Integer.MAX_VALUE]. The default value is 1, indicating that the batch import function is disabled. If the value is greater than 1, the batch import function is enabled and the value of MIN(bulk_doc_size, aggr_perf_batch_size) indicates the batch size.
-
-
Procedure
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster, and click Access Kibana in the Operation column.
- In the navigation tree on the left, choose Dev Tools.
- On the Dev Tools page, run the following command:
PUT my_index
-{
- "settings": {
- "index.aggr_perf_batch_size": "128"
- }
-}
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0232.html b/docs/css/umn/css_01_0232.html
deleted file mode 100644
index 5b34f812..00000000
--- a/docs/css/umn/css_01_0232.html
+++ /dev/null
@@ -1,27 +0,0 @@
-
-
-Text Index Acceleration
-- You can configure index.native_speed_up to enable or disable text index acceleration. This function optimizes the index process and memory usage to accelerate index building for text fields (text and keyword).
- You can configure index.native_analyzer to enable or disable word segmentation acceleration. For texts that require common word segmentation, you can use the analyzer to accelerate word segmentation.
-
Procedure
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster, and click Access Kibana in the Operation column.
- In the navigation tree on the left, choose Dev Tools.
- On the Dev Tools page, run the following command:
PUT my_index
-{
- "settings": {
- "index.native_speed_up": true,
- "index.native_analyzer": true
- },
- "mappings": {
- "properties": {
- "my_field": {
- "type": "text"
- }
- }
- }
-}
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0233_0.html b/docs/css/umn/css_01_0233_0.html
deleted file mode 100644
index b14428e3..00000000
--- a/docs/css/umn/css_01_0233_0.html
+++ /dev/null
@@ -1,20 +0,0 @@
-
-
-Optimization of Other Parameters
-After the import performance is enhanced, the number of index merge tasks increases accordingly. You can adjust the following configuration to reduce the impact of merge task overhead on the import performance:
-
You can increase the value of index.merge.scheduler.max_thread_count to increase the number of shard merge threads and reduce the traffic limit on data import. The default value is 4 and you are advised to set it to 8.
-
Procedure
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster, and click Access Kibana in the Operation column.
- In the navigation tree on the left, choose Dev Tools.
- On the Dev Tools page, run the following command:
PUT my_index
-{
- "settings": {
- "index.merge.scheduler.max_thread_count": 8
- }
-}
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0234.html b/docs/css/umn/css_01_0234.html
deleted file mode 100644
index 629a51d3..00000000
--- a/docs/css/umn/css_01_0234.html
+++ /dev/null
@@ -1,44 +0,0 @@
-
-
-Performance Data
-- Test environment
- Cluster: 3 M6 ECSs (8 vCPUs | 64 GB memory)
- Data: open-source web server access logs and internal service dataset (dns_logs)
- Configuration: 120 shards, no replicas, and all the enhanced features enabled
- - Test result
-
Type | Performance (Before) | Performance (After) | Improved By
-Open-source dataset | 85 Mbit/s | 131 Mbit/s | 54%
-Service dataset | 124 Mbit/s | 218 Mbit/s | 76%
-
-
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0246.html b/docs/css/umn/css_01_0246.html
deleted file mode 100644
index 2ea2c243..00000000
--- a/docs/css/umn/css_01_0246.html
+++ /dev/null
@@ -1,369 +0,0 @@
-
-
-Monitoring Metrics
-Function
This topic describes CSS metrics that can be monitored by Cloud Eye as well as their namespaces and dimensions. You can search for the monitoring metrics and alarms generated for CSS by using the Cloud Eye console or calling APIs.
-
-
-
Monitoring Metrics
- Table 1 describes the monitoring metrics of CSS clusters.
- Monitored object: Cloud service nodes of CSS clusters
- Monitoring period (original metric): 1 minute
-
Accumulated value: The value is accumulated from the time when a node is started. After the node is restarted, the value is reset to zero and accumulated again.
-
-
-
Table 1 CSS metrics
-All metrics below are reported for the monitored target "CSS cluster - cloud service node" at a monitoring interval (raw data) of 1 minute.
-Metric ID | Metric | Description | Value Range
-jvm_heap_usage | JVM Heap Usage | JVM heap memory usage of a node. Unit: % | 0-100%
-cpu_usage | CPU Usage | CPU usage. Unit: % | 0-100%
-load_average | Average Load | Average number of queuing tasks per minute on a node | ≥ 0
-open_file_descriptors | Open File Descriptors | Number of opened file descriptors on a node | ≥ 0
-max_file_descriptors | Max. Allowed File Descriptors | Maximum number of allowed file descriptors | ≥ 0
-thread_pool_write_queue | Tasks in Write Queue | Number of queued tasks in the write thread pool | ≥ 0
-thread_pool_search_queue | Tasks in Search Queue | Number of queued tasks in the search thread pool | ≥ 0
-sum_thread_pool_force_merge_queue | Tasks in ForceMerge Queue | Number of queued tasks in the force merge thread pool | ≥ 0
-sum_thread_pool_write_rejected | Rejected Tasks in Write Queue | Number of rejected tasks in the write thread pool | ≥ 0
-sum_thread_pool_search_rejected | Rejected Tasks in Search Queue | Number of rejected tasks in the search thread pool | ≥ 0
-sum_thread_pool_force_merge_rejected | Rejected Tasks in ForceMerge Queue | Number of rejected tasks in the force merge thread pool | ≥ 0
-sum_thread_pool_write_threads | Size of Write Thread Pool | Size of the write thread pool | ≥ 0
-sum_thread_pool_search_threads | Size of Search Thread Pool | Size of the search thread pool | ≥ 0
-sum_thread_pool_force_merge_threads | Size of ForceMerge Thread Pool | Size of the force merge thread pool | ≥ 0
-free_fs_size | Available Size of File Systems | Available size of file systems in a CSS cluster. Unit: byte | ≥ 0 bytes
-total_fs_size | Total Size of File Systems | Total size of file systems in a CSS cluster. Unit: byte | ≥ 0 bytes
-sum_jvm_old_gc_count | Total GCs of Old-Generation JVM | Number of old-generation garbage collections | ≥ 0
-sum_jvm_old_gc_time | Total GC Duration of Old-Generation JVM | Old-generation garbage collection duration. Unit: ms | ≥ 0 ms
-sum_jvm_young_gc_count | Total GCs of Young-Generation JVM | Number of young-generation garbage collections | ≥ 0
-sum_jvm_young_gc_time | GC Duration of Young-Generation JVM | Young-generation garbage collection duration. Unit: ms | ≥ 0 ms
-mem_free_in_bytes | Available Memory | Unused memory space of a node. Unit: byte | ≥ 0 bytes
-mem_free_percent | Available Memory Percentage | Percentage of unused memory space on a node | ≥ 0
-mem_used_in_bytes | Used Memory | Used memory space of a node. Unit: byte | ≥ 0 bytes
-current_opened_http_count | Currently Open HTTP Connections | Number of HTTP connections on a node | ≥ 0
-total_opened_http_count | Total Open HTTP Connections | Total number of HTTP connections on a node | ≥ 0
-
-
-
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0266.html b/docs/css/umn/css_01_0266.html
deleted file mode 100644
index 45a30140..00000000
--- a/docs/css/umn/css_01_0266.html
+++ /dev/null
@@ -1,28 +0,0 @@
-
-
-Restoring Data
-You can use existing snapshots to restore the backup index data to a specified cluster.
-
Prerequisites
To use the function of creating or restoring snapshots, the account or IAM user logging in to the CSS management console must have both of the following permissions:
-
- Tenant Administrator for project OBS in region Global service
- CSS Administrator in the current region
-
-
Precautions
- Cluster snapshots will increase the CPU usage and disk I/O. You are advised to take cluster snapshots during off-peak hours.
- If snapshots have been stored in the OBS bucket, the OBS bucket cannot be changed. You can disable the snapshot function, enable the snapshot function, and specify a new OBS bucket. After you disable the snapshot function, you cannot use previously created snapshots to restore the cluster.
- If a cluster is in the Unavailable status, you can use the cluster snapshot function only to restore clusters and view existing snapshot information.
- During backup and restoration of a cluster, you can perform only certain operations, including scaling out, accessing Kibana, viewing metrics, and deleting other snapshots of clusters. However, you cannot perform the following operations: restarting or deleting the cluster, deleting a snapshot that is in the Creating or Restoring status, and creating or restoring another snapshot. If a snapshot is being created or restored for a cluster, any automatic snapshot creation task initiated for the cluster will be canceled.
- Cluster data cannot be queried during snapshot restoration.
- If you restore a CSS cluster snapshot to another cluster, indexes with the same name in the destination cluster will be overwritten. If the snapshot and the destination cluster use different shards, the indexes with the same name will not be overwritten.
- The version of the destination cluster used for restoration must be the same as or higher than that of the source cluster.
-
-
Restoring Data
You can use snapshots whose Snapshot Status is Available to restore cluster data. The stored snapshot data can be restored to other clusters.
-
Restoring data will overwrite current data in clusters. Therefore, exercise caution when restoring data.
-
- In the Snapshots area, locate the row that contains the snapshot you want to restore and click Restore in the Operation column.
- On the Restore page, set restoration parameters.
Index: Enter the name of the index you want to restore. If you do not specify any index name, data of all indexes will be restored. The value can contain 0 to 1,024 characters. Uppercase letters, spaces, and certain special characters (including "\<|>/?) are not allowed. You can use the asterisk (*) to match multiple indexes. For example, index* indicates that all indexes with the prefix index in snapshots are restored.
-Rename Pattern: Enter a regular expression. Indexes that match the regular expression are restored. The default value index_(.+) indicates restoring data of all indexes. The value contains 0 to 1,024 characters. Uppercase letters, spaces, and certain special characters (including "\<|>/?,) are not allowed.
-Rename Replacement: Enter the index renaming rule. The default value restored_index_$1 indicates that restored_ is added in front of the names of all restored indexes. The value contains 0 to 1,024 characters. Uppercase letters, spaces, and certain special characters (including "\<|>/?,) are not allowed.
-
The Rename Pattern and Rename Replacement take effect only when they are configured at the same time. (They correspond to the rename_pattern and rename_replacement parameters of the Elasticsearch snapshot restore API; see the sketch after this procedure.)
-
-Cluster: Select the cluster that you want to restore. You can select the current cluster or others. However, you can only restore the snapshot to clusters whose status is Available. If the status of the current cluster is Unavailable, you cannot restore the snapshot to the current cluster. When you restore data to another cluster, the version of the target cluster must be later than or equal to that of the current cluster. If the target cluster you selected has an index with the same name as the original cluster, data in the index will be overwritten after the restoration. Exercise caution when performing this operation.
-Figure 1 Restoring a snapshot
- - Click OK. If restoration succeeds, Task Status of the snapshot in the snapshot list will change to Restoration succeeded, and the index data is generated again according to the snapshot information.
Figure 2 Successful restoration
-
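For reference, the console parameters above correspond to the Elasticsearch snapshot restore API. The following is a minimal sketch of the equivalent request; the repository name repo_auto and the snapshot name snapshot-xxx are placeholders, so replace them with your own values:
POST _snapshot/repo_auto/snapshot-xxx/_restore
{
  "indices": "index*",
  "rename_pattern": "index_(.+)",
  "rename_replacement": "restored_index_$1"
}
As with the console procedure, indexes with the same name in the destination cluster may be overwritten, so exercise caution.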
-
-
-
-
diff --git a/docs/css/umn/css_01_0267.html b/docs/css/umn/css_01_0267.html
deleted file mode 100644
index f51670e4..00000000
--- a/docs/css/umn/css_01_0267.html
+++ /dev/null
@@ -1,65 +0,0 @@
-
-
-Managing Automatic Snapshot Creation
-Snapshots are automatically created at a specified time each day according to the rules you create. You can enable or disable the automatic snapshot creation function and set the automatic snapshot creation policy.
-
Prerequisites
To use the function of creating or restoring snapshots, the account or IAM user logging in to the CSS management console must have both of the following permissions:
-
- Tenant Administrator for project OBS in region Global service
- CSS Administrator in the current region
-
-
Precautions
- When creating a backup for the first time, you are advised to back up data of all indexes.
- Cluster snapshots will increase the CPU usage and disk I/O. You are advised to take cluster snapshots during off-peak hours.
- Before creating a snapshot, you need to perform basic configurations, including configuring the OBS bucket for storing snapshots and IAM agency used for security authentication.
- If there are available snapshots in the snapshot list when you configure the OBS bucket for storing cluster snapshots for the first time, you cannot change the bucket for snapshots that are subsequently created automatically or manually. Exercise caution when you configure the OBS bucket.
- If snapshots have been stored in the OBS bucket, the OBS bucket cannot be changed. You can disable the snapshot function, enable the snapshot function, and specify a new OBS bucket. After you disable the snapshot function, you cannot use previously created snapshots to restore the cluster.
- If a cluster is in the Unavailable status, you can use the cluster snapshot function only to restore clusters and view existing snapshot information.
- During backup and restoration of a cluster, you can perform only certain operations, including scaling out, accessing Kibana, viewing metrics, and deleting other snapshots of clusters. However, you cannot perform the following operations: restarting or deleting the cluster, deleting a snapshot that is in the Creating or Restoring status, and creating or restoring another snapshot. If a snapshot is being created or restored for a cluster, any automatic snapshot creation task initiated for the cluster will be canceled.
- The first snapshot of a cluster is a full snapshot, and subsequent snapshots are incremental snapshots. CSS snapshot files depend on each other.
-
-
Managing Automatic Snapshot Creation
- In the CSS navigation pane on the left, click Clusters.
- On the Clusters page that is displayed, click the name of the target cluster. In the navigation pane on the left, choose Cluster Snapshots.
- On the displayed Cluster Snapshots page, click the icon to the right of Cluster Snapshot to enable the cluster snapshot function.
- Enable the cluster snapshot function. OBS buckets and IAM agencies are automatically created to store snapshots. The automatically created OBS bucket and IAM agency are displayed on the page. You can also click the edit icon on the right of Basic Configuration to edit the configuration. To ensure the security of snapshot data, you can select a key to encrypt the snapshot.
Table 1 Cluster snapshot parameters

OBS Bucket
Select an OBS bucket for storing snapshots from the drop-down list box. You can also click Create Bucket on the right to create an OBS bucket. For details, see Creating a Bucket.
The created or existing OBS bucket must meet the following requirements:
- Storage Class is Standard or Warm.
- Region must be the same as that of the created cluster.

Backup Path
Storage path of the snapshot in the OBS bucket. The backup path configuration rules are as follows:
- The backup path cannot contain the following characters: \:*?"<>|
- The backup path cannot start with a slash (/).
- The backup path cannot start or end with a period (.).
- The total length cannot exceed 1,023 characters.

IAM Agency
IAM agency authorized by the current account for CSS to access or maintain data stored in OBS. You can also click Create IAM Agency on the right to create an IAM agency. For details, see Creating an Agency.
The created or existing IAM agency must meet the following requirements:
- Agency Type must be Cloud service.
- Cloud Service must be set to CSS.
- The agency must have the Tenant Administrator permission for the OBS(S3) project in OBS(S3).

Snapshot Encryption
Indicates whether to enable snapshot encryption. Enabling snapshot encryption ensures the security of your snapshot data.
After the snapshot encryption function is enabled, select a key from the Key Name drop-down list. If no key is available, click Create/View Key to switch to the KMS management console to create or modify a key. For details, see Creating a CMK.
- You cannot use default CMKs whose aliases end with /default in KMS to encrypt snapshots.
- If a snapshot has been stored in the OBS bucket, you cannot modify the parameters used for encrypting the snapshot.
- If the key used for encryption is in the Pending deletion state or is disabled, you cannot perform backup or restoration operations on the cluster. Specifically, you cannot create new snapshots for the cluster or use existing snapshots to restore clusters. In this case, switch to the KMS management console and enable the target key so that backup and restoration operations are allowed again.
- If you delete the key used for encryption, you cannot perform backup or restoration operations on the cluster, and the deleted key cannot be restored, so exercise caution when deleting a key. If the key is deleted, pending deletion, or disabled, automatic snapshot creation is still triggered based on the configured snapshot policy, but every automatic creation task will fail, and the failed tasks are displayed in the Failed Tasks dialog box. In this scenario, you are advised to disable the automatic snapshot creation function.

Figure 1 Editing the basic configuration
- Enable the automatic snapshot creation function. The Configure Automatic Snapshot Creation dialog box is displayed. If the automatic snapshot creation function is enabled, you can click the edit icon on the right of Automatic Snapshot Creation to modify the snapshot policy.
-Figure 2 Automatic snapshot creation
- - Click OK to save the snapshot policy.
Snapshots that are automatically created according to the snapshot policy are displayed in the snapshot list, along with manually created snapshots. You can distinguish them by the Snapshot Type setting. In the upper right corner of the snapshot list, enter the keyword of the snapshot name or snapshot ID to search for the desired snapshots.
-Figure 3 Automatic snapshot creation
- - (Optional) Disable the automatic snapshot creation function.
After you disable the automatic snapshot creation function, the system stops automatic creation of snapshots. If the system is creating a snapshot based on the automatic snapshot creation policy and the snapshot is not yet displayed in the snapshot list, you cannot disable the automatic snapshot creation function. In this case, if you click the button next to Automatic Snapshot Creation, a message is displayed, indicating that you cannot disable the function. You are advised to disable the function after the system completes automatic creation of the snapshot, and the created snapshot is displayed in the snapshot list.
-When disabling the automatic snapshot creation function, you can choose whether to delete the snapshots that have been automatically created by selecting Delete automated snapshots in the displayed dialog box. By default, automatically created snapshots are not deleted.
-- If you do not select Delete automated snapshots, automatically created snapshots are not deleted when you disable the automatic snapshot creation function. You can manually delete them later. For details, see Deleting a Snapshot. If you do not manually delete the automatically created snapshots and enable the automatic snapshot creation function again, then all snapshots with Snapshot Type set to Automated in the snapshot list of the cluster can only be automatically deleted by the system. Specifically, the system automatically deletes snapshots based on the snapshot policy configured when you enable the automatic snapshot creation function again. For example, if you set Retention Period (days) to 10, the system will automatically delete the snapshots that have been retained for more than 10 days.
- If you select Delete automated snapshots, all snapshots with Snapshot Type set to Automated in the snapshot list will be deleted when you disable the automatic snapshot creation function.
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0268.html b/docs/css/umn/css_01_0268.html
deleted file mode 100644
index 2bdbd17f..00000000
--- a/docs/css/umn/css_01_0268.html
+++ /dev/null
@@ -1,54 +0,0 @@
-
-
-Manually Creating a Snapshot
-You can manually create a snapshot at any time to back up all data or data of specified indexes.
-
Prerequisites
To use the function of creating or restoring snapshots, the account or IAM user logging in to the CSS management console must have both of the following permissions:
-
- Tenant Administrator for project OBS in region Global service
- CSS Administrator in the current region
-
-
Precautions
- When creating a backup for the first time, you are advised to back up data of all indexes.
- Cluster snapshots will increase the CPU usage and disk I/O. You are advised to take cluster snapshots during off-peak hours.
- Before creating a snapshot, you need to perform basic configurations, including configuring the OBS bucket for storing snapshots and IAM agency used for security authentication.
- If there are available snapshots in the snapshot list when you configure the OBS bucket for storing cluster snapshots for the first time, you cannot change the bucket for snapshots that are subsequently created automatically or manually. Exercise caution when you configure the OBS bucket.
- If snapshots have been stored in the OBS bucket, the OBS bucket cannot be changed. You can disable the snapshot function, enable the snapshot function, and specify a new OBS bucket. After you disable the snapshot function, you cannot use previously created snapshots to restore the cluster.
- If a cluster is in the Unavailable status, you can use the cluster snapshot function only to restore clusters and view existing snapshot information.
- During backup and restoration of a cluster, you can perform only certain operations, including scaling out, accessing Kibana, viewing metrics, and deleting other snapshots of clusters. However, you cannot perform the following operations: restarting or deleting the cluster, deleting a snapshot that is in the Creating or Restoring status, and creating or restoring another snapshot. If a snapshot is being created or restored for a cluster, any automatic snapshot creation task initiated for the cluster will be canceled.
- The first snapshot of a cluster is a full snapshot, and subsequent snapshots are incremental snapshots. CSS snapshot files depend on each other.
-
-
Manually Creating a Snapshot
- In the CSS navigation pane on the left, click Clusters.
- On the Clusters page that is displayed, click the name of the target cluster. In the navigation pane on the left, choose Cluster Snapshots.
- On the displayed Cluster Snapshots page, click the icon to the right of Cluster Snapshot to enable the cluster snapshot function.
- Enable the cluster snapshot function. OBS buckets and IAM agencies are automatically created by CSS to store snapshots. The automatically created OBS bucket and IAM agency are displayed on the page. You can also click the edit icon on the right of Basic Configuration to edit the configuration. To ensure the security of snapshot data, you can select a key to encrypt the snapshot.
Table 1 Cluster snapshot parameters

OBS Bucket
Select an OBS bucket for storing snapshots from the drop-down list box. You can also click Create Bucket on the right to create an OBS bucket. For details, see Creating a Bucket.
The created or existing OBS bucket must meet the following requirement:
- Storage Class is Standard or Warm.

IAM Agency
IAM agency authorized by the current account for CSS to access or maintain data stored in OBS. You can also click Create IAM Agency on the right to create an IAM agency. For details, see Creating an Agency.
The created or existing IAM agency must meet the following requirements:
- Agency Type must be Cloud service.
- Cloud Service must be set to CSS.
- The agency must have the Tenant Administrator permission for the OBS(S3) project in OBS(S3).

Snapshot Encryption
Indicates whether to enable snapshot encryption. Enabling snapshot encryption ensures the security of your snapshot data.
After the snapshot encryption function is enabled, select a key from the Key Name drop-down list. If no key is available, click Create/View Key to switch to the KMS management console to create or modify a key. For details, see Creating a CMK.
- You cannot use default master keys whose aliases end with /default in KMS to encrypt snapshots.
- If a snapshot has been stored in the OBS bucket, you cannot modify the parameters used for encrypting the snapshot.
- If the key used for encryption is in the Pending deletion state or is disabled, you cannot perform backup or restoration operations on the cluster. Specifically, you cannot create new snapshots for the cluster or use existing snapshots to restore clusters. In this case, switch to the KMS management console and enable the target key so that backup and restoration operations are allowed again.
- If you delete the key used for encryption, you cannot perform backup or restoration operations on the cluster, and the deleted key cannot be restored, so exercise caution when deleting a key. If the key is deleted, pending deletion, or disabled, automatic snapshot creation is still triggered based on the configured snapshot policy, but every automatic creation task will fail, and the failed tasks are displayed in the Failed Tasks dialog box. In this scenario, you are advised to disable the automatic snapshot creation function.

Figure 1 Editing the basic configuration
- - After basic configurations are completed, click Create.
-
Figure 2 Create snapshot
- - Click OK.
After the snapshot is created, it will be displayed in the snapshot list, along with any automatically created snapshots. The status Available indicates that the snapshot was created successfully. You can distinguish manual and automated snapshots by the Snapshot Type setting. In the upper right corner of the snapshot list, enter a keyword of the snapshot name or snapshot ID to search for the desired snapshots.
-
-
-
-
-
diff --git a/docs/css/umn/css_01_0269.html b/docs/css/umn/css_01_0269.html
deleted file mode 100644
index 03ff9ac8..00000000
--- a/docs/css/umn/css_01_0269.html
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
-Index Backup and Restoration
-
-
-
diff --git a/docs/css/umn/css_01_0271.html b/docs/css/umn/css_01_0271.html
deleted file mode 100644
index 19e901c4..00000000
--- a/docs/css/umn/css_01_0271.html
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-Deleting a Snapshot
-If you no longer need a snapshot, delete it to release storage resources. If the automatic snapshot creation function is enabled, snapshots that are automatically created cannot be deleted manually, and the system automatically deletes these snapshots on the half hour after the time specified by Retention Period (days). If you disable the automatic snapshot creation function while retaining the automated snapshots, then you can manually delete them later. If you do not manually delete the automatically created snapshots and enable the automatic snapshot creation function again, then all snapshots with Snapshot Type set to Automated in the snapshot list of the cluster can only be automatically deleted by the system.
-
After a snapshot is deleted, its data cannot be restored. Exercise caution when deleting a snapshot.
-
-
- In the snapshot list, locate the snapshot that you want to delete.
- Click Delete in the Operation column. In the dialog box that is displayed, confirm the snapshot information and click OK.
-
-
-
diff --git a/docs/css/umn/css_02_0001.html b/docs/css/umn/css_02_0001.html
deleted file mode 100644
index a072a5f7..00000000
--- a/docs/css/umn/css_02_0001.html
+++ /dev/null
@@ -1,29 +0,0 @@
-
-
-
- FAQs
-
-
-
-
-
diff --git a/docs/css/umn/css_02_0006.html b/docs/css/umn/css_02_0006.html
deleted file mode 100644
index d9d89a59..00000000
--- a/docs/css/umn/css_02_0006.html
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
-How Does CSS Ensure Data and Service Security?
-CSS uses network isolation, in addition to various host and data security measures.
-
- Network isolation
The entire network is divided into two planes: service plane and management plane. The two planes are deployed and isolated physically to ensure the security of the service and management networks.
-- Service plane: This is the network plane of the cluster. It provides service channels for users and delivers data definitions, indexing, and search capabilities.
- Management plane: This is the management console, where you manage CSS.
- - Host security
CSS provides the following security measures:
-- The VPC security group ensures the security of the hosts in a VPC.
- Network access control lists (ACLs) allow you to control what data can enter or exit your network.
- The internal security infrastructure (including the network firewall, intrusion detection system, and protection system) monitors all network traffic that enters or exits the VPC through an IPsec VPN.
- - Data security
Multiple replicas, cross-AZ deployment of clusters, and third-party (OBS) backup of index data ensure the security of user data.
-
-
-
-
diff --git a/docs/css/umn/css_02_0007.html b/docs/css/umn/css_02_0007.html
deleted file mode 100644
index 1c503e11..00000000
--- a/docs/css/umn/css_02_0007.html
+++ /dev/null
@@ -1,15 +0,0 @@
-
-
-Which CSS Metrics Should I Focus On?
-Disk usage and cluster health status are two key metrics that you can focus on. You can log in to Cloud Eye and configure alarm rules for these metrics. If alarms are reported, handle them by taking appropriate measures.
-
Configuration examples:
-
- Alarms are reported if the disk usage is higher than or equal to a specified value (for example, 85%) and has reached this value multiple times (for example, 5 times) within a specified time period (for example, 5 minutes).
- Alarms are reported if the value of the cluster health status metric exceeds 0 for multiple times (for example, 5 times) within a specified time period (for example, 5 minutes).
-
Measures:
-
- If disk usage alarms are reported, view available disk space, check whether data can be deleted from cluster nodes or archived to other systems to free up space, or check if you can expand the disk capacity.
- If cluster health status alarms are reported, check whether shard allocation is normal, whether shards have been lost, and check whether the process has been restarted on Cerebro.
-
-
-
diff --git a/docs/css/umn/css_02_0008.html b/docs/css/umn/css_02_0008.html
deleted file mode 100644
index 22fe6e42..00000000
--- a/docs/css/umn/css_02_0008.html
+++ /dev/null
@@ -1,12 +0,0 @@
-
-
-What Storage Options Does CSS Provide?
-CSS uses EVS and local disks to store your indices. During cluster creation, you can specify the EVS disk type and specifications (the EVS disk size).
-
- Supported EVS disk types include common I/O, high I/O, and ultra-high I/O.
- The EVS disk size varies depending on the node specifications selected when you create a cluster.
-
-
-
diff --git a/docs/css/umn/css_02_0009.html b/docs/css/umn/css_02_0009.html
deleted file mode 100644
index 1180f4c4..00000000
--- a/docs/css/umn/css_02_0009.html
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
-What Is the Maximum Storage Capacity of CSS?
-You can configure up to 200 nodes for a cluster (each node corresponds to an ECS). The maximum storage capacity of an ECS is the total capacity of EVS disks attached to the ECS. You can calculate the total storage capacity of CSS based on the sizes of EVS disks attached to different ECSs. The EVS disk size is determined by the node specifications selected when you create the cluster.
-
-
-
diff --git a/docs/css/umn/css_02_0010.html b/docs/css/umn/css_02_0010.html
deleted file mode 100644
index 420bdda8..00000000
--- a/docs/css/umn/css_02_0010.html
+++ /dev/null
@@ -1,12 +0,0 @@
-
-
-What Can the Disk Space of a CSS Cluster Be Used For?
-You can store the following logs and files:
-
- Log files: Elasticsearch logs
- Data files: Elasticsearch index files
- Other files: cluster configuration files
- OS: 5% storage space reserved for the OS by default
-
-
-
diff --git a/docs/css/umn/css_02_0017.html b/docs/css/umn/css_02_0017.html
deleted file mode 100644
index 7d05bc0a..00000000
--- a/docs/css/umn/css_02_0017.html
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
-How Can I Manage CSS?
-You can use any of the following three methods to manage CSS or to use search engine APIs. You can initiate requests based on constructed request messages.
- curl
curl is a command-line tool used to transfer data to or from a given URL. It serves as an HTTP client that can send HTTP requests to an HTTP server and receive response messages. You can also use curl to debug APIs. For more information about curl, visit https://curl.haxx.se/. A simple example is provided after this list.
- - Code
You can call APIs through code to assemble, send, and process request messages.
- - REST client
Both Mozilla Firefox and Google Chrome provide a graphical browser plugin, the REST client, which you can use to send and process requests.
-– For Mozilla Firefox, see Firefox REST Client.
-– For Google Chrome, see Postman.
-
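As a minimal illustration of the curl method, the following requests query the cluster health through the search engine API. The IP address, username, and password are placeholders; the second form, with -u and -k, applies only to security-mode clusters that use HTTPS:
curl -XGET 'http://10.0.0.xxx:9200/_cluster/health?pretty'
curl -XGET -u admin:<password> 'https://10.0.0.xxx:9200/_cluster/health?pretty' -k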
-
-
-
-
diff --git a/docs/css/umn/css_02_0025.html b/docs/css/umn/css_02_0025.html
deleted file mode 100644
index 53b84471..00000000
--- a/docs/css/umn/css_02_0025.html
+++ /dev/null
@@ -1,20 +0,0 @@
-
-
-Why Does My ECS Fail to Connect to a Cluster?
-Perform the following steps to troubleshoot this problem:
-
- Check whether the ECS instance and cluster are in the same VPC.
- If they are, go to 2.
- If they are not, create an ECS instance and ensure that the ECS instance is in the same VPC as the cluster.
- View the security group rules of the cluster to check whether port 9200 (TCP) is allowed, or falls within an allowed port range, in both the inbound and outbound directions.
- If it is allowed, go to 3.
- If it is not allowed, switch to the VPC management console and configure the security group rule of the cluster to allow port 9200 in both the outbound and inbound directions.
- - Check whether the ECS instance has been added to a security group.
- If the instance has been added to a security group, check whether the security group configuration rules are appropriate. You can view the Security Group information on the Basic Information tab page of the cluster. Then, go to step 4.
Figure 1 Viewing security group information
- - If the instance has not been added to the security group, go to the VPC page from the ECS instance details page, select a security group, and add the ECS to the group.
- - Check whether the ECS instance can connect to the cluster.
ssh <Private network address and port number of a node>
If the cluster contains multiple nodes, check whether the ECS can be connected to each node in the cluster. (A curl-based connectivity check is sketched after this procedure.)
-
-
-- If the connection is normal, the network is running properly.
- If the connection still fails, contact technical support.
-
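If you prefer an HTTP-level check in addition to ssh, a quick sketch is to request port 9200 from the ECS. Replace the IP address with the private address of a cluster node, and use the second form, with your username and password, for a security-mode cluster that uses HTTPS:
curl -XGET 'http://<private IP of a node>:9200'
curl -XGET -u admin:<password> 'https://<private IP of a node>:9200' -k
If the cluster returns its basic information in JSON format, the network connection is normal.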
-
-
-
diff --git a/docs/css/umn/css_02_0034.html b/docs/css/umn/css_02_0034.html
deleted file mode 100644
index 233c2f38..00000000
--- a/docs/css/umn/css_02_0034.html
+++ /dev/null
@@ -1,22 +0,0 @@
-
-
-What Are Regions and AZs?
-Regions and AZs
A region and availability zone (AZ) identify the location of a data center. You can create resources in a specific region and AZ.
-
- A region is a physical data center. Each region is completely independent, and thereby improves fault tolerance and stability. After a resource is created, its region cannot be changed.
- An AZ is a physical location using independent power supplies and networks. Faults in an AZ do not affect other AZs. A region can contain multiple AZs that are physically isolated but networked together. This enables low-cost and low-latency network connections.
-
Figure 1 shows the relationship between regions and AZs.
Figure 1 Regions and AZs
-
-
-
Region Selection
You are advised to select a region close to you or your target users. This reduces network latency and improves the access success rate.
-
-
AZ Selection
When determining whether to deploy resources in the same AZ, consider your application's requirements for disaster recovery (DR) and network latency.
-
- To prioritize DR capabilities, deploy resources in different AZs in the same region.
- To prioritize network latency, deploy resources in the same AZ.
-
-
Regions and Endpoints
Before using an API to call resources, you will need to specify the resource region and endpoint. For details, see "Endpoints" in Cloud Search Service API Reference.
-
-
-
-
diff --git a/docs/css/umn/css_02_0041.html b/docs/css/umn/css_02_0041.html
deleted file mode 100644
index aec38feb..00000000
--- a/docs/css/umn/css_02_0041.html
+++ /dev/null
@@ -1,25 +0,0 @@
-
-
-What Data Compression Algorithms Does CSS Use?
-CSS supports two data compression algorithms: LZ4 (by default) and best_compression.
-
- LZ4 algorithm
LZ4 is the default compression algorithm of Elasticsearch. This algorithm can compress and decompress data quickly, but its compression ratio is low.
-LZ4 scans data with a 4-byte window, which slides 1 byte forward at a time. Duplicate data is compressed. This algorithm applies to scenarios where a large amount of data is read and a small amount of data is written.
- - best_compression algorithm
This algorithm can be used when a large amount of data is written and the index storage cost is high, such as logs and time sequence analysis. This algorithm can greatly reduce the index storage cost.
-
-
Run the following command to switch the default compression algorithm (LZ4) to best_compression:
PUT index-1
-{
- "settings": {
- "index": {
- "codec": "best_compression"
- }
- }
-}
-
-
The LZ4 algorithm compresses and decompresses data quickly, while the best_compression algorithm provides a higher compression ratio at the cost of compression and decompression speed.
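To confirm which codec an index uses, you can query its settings, for example (index-1 is the index created above):
GET index-1/_settings
The codec appears under settings.index.codec in the response; if it is absent, the index uses the default LZ4 codec. Note that index.codec is a static setting, so for an existing index it can generally be changed only while the index is closed.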
-
-
-
diff --git a/docs/css/umn/css_02_0042.html b/docs/css/umn/css_02_0042.html
deleted file mode 100644
index 66136e83..00000000
--- a/docs/css/umn/css_02_0042.html
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
-Why Are All New Index Shards Allocated to the Same Node?
-Possible Cause
The possible causes are as follows:
-
- Shards were unevenly distributed in previous index allocations, and the predominant parameter in the latest index shard allocation was balance.shard. To balance the shard distribution across nodes, the new shards were allocated to the node that held only a small number of shards.
 After a new node was added to the cluster and before automatic cluster rebalancing completed, the predominant parameter was balance.shard. The shards of a new index were therefore allocated to the new node, which did not hold any shards yet.
-
The following two parameters are used to balance the shard allocation in a cluster:
-
cluster.routing.allocation.balance.index (default value: 0.45f)
-
cluster.routing.allocation.balance.shard (default value: 0.55f)
-
- balance.index: A larger value indicates that all the shards of an index are more evenly distributed across nodes. For example, if an index has six shards and there are three data nodes, two shards will be distributed on each node.
- balance.shard: A larger value indicates that all the shards of all the indexes are more evenly distributed across nodes. For example, if index a has two shards, index b has four, and there are three data nodes, two shards will be distributed on each node.
- You can specify both balance.index and balance.shard to balance the shard allocation.
-
-
-
Solution
-To prevent all the shards of an index from being allocated to a single node, use either of the following methods (see the sketch after this list):
-
- To create an index during cluster scale-out, configure the following parameter:
 "index.routing.allocation.total_shards_per_node": 2
-That is, allow no more than two shards of an index to be allocated on each node. Determine the maximum number of shards allowed on each node based on the number of data nodes in your cluster and the number of index shards (both primary and replica).
-
- If too many shards are distributed on only a few nodes, you can move some of the shards to other nodes to balance the distribution. Run the move command of POST _cluster/reroute. The rebalance module will automatically exchange the shard with a shard on the destination node. Determine the values of balance.index and balance.shard as needed.
-
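The following is a minimal sketch of both methods. The index name my_index and the node names node-1 and node-2 are placeholders; adjust the shard counts and the total_shards_per_node limit to the size of your cluster.
PUT my_index
{
  "settings": {
    "number_of_shards": 6,
    "number_of_replicas": 1,
    "index.routing.allocation.total_shards_per_node": 2
  }
}
POST _cluster/reroute
{
  "commands": [
    {
      "move": {
        "index": "my_index",
        "shard": 0,
        "from_node": "node-1",
        "to_node": "node-2"
      }
    }
  ]
}
Setting total_shards_per_node too low can leave shards unassigned when a node becomes unavailable, so leave some headroom.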
-
-
-
diff --git a/docs/css/umn/css_02_0043.html b/docs/css/umn/css_02_0043.html
deleted file mode 100644
index 1ada87bb..00000000
--- a/docs/css/umn/css_02_0043.html
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
-How Do I Query Snapshot Information?
-Prerequisites
The snapshot function has been enabled for the cluster and snapshot information has been configured.
-
-
Querying a Snapshot
- Log in to the CSS management console, and click Clusters in the navigation pane. On the displayed Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the left navigation pane of the Kibana page, click Dev Tools. Click Get to work to switch to the Console page.
Enter the code as required in the left pane, click the execute (triangle) icon to run the command, and view the result in the right pane.
- - Run the GET _snapshot/_all command to query information about all repositories.
Figure 1 Querying information about all repositories
-- bucket: OBS bucket name
- base_path: Path. It consists of a fixed prefix and a cluster name.
- endpoint: OBS domain name
- region: your region
- - Query snapshot information.
- Run the GET _snapshot/repo_auto/_all command to query the list of all the snapshots in the current repository.
Figure 2 Snapshot information
-- snapshot: snapshot name
- state: snapshot status
- start_time, start_time_in_millis, end_time, and end_time_in_millis: snapshot time
- shards: the number of shards. total indicates the total number of shards. failed indicates the number of failures. successful indicates the number of successes.
- - Run the GET _snapshot/repo_auto/$snapshot-xxx command to query information about a specified snapshot.
- Replace $snapshot-xxx with the actual snapshot name.
- repo_auto is followed by a snapshot name or wildcard characters.
-
- - (Optional) Delete information about a specified snapshot.
To delete a specific snapshot, run the DELETE _snapshot/repo_auto/$snapshot-xxx command.
-Replace $snapshot-xxx with the actual snapshot name.
-
-
-
-
-
diff --git a/docs/css/umn/css_02_0051.html b/docs/css/umn/css_02_0051.html
deleted file mode 100644
index dd9f4870..00000000
--- a/docs/css/umn/css_02_0051.html
+++ /dev/null
@@ -1,32 +0,0 @@
-
-
-General Consulting
-
-
-
diff --git a/docs/css/umn/css_02_0052.html b/docs/css/umn/css_02_0052.html
deleted file mode 100644
index 6e9e25b4..00000000
--- a/docs/css/umn/css_02_0052.html
+++ /dev/null
@@ -1,12 +0,0 @@
-
-
-Can I Upgrade a Cluster from an Earlier Version to a Later Version?
-A cluster cannot be directly upgraded. You can purchase a cluster of a later version and migrate your data to it.
-
- Creating a Cluster: Create a cluster of a later version in the region where your current cluster is deployed.
- Migrating a Cluster: Migrate your cluster by backing data up and restoring indexes.
-
-
-
diff --git a/docs/css/umn/css_02_0055.html b/docs/css/umn/css_02_0055.html
deleted file mode 100644
index 86ad80b8..00000000
--- a/docs/css/umn/css_02_0055.html
+++ /dev/null
@@ -1,43 +0,0 @@
-
-
-
- Functions
-
-
-
-
-
diff --git a/docs/css/umn/css_02_0058.html b/docs/css/umn/css_02_0058.html
deleted file mode 100644
index f18f3328..00000000
--- a/docs/css/umn/css_02_0058.html
+++ /dev/null
@@ -1,15 +0,0 @@
-
-
-Can Elasticsearch Data Be Migrated Between VPCs?
-Elasticsearch does not support direct data migration between different VPCs. You can use either of the following methods to migrate data.
-
Method 1
Use the backup and restoration function to migrate cluster data.
-
-
Method 2
- Connect the VPC network and establish a VPC peering connection.
- After the network is connected, use Logstash to migrate data.
-
-
-
-
diff --git a/docs/css/umn/css_02_0063.html b/docs/css/umn/css_02_0063.html
deleted file mode 100644
index e763f951..00000000
--- a/docs/css/umn/css_02_0063.html
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
-
- Clusters in Security Mode
-
-
-
-
-
diff --git a/docs/css/umn/css_02_0064.html b/docs/css/umn/css_02_0064.html
deleted file mode 100644
index 2400c447..00000000
--- a/docs/css/umn/css_02_0064.html
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
-What Is the Relationship Between the Filebeat Version and Cluster Version?
-- Non-security mode: no restrictions.
- Cluster in security mode: The Filebeat OSS version must match the cluster version. For details on how to download the Filebeat OSS version, see Past Releases of Elastic Stack Software.
-
-
-
diff --git a/docs/css/umn/css_02_0066.html b/docs/css/umn/css_02_0066.html
deleted file mode 100644
index 3bfbc128..00000000
--- a/docs/css/umn/css_02_0066.html
+++ /dev/null
@@ -1,35 +0,0 @@
-
-
-
- Resource Usage and Change
-
-
-
-
-
diff --git a/docs/css/umn/css_02_0067.html b/docs/css/umn/css_02_0067.html
deleted file mode 100644
index a4252af1..00000000
--- a/docs/css/umn/css_02_0067.html
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
-How Do I Clear Expired Data to Release Storage Space?
-- Run the following command to delete the data of a single index:
curl -XDELETE http://IP:9200/Index_name
-
IP: the IP address of any node in the cluster
-
- - Run the following command to delete all Logstash data of a day. For example, delete all data on June 19, 2017:
For a cluster in non-security mode: curl -XDELETE 'http://IP:9200/logstash-2017.06.19*'
-For a cluster in security mode: curl -XDELETE -u username:password 'https://IP:9200/logstash-2017.06.19*' -k
-
- username: username of the administrator. The default value is admin.
- password: the password set during cluster creation
- IP: the IP address of any node in the cluster
-
-
-
-
-
diff --git a/docs/css/umn/css_02_0068.html b/docs/css/umn/css_02_0068.html
deleted file mode 100644
index 9f9ee56d..00000000
--- a/docs/css/umn/css_02_0068.html
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
-How Do I Configure a Two-Replica CSS Cluster?
-- Run GET _cat/indices?v in Kibana to check the number of index replicas. If the value of rep is 1, each shard has one replica in addition to the primary shard, that is, the cluster keeps two copies of the data.

- - If the value of rep is not 1, run the following command to set the number of replicas:
PUT /index/_settings
-{
-"number_of_replicas" : 1 //Number of replicas
-}
-
index specifies the index name. Set this parameter based on site requirements.
-
-
-
-
-
diff --git a/docs/css/umn/css_02_0069.html b/docs/css/umn/css_02_0069.html
deleted file mode 100644
index 2d22b747..00000000
--- a/docs/css/umn/css_02_0069.html
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
-How Do I Delete Index Data?
-- Manually: Run the DELETE /my_index command in Kibana.
Automatically: Create scheduled tasks to call the index deletion request and periodically execute the tasks. CSS supports Open Distro Index State Management (a sample policy is sketched below). For details, see: https://opendistro.github.io/for-elasticsearch-docs/docs/im/ism/
-
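As a sketch of the automatic approach, the following Open Distro ISM policy deletes an index once it is older than seven days, and the second request attaches the policy to existing indexes matching logstash-*. The policy name, index pattern, and retention period are illustrative, and the exact fields may vary slightly between Open Distro versions:
PUT _opendistro/_ism/policies/delete_old_indexes
{
  "policy": {
    "description": "Sample policy: delete indexes older than 7 days",
    "default_state": "hot",
    "states": [
      {
        "name": "hot",
        "actions": [],
        "transitions": [
          { "state_name": "delete", "conditions": { "min_index_age": "7d" } }
        ]
      },
      {
        "name": "delete",
        "actions": [ { "delete": {} } ],
        "transitions": []
      }
    ]
  }
}
POST _opendistro/_ism/add/logstash-*
{ "policy_id": "delete_old_indexes" }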
-
-
diff --git a/docs/css/umn/css_02_0070.html b/docs/css/umn/css_02_0070.html
deleted file mode 100644
index e6b7adfc..00000000
--- a/docs/css/umn/css_02_0070.html
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
-
- Components
-
-
-
-
-
diff --git a/docs/css/umn/css_02_0073.html b/docs/css/umn/css_02_0073.html
deleted file mode 100644
index e01401f0..00000000
--- a/docs/css/umn/css_02_0073.html
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
-Kibana
-
-
-
diff --git a/docs/css/umn/css_02_0077.html b/docs/css/umn/css_02_0077.html
deleted file mode 100644
index 0154ae39..00000000
--- a/docs/css/umn/css_02_0077.html
+++ /dev/null
@@ -1,22 +0,0 @@
-
-
-Clusters
-
-
-
diff --git a/docs/css/umn/css_02_0078.html b/docs/css/umn/css_02_0078.html
deleted file mode 100644
index fba54405..00000000
--- a/docs/css/umn/css_02_0078.html
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
-Can a New Cluster Use the IP Address of the Old Cluster?
-
-
-
diff --git a/docs/css/umn/css_02_0079.html b/docs/css/umn/css_02_0079.html
deleted file mode 100644
index 65bc1b26..00000000
--- a/docs/css/umn/css_02_0079.html
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
-Can I Associate My EIP If I Want to Access the Cluster from the Internet?
-No. To access a cluster from the Internet, see Public IP Address Access.
-
-
-
diff --git a/docs/css/umn/css_02_0081.html b/docs/css/umn/css_02_0081.html
deleted file mode 100644
index 77c2dc4a..00000000
--- a/docs/css/umn/css_02_0081.html
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
-Can I Use x-pack-sql-jdbc to Access CSS Clusters and Query Data?
-No. Currently, CSS does not integrate the x-pack component.
-
-
-
diff --git a/docs/css/umn/css_02_0082.html b/docs/css/umn/css_02_0082.html
deleted file mode 100644
index 648a8593..00000000
--- a/docs/css/umn/css_02_0082.html
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-
- Ports
-
-
-
-
-
diff --git a/docs/css/umn/css_02_0083.html b/docs/css/umn/css_02_0083.html
deleted file mode 100644
index 78180044..00000000
--- a/docs/css/umn/css_02_0083.html
+++ /dev/null
@@ -1,13 +0,0 @@
-
-
-Are Ports 9200 and 9300 Both Open?
-Yes. Port 9200 is used by external systems to access CSS clusters, and port 9300 is used for communication between nodes.
-
The methods for accessing port 9300 are as follows:
-
-- If your client is in the same VPC and subnet as the CSS cluster, you can access the port directly.
 If your client is in the same VPC as the CSS cluster but in a different subnet, apply for a route separately.
 If your client is in a different VPC and subnet from the CSS cluster, create a VPC peering connection to enable communication between the two VPCs, and then apply for routes to connect the two subnets.
-
-
-
diff --git a/docs/css/umn/css_02_0088.html b/docs/css/umn/css_02_0088.html
deleted file mode 100644
index 9ec12c0f..00000000
--- a/docs/css/umn/css_02_0088.html
+++ /dev/null
@@ -1,12 +0,0 @@
-
-
-Can I Install Search Guard on CSS?
-CSS does not currently support installation of Search Guard.
-
CSS provides clusters in security mode, which have the same functions as Search Guard.
-
-
-
diff --git a/docs/css/umn/css_02_0089.html b/docs/css/umn/css_02_0089.html
deleted file mode 100644
index 8e8c9533..00000000
--- a/docs/css/umn/css_02_0089.html
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
-Can I Change the Number of Shards to Four with Two Replicas When There Is One Shard Set in the JSON File?
-Once an index is created, the number of primary shards cannot be changed. To use a different number of primary shards, create a new index with the desired settings and reindex the data into it (see the sketch at the end of this answer).
-
You can run the following command in Kibana to change the number of replicas:
-
PUT /indexname/_settings
-{
-"number_of_replicas":1 //Number of replicas
-}
-
indexname specifies the index name. Set this parameter based on site requirements.
-
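If you need an index with four primary shards and two replicas, a common approach is to create a new index with the desired settings and reindex the data into it. A minimal sketch, with old_index and new_index as placeholder names:
PUT new_index
{
  "settings": {
    "number_of_shards": 4,
    "number_of_replicas": 2
  }
}
POST _reindex
{
  "source": { "index": "old_index" },
  "dest": { "index": "new_index" }
}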
-
-
-
diff --git a/docs/css/umn/css_02_0093.html b/docs/css/umn/css_02_0093.html
deleted file mode 100644
index e2bc870c..00000000
--- a/docs/css/umn/css_02_0093.html
+++ /dev/null
@@ -1,13 +0,0 @@
-
-
-How Do I Check the Numbers of Shards and Replicas in a Cluster on the CSS Console?
-- Log in to the console.
- On the Clusters page, click Access Kibana in the Operation column of a cluster.
- Log in to Kibana and choose Dev Tools.

- On the Console page, run the GET _cat/indices?v command to query the number of shards and replicas in a cluster. In the output (a sample is shown after this procedure), the pri column indicates the number of primary shards of an index, and the rep column indicates the number of replicas. After an index is created, its pri value cannot be modified, but its rep value can be.

-
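Because the console screenshot is not shown here, the following is roughly what the GET _cat/indices?v output looks like; the index name, UUID, and figures are illustrative. Check the pri and rep columns:
health status index    uuid                   pri rep docs.count docs.deleted store.size pri.store.size
green  open   my_index 1xYzAbCdEfGhIjKlMnOpQw 3   1   1000       0            1.2mb      600kb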
-
-
-
diff --git a/docs/css/umn/css_02_0094.html b/docs/css/umn/css_02_0094.html
deleted file mode 100644
index 6b04b9a2..00000000
--- a/docs/css/umn/css_02_0094.html
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-How Do I Migrate a CSS Cluster Across Regions?
-CSS clusters cannot be directly migrated. You can back up a cluster to an OBS bucket and restore it to a new region.
-
- If the OBS bucket is in the same region as your CSS cluster, migrate the cluster by following the instructions in Index Backup and Restoration.
- If the OBS bucket is not in the same region as your CSS cluster, configure cross-region replication to back up the cluster to the bucket, and migrate the cluster by following the instructions in Index Backup and Restoration.
-
- Before cross-region replication, ensure the snapshot folder of the destination cluster is empty. Otherwise, the snapshot information cannot be updated to the snapshot list of the destination cluster.
- Before every migration, ensure the folder is empty.
-
-
-
-
diff --git a/docs/css/umn/css_02_0096.html b/docs/css/umn/css_02_0096.html
deleted file mode 100644
index e7af26b8..00000000
--- a/docs/css/umn/css_02_0096.html
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
-How Do I Configure the Threshold for CSS Slow Query Logs?
-The slow query log settings of CSS are the same as those of Elasticsearch. You can configure slow query logs via the _settings API. For example, you can run the following command in Kibana to set the index level:
-
PUT /my_index/_settings
-{
- "index.search.slowlog.threshold.query.warn": "10s",
- "index.search.slowlog.threshold.fetch.debug": "500ms",
- "index.indexing.slowlog.threshold.index.info": "5s"
-}
-
- If a query takes longer than 10 seconds, a WARN log will be generated.
- If retrieval takes longer than 500 milliseconds, a DEBUG log will be generated.
- If an index takes longer than 5 seconds, an INFO log will be generated.
-
For details, visit the official website: https://www.elastic.co/guide/cn/elasticsearch/guide/current/logging.html
-
-
-
diff --git a/docs/css/umn/css_02_0097.html b/docs/css/umn/css_02_0097.html
deleted file mode 100644
index bef3452e..00000000
--- a/docs/css/umn/css_02_0097.html
+++ /dev/null
@@ -1,37 +0,0 @@
-
-
-Connecting User-Built Kibana to an Elasticsearch Cluster
-To interconnect user-built Kibana with CSS Elasticsearch clusters, the following conditions must be met:
-
- The local environment must support access from external networks.
Kibana is built on an ECS in the same VPC as the Elasticsearch cluster, and this Kibana can be accessed from the local public network.
- Only Kibana images of the OSS version can be connected to Elasticsearch on CSS.
-
Example of a Kibana configuration file:
-
- Security mode:
elasticsearch.username: "***"
-elasticsearch.password: "***"
-elasticsearch.ssl.verificationMode: none
-server.ssl.enabled: false
-server.rewriteBasePath: false
-server.port: 5601
-logging.dest: /home/Ruby/log/kibana.log
-pid.file: /home/Ruby/run/kibana.pid
-server.host: 192.168.xxx.xxx
-elasticsearch.hosts: https://10.0.0.xxx:9200
-elasticsearch.requestHeadersWhitelist: ["securitytenant","Authorization"]
-opendistro_security.multitenancy.enabled: true
-opendistro_security.multitenancy.tenants.enable_global: true
-opendistro_security.multitenancy.tenants.enable_private: true
-opendistro_security.multitenancy.tenants.preferred: ["Private", "Global"]
-opendistro_security.multitenancy.enable_filter: false
-
- - Non-security mode
server.port: 5601
-logging.dest: /home/Ruby/log/kibana.log
-pid.file: /home/Ruby/run/kibana.pid
-server.host: 192.168.xxx.xxx
-elasticsearch.hosts: http://10.0.0.xxx:9200
-
-
-
-
diff --git a/docs/css/umn/css_02_0098.html b/docs/css/umn/css_02_0098.html
deleted file mode 100644
index 9061ce16..00000000
--- a/docs/css/umn/css_02_0098.html
+++ /dev/null
@@ -1,13 +0,0 @@
-
-
-Can I Export Data from Kibana?
-Exporting data from Kibana requires the SQL Workbench plugin. Currently, you can only export data from Kibana 7.6.2 or later.
-
In SQL Workbench of Kibana, you can enter Elasticsearch SQL statements to query data or click Download to export data. You can export 1 to 200 data records. By default, 200 data records are exported.
-
Figure 1 SQL Workbench
-
-
-
diff --git a/docs/css/umn/css_02_0099.html b/docs/css/umn/css_02_0099.html
deleted file mode 100644
index fa4a3f5d..00000000
--- a/docs/css/umn/css_02_0099.html
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
-How Do I Query Index Data on Kibana in an ES Cluster?
-
-
Run the following command to query index data through an API on Kibana:
-
GET indexname/_search
-
The returned data is shown in the following figure.
-
Figure 1 Returned data
-
- took: Time the query took, in milliseconds.
 timed_out: Whether the query timed out.
 _shards: Shard statistics. In this example the data is split into five shards, all five were searched successfully, none failed, and none were skipped.
 hits.total: Number of matching documents. Three documents are returned in this example.
 max_score: Score of the returned documents. A document that is more relevant to your search criteria has a higher score.
- hits.hits: Detailed information of the returned documents.
-
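Because the figure is not included here, the following is a minimal example of such a response. The index name, document content, and scores are illustrative, only the first of the three matching documents is shown in hits.hits, and in Elasticsearch 7 and later hits.total is an object rather than a plain number:
{
  "took": 5,
  "timed_out": false,
  "_shards": { "total": 5, "successful": 5, "skipped": 0, "failed": 0 },
  "hits": {
    "total": 3,
    "max_score": 1.0,
    "hits": [
      { "_index": "my_index", "_type": "_doc", "_id": "1", "_score": 1.0, "_source": { "message": "hello" } }
    ]
  }
}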
-
-
-
diff --git a/docs/css/umn/css_02_0101.html b/docs/css/umn/css_02_0101.html
deleted file mode 100644
index 09754f25..00000000
--- a/docs/css/umn/css_02_0101.html
+++ /dev/null
@@ -1,16 +0,0 @@
-
-
-Can I Modify the TLS Algorithm of an Elasticsearch Cluster?
-You can modify TLS algorithms in CSS 7.6.2 and later versions.
-
- Log in to the CSS management console.
- In the navigation pane, choose Clusters. The cluster list is displayed.
- Click the name of the target cluster to go to the cluster details page.
- Select Parameter Configurations, click Edit, expand the Customize parameter, and click Add.
Add the opendistro_security.ssl.http.enabled_ciphers parameter and set it to ['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384'].
-
If the parameter value contains multiple cipher suites, enclose the value in a pair of square brackets ([]). If the parameter value is a single cipher suite, enclose the value in a pair of single quotation marks ('').
-
- After the modification is complete, click Submit. In the displayed Submit Configuration dialog box, select the box indicating "I understand that the modification will take effect after the cluster is restarted." and click Yes.
If the Status is Succeeded in the parameter modification list, the modification has been saved.
- - Return to the cluster list and choose More > Restart in the Operation column to restart the cluster and make the modification take effect.
-
-
-
diff --git a/docs/css/umn/css_02_0102.html b/docs/css/umn/css_02_0102.html
deleted file mode 100644
index bf75d966..00000000
--- a/docs/css/umn/css_02_0102.html
+++ /dev/null
@@ -1,20 +0,0 @@
-
-
-How Do I Set the search.max_buckets Parameter for an ES Cluster?
-Function
If the query results on shards exceed the upper limit of records that can be returned (default value: 10000), you need to increase the limit by changing the value of search.max_buckets.
-
-
Solution
Run the following command on the Dev Tools page of Kibana:
PUT _cluster/settings
-{
- "persistent": {
- "search.max_buckets": 20000
- }
-}
-
-
-
-
-
diff --git a/docs/css/umn/css_02_0106.html b/docs/css/umn/css_02_0106.html
deleted file mode 100644
index 86f87bd7..00000000
--- a/docs/css/umn/css_02_0106.html
+++ /dev/null
@@ -1,12 +0,0 @@
-
-
-How Do I Obtain the Security Certificate of CSS?
-The security certificate (CloudSearchService.cer) can be downloaded only for security clusters that have enabled HTTPS access.
-
- Log in to the CSS management console.
- In the navigation pane, choose Clusters. The cluster list is displayed.
- Click the name of a cluster to go to the cluster details page.
- On the Configuration page, click Download Certificate next to Security Mode.
-
-
-
diff --git a/docs/css/umn/css_02_0118.html b/docs/css/umn/css_02_0118.html
deleted file mode 100644
index a7dabd53..00000000
--- a/docs/css/umn/css_02_0118.html
+++ /dev/null
@@ -1,13 +0,0 @@
-
-
-How Do I Set the Numbers of Index Copies to 0 in Batches?
-- Log in to the Kibana page of the cluster. In the navigation pane, choose Dev Tools.
- Modify and run the PUT /*/_settings {"number_of_replicas": 0} command.
Do not run the preceding command as is, because the asterisk (*) may also match internal security indexes. You are advised to specify the indexes required for the batch operation. Example: PUT /test*/_settings {"number_of_replicas": 0}
-
-
-
-
-
diff --git a/docs/css/umn/css_02_0119.html b/docs/css/umn/css_02_0119.html
deleted file mode 100644
index b2ca351b..00000000
--- a/docs/css/umn/css_02_0119.html
+++ /dev/null
@@ -1,29 +0,0 @@
-
-
-How Do I Update the CSS Lifecycle Policy?
-The CSS lifecycle is implemented using the Index State Management (ISM) of Open Distro. For details about how to configure policies related to the ISM template, see the Open Distro documentation.
-
- When a policy is created, the system writes a record to the .opendistro-ism-config index. In the record, _id is the policy name, and the content is the policy definition.
Figure 1 Writing a data record
- - After a policy is bound to an index, the system writes another record to the .opendistro-ism-config index. The following figure shows the initial status of a record.
Figure 2 Initial data status
- - Run the explain command. Only a policy ID will be returned.
GET _opendistro/_ism/explain/data2
-{
- "data2" : {
- "index.opendistro.index_state_management.policy_id" : "policy1"
- }
-}
-Open Distro will execute an initialization process to fill the policy content in the record. The following figure shows the initialized data.
-Figure 3 Initialized data
-After the initialization, min_index_age in the policy will be copied.
-
The initialized index uses a copy of this policy. The policy update will not take effect on the index.
-
-
- After the policy is modified, call the change_policy API to update the policy.
POST _opendistro/_ism/change_policy/data1
-{
- "policy_id": "policy1"
-}
-
-
-
-
diff --git a/docs/css/umn/css_02_0120.html b/docs/css/umn/css_02_0120.html
deleted file mode 100644
index bb8fc994..00000000
--- a/docs/css/umn/css_02_0120.html
+++ /dev/null
@@ -1,22 +0,0 @@
-
-
-Can I Restore a Deleted Cluster?
-Yes. You can use a snapshot stored in OBS to restore a cluster. A deleted cluster that has no snapshots in OBS cannot be restored. Exercise caution when deleting a cluster.
-
To restore a deleted cluster, perform the following steps:
-
- Log in to the CSS management console.
- In the navigation pane on the left, choose Clusters. On the displayed Clusters page, click Create Cluster in the upper right corner to create a cluster and enable the snapshot function. Set the OBS bucket and backup path to those of the cluster to be restored.
To restore a deleted cluster to an existing cluster, set the OBS bucket and backup path to those of the deleted cluster.
-
To restore a deleted cluster to a new cluster, ensure they are in the same region. The new cluster version must be the same as or later than that of the deleted cluster. The number of nodes in the new cluster must be greater than half of that in the deleted cluster. Otherwise, the cluster may fail to be restored.
-
- - If the status of the new cluster changes to Available, click the cluster name to go to the Cluster Information page.
- In the navigation pane on the left, choose Cluster Snapshots.
In the snapshot management list, you can view the snapshot information of the deleted cluster. If no information is displayed, wait for several minutes and refresh the page.
- - Locate the target snapshot and click Restore in the Operation column. The Restore page is displayed.
- On the Restore page, set restoration parameters.
Index: Enter the name of the index you want to restore. If you do not specify any index name, data of all indexes will be restored. The value can contain 0 to 1,024 characters. Uppercase letters, spaces, and certain special characters (including "\<|>/?) are not allowed.
-Rename Pattern: Enter a regular expression. Indexes that match the regular expression are restored. The default value index_(.+) indicates restoring data of all indexes. The value can contain 0 to 1,024 characters. Uppercase letters, spaces, and certain special characters (including "\<|>/?,) are not allowed.
-Rename Replacement: Enter the index renaming rule. The default value restored_index_$1 indicates that restored_ is added in front of the names of all restored indexes. The value can contain 0 to 1,024 characters. Uppercase letters, spaces, and certain special characters (including "\<|>/?,) are not allowed. You can set Rename Replacement only if you have specified Rename Pattern.
-Cluster: Select the cluster to which you want to restore the snapshot. You can select the current cluster or another one, but the snapshot can only be restored to clusters in the Available state. If the status of the current cluster is Unavailable, you cannot restore the snapshot to it. If you select another cluster and two or more indexes in that cluster have the same name, the data of all indexes with the specified name will be overwritten. Therefore, exercise caution when you set the parameters.
-Figure 1 Restoring a snapshot
- - Click OK. If restoration succeeds, Task Status of the snapshot in the snapshot list will change to Restoration succeeded, and the index data is generated again according to the snapshot information.
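For reference, these console parameters map onto the Elasticsearch snapshot restore API. A rough sketch of the equivalent request, assuming a repository named css-obs-repo and a snapshot named snapshot-1 (both names are illustrative), looks like this:
POST _snapshot/css-obs-repo/snapshot-1/_restore
{
  "indices": "*",
  "rename_pattern": "index_(.+)",
  "rename_replacement": "restored_index_$1"
}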
-
-
-
diff --git a/docs/css/umn/css_02_0124.html b/docs/css/umn/css_02_0124.html
deleted file mode 100644
index 7ec7a354..00000000
--- a/docs/css/umn/css_02_0124.html
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
-What Are the Impacts If an Elasticsearch Cluster Has Too Many Shards?
-- A large number of shards in a cluster slows down shard creation.
 - If automatic index creation is enabled, slow index creation may cause a large number of write requests to pile up in memory or even result in a cluster breakdown.
- If there are too many shards and you cannot properly monitor workloads, the number of records in a single shard may exceed the threshold, and write requests may be denied.
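To gauge whether a cluster is approaching these limits, you can check the current number and distribution of shards; a minimal sketch:
GET _cat/shards?v
GET _cluster/health?filter_path=status,active_shards,unassigned_shards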
-
-
-
diff --git a/docs/css/umn/css_02_0125.html b/docs/css/umn/css_02_0125.html
deleted file mode 100644
index 91fad38e..00000000
--- a/docs/css/umn/css_02_0125.html
+++ /dev/null
@@ -1,25 +0,0 @@
-
-
-How Do I Set the Default Maximum Number of Records Displayed on a Page for an Elasticsearch Cluster?
-Solution
- Method 1
Open Kibana and run the following commands on the Dev Tools page:
-PUT _all/_settings?preserve_existing=true
-{
-"index.max_result_window" : "10000000"
-}
- - Method 2
Run the following commands in the background:
-curl -XPUT 'http://localhost:9200/_all/_settings?preserve_existing=true' -H 'Content-Type: application/json' -d '
-{
-"index.max_result_window" : "10000000"
-}'
-
-
-
This configuration consumes memory and CPU resources. Exercise caution when setting this parameter.
-
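To verify that the setting has taken effect, you can read it back; a minimal sketch:
GET _all/_settings/index.max_result_window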
-
-
-
-
diff --git a/docs/css/umn/css_02_0126.html b/docs/css/umn/css_02_0126.html
deleted file mode 100644
index 46022774..00000000
--- a/docs/css/umn/css_02_0126.html
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-Why Does the Disk Usage Increase After the delete_by_query Command Was Executed to Delete Data?
-Running the delete_by_query command only marks the target data as deleted instead of physically removing it. When you search for data, all documents are scanned and those marked as deleted are filtered out of the results.
-
The space occupied by documents marked as deleted is not released immediately after the deletion command is executed. The disk space is released only when the next segment merge is performed.
-
The deletion marks themselves also occupy disk space, which is why the disk usage may increase after you run the deletion command.
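If the space needs to be reclaimed sooner, a merge that expunges deleted documents can be triggered manually; a minimal sketch (the index name is illustrative):
POST /my-index/_forcemerge?only_expunge_deletes=true
Note that a force merge is I/O-intensive, so it is best run during off-peak hours.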
-
-
-
-
diff --git a/docs/css/umn/css_02_0127.html b/docs/css/umn/css_02_0127.html
deleted file mode 100644
index 88569fb1..00000000
--- a/docs/css/umn/css_02_0127.html
+++ /dev/null
@@ -1,16 +0,0 @@
-
-
-Does the Value i of node.roles Indicate an Ingest Node?
-Function
- If the value of node.roles of a client node is i, is this client node an ingest node?
 - Are there coordinating only nodes in clusters? Are the client requests distributed to coordinating nodes?
- Are ingest nodes in the idle state when there are no ingest requests?
-
-
-
Solution
If the value of node.roles of a client node is i, the ingest node mode is enabled.
-
- The coordinating only nodes of Elasticsearch are called client nodes in CSS. If a cluster has no client nodes, client requests will be distributed to all nodes.
 - An ingest node works like a lightweight ELK pipeline for data conversion. Even when there are no ingest requests, ingest nodes are not in the idle state.
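As an illustration of the kind of data conversion an ingest node performs, a minimal ingest pipeline sketch (the pipeline, index, and field names are illustrative):
PUT _ingest/pipeline/my-conversion-pipeline
{
  "description": "Rename a field and add a tag before indexing",
  "processors": [
    { "rename": { "field": "msg", "target_field": "message" } },
    { "set": { "field": "source", "value": "ingest-node" } }
  ]
}
PUT my-index/_doc/1?pipeline=my-conversion-pipeline
{
  "msg": "hello"
}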
-
-
-
-
diff --git a/docs/css/umn/css_02_0128.html b/docs/css/umn/css_02_0128.html
deleted file mode 100644
index 1b1f185c..00000000
--- a/docs/css/umn/css_02_0128.html
+++ /dev/null
@@ -1,16 +0,0 @@
-
-
-How Do I Convert the Format of a CER Security Certificate?
-The security certificate (CloudSearchService.cer) can be downloaded only for security clusters that have enabled HTTPS access. Most software supports certificates in the .pem or .jks format. You need to convert the format of the CSS security certificate.
-
-
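As a rough sketch, typical conversion commands using openssl and keytool look like the following (they assume the certificate was downloaded as CloudSearchService.cer and, for the openssl example, that it is DER-encoded; adjust as needed):
# Convert the .cer certificate to .pem format (assumes a DER-encoded input)
openssl x509 -inform der -in CloudSearchService.cer -out newname.pem
# Import the .cer certificate into a .jks truststore (you will be prompted for a keystore password)
keytool -import -alias newname -keystore ./newname.jks -file ./CloudSearchService.cer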
In the preceding commands, newname indicates the user-defined certificate name.
-
After the command is executed, set the certificate password and confirm the password as prompted. Securely store the password. It will be used for accessing the cluster.
-
-
-
diff --git a/docs/css/umn/css_02_0130.html b/docs/css/umn/css_02_0130.html
deleted file mode 100644
index 7d5bfb44..00000000
--- a/docs/css/umn/css_02_0130.html
+++ /dev/null
@@ -1,25 +0,0 @@
-
-
-How Do I Clear the Cache of a CSS Cluster?
-- Clear the fielddata
During aggregation and sorting, data are converted to the fielddata structure, which occupies a large amount of memory.
-- Run the following command on Kibana to check the memory occupied by the fielddata of each index:
GET /_cat/indices?v&h=index,fielddata.memory_size&s=fielddata.memory_size:desc
- - If the memory usage of fielddata is too high, you can run the following command to clear fielddata:
POST /test/_cache/clear?fielddata=true
-
-In the preceding command, test indicates the name of the index whose fielddata occupies a large amount of memory.
- - Clear segments
The FST structure of each segment is loaded to the memory and will not be cleared. If the number of index segments is too large, the memory usage will be high. You are advised to periodically clear the segments.
-- Run the following command on Kibana to check the number of segments and their memory usage on each node:
GET /_cat/nodes?v&h=segments.count,segments.memory&s=segments.memory:desc
 - - If the memory usage of segments is too high, you can delete or disable unnecessary indexes, or periodically merge indexes that are no longer updated by running a force merge (see the sketch after this list).
- - Clear the cache
Run the following command on Kibana to clear the cache:
-POST _cache/clear
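For the segment cleanup mentioned above, an index that is no longer written to can be merged down to a single segment; a minimal sketch (the index name is illustrative):
POST /my-old-index/_forcemerge?max_num_segments=1
Only run a force merge on indexes that are no longer being written to, as the operation is I/O-intensive.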
-
-
-
-
diff --git a/docs/css/umn/css_02_0131.html b/docs/css/umn/css_02_0131.html
deleted file mode 100644
index a01b4245..00000000
--- a/docs/css/umn/css_02_0131.html
+++ /dev/null
@@ -1,16 +0,0 @@
-
-
-The Average Memory Usage of an Elasticsearch Cluster Reaches 98%
-Symptom
The cluster monitoring result shows that the average memory usage of a cluster is 98%. Does it affect cluster performance?
-
-
Possible Cause
In an Elasticsearch cluster, about 50% of the memory is used by the Elasticsearch JVM heap, while the other 50% is reserved for Lucene to cache files in the operating system file cache. It is therefore normal for the average memory usage to reach 98%.
-
-
Solution
You can monitor the cluster memory usage by checking the maximum JVM heap usage and average JVM heap usage.
-
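To check the JVM heap usage of each node directly, a minimal sketch:
GET _cat/nodes?v&h=name,heap.percent,heap.current,heap.max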
-
-
-
diff --git a/docs/css/umn/css_02_0132.html b/docs/css/umn/css_02_0132.html
deleted file mode 100644
index ae3fb719..00000000
--- a/docs/css/umn/css_02_0132.html
+++ /dev/null
@@ -1,15 +0,0 @@
-
-
-How Do I Create a Type Under an Index in an Elasticsearch 7.x Cluster?
-In Elasticsearch 7.x and later versions, types cannot be created for indexes.
-
If you need to use types, add include_type_name=true to the command. For example:
-
PUT _template/urldialinfo_template?include_type_name=true
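For reference, a complete request with a template body that declares a custom type might look like the following (the index pattern, type name, and field are illustrative):
PUT _template/urldialinfo_template?include_type_name=true
{
  "index_patterns": ["urldialinfo*"],
  "mappings": {
    "my_type": {
      "properties": {
        "url": { "type": "keyword" }
      }
    }
  }
}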
-
After the command is executed, the following information is displayed:
-
"#! Deprecation: [types removal] Specifying include_type_name in put index template requests is deprecated. The parameter will be removed in the next major version. "
-
-
-
diff --git a/docs/css/umn/css_02_0201.html b/docs/css/umn/css_02_0201.html
deleted file mode 100644
index 1a4b677b..00000000
--- a/docs/css/umn/css_02_0201.html
+++ /dev/null
@@ -1,75 +0,0 @@
-
-
-How Do I Access Kibana from Outside the Cloud Using ELB?
-Overview
Currently, to access the Kibana dashboard of CSS, a user has to log in to the OTC console and navigate to the Kibana login page.
-
-
To make this more convenient, a user can run the provided Python script, which configures a dedicated load balancer in OTC so that the Kibana dashboard can be accessed through a public IP address.
-
-
ELB Configuration Script
This script configures ELB so that the CSS Kibana dashboard can be accessed in HTTPS mode. It creates a dedicated load balancer with an HTTPS listener that forwards traffic to the CSS nodes on port 5601, where the Kibana dashboard is served.
-
Download Script
-
-
Installing Dependency
The script depends on otcextensions library.
-
If you already have Python with pip installed, you can simply run:
-
pip install otcextensions
-
 - To know more details about using the otcextensions library, check the otcextensions docs.
A file called clouds.yaml holds all necessary configuration parameters. The file can be placed either in the local directory, below the user home directory in .config/openstack, or in the system-wide directory /etc/openstack. You may use a second file secure.yaml in the same directories to further protect clear-text password credentials. For more details, see the configuration section in the official documentation.
-Minimal sample clouds.yaml file:
-clouds:
- otc:
- profile: otc
- auth:
- username: '<USER_NAME>'
- password: '<PASSWORD>'
- project_name: '<eu-de_project>'
- # or project_id: '<123456_PROJECT_ID>'
- user_domain_name: 'OTC00000000001000000xxx'
- # or user_domain_id: '<123456_DOMAIN_ID>'
- auth_url: 'https://iam.eu-de.otc.t-systems.com:443/v3'
-With this configuration, you can start using the CLI with openstack --os-cloud otc *command* or by running export OS_CLOUD=otc; openstack *command*.
- - Environment variables: Authentication using username/password is often used:
export OS_AUTH_URL=<url-to-openstack-identity>
-export OS_PROJECT_NAME=<project-name>
-export OS_USERNAME=<username>
-export OS_PASSWORD=<password>
-export OS_USER_DOMAIN_NAME=<user-domain-name>
-export OS_IDENTITY_API_VERSION=3
-
-
In addition to that a regular clouds.yaml configuration file can be used.
-
More information is available at:
-
https://docs.openstack.org/openstacksdk/latest/user/config/configuration.html
-
-
Pre-Requisites
The script requires the ID of a CSS cluster and a certificate ID for creating an HTTPS listener.
-
 - You can get the CSS cluster ID from the OTC console: go to CSS Dashboard -> Clusters and click your CSS cluster to view its details.
 - To learn more about creating and obtaining a TLS certificate, check the ELB User Guide.
-
Generating a TLS certificate with the openssl command:
-
openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:4096 -keyout private.key -out certificate.crt
-
When adding the certificate and private key, Certificate Type must be "Server Certificate".
-
-
-
Running The Script
Once you have certificate_id and cluster_id, you are ready to run the script.
-
-
List ELB Flavors
python3 script.py elb-flavors
-
This prints the L7 flavors supported by the dedicated load balancer. To print all supported flavor types, you can add the --all argument to the command. Only the L7 flavor type is needed here, which is why --all is optional.
-
-
Configure ELB
python3 script.py elb-configure --cluster-id <cluster_id> --certificate-id <certificate_id>
-
The --certificate-id argument is optional. If it is not provided, the load balancer is configured with an HTTP listener.
-
Configure ELB with specific Flavor
-
 - By default, the ELB is configured with the smallest L7 flavor. If you want to use a specific flavor, run the commands below.
 - Only an L7 flavor type can be used, because an HTTPS listener is being created.
-
# Prints list of Loadbalancer flavor Types
- python3 script.py elb-flavors
-
- # Run the script
- python3 script.py elb-configure --cluster-id <cluster_id> --certificate-id <certificate_id> --flavor-id <flavor_id>
-
-
Delete ELB
python3 script.py elb-delete <loadbalancer_name_or_id>
-
Delete ELB and Release Public EIP
-
python3 script.py elb-delete <loadbalancer_id> --release-public-ip
-
Please use the elb-delete command with caution.
-
-
-
Logging
When you run the script, a log file named debug.log is created, containing details of all API requests.
-
-
-
-
diff --git a/docs/css/umn/css_04_0001.html b/docs/css/umn/css_04_0001.html
deleted file mode 100644
index c72f65b4..00000000
--- a/docs/css/umn/css_04_0001.html
+++ /dev/null
@@ -1,20 +0,0 @@
-
-
-What Is Cloud Search Service?
-CSS
Cloud Search Service (CSS) is a fully hosted distributed search service based on Elasticsearch. You can use it for structured and unstructured data search, and use AI vectors for combined search, statistics, and reports. CSS is a fully managed cloud service of the ELK Stack and is compatible with open-source Elasticsearch, Kibana, and Cerebro.
-
Elasticsearch is an open-source distributed search engine that can be deployed in standalone or cluster mode. As the heart of the ELK Stack, Elasticsearch clusters support multi-condition search and statistical analysis, and can generate visualized reports of structured and unstructured text. For details about Elasticsearch, see Elasticsearch: The Definitive Guide.
-
CSS can be deployed automatically, allowing you to quickly create Elasticsearch clusters. It applies search engine optimization practices and requires no O&M on your part. Additionally, it has a robust monitoring system that presents key metrics, including cluster and query performance, so that you can focus on your business logic.
-
-
Functions
- Compatible with Elasticsearch
Freely use native Elasticsearch APIs and other software in the ecosystem, such as Beats and Kibana.
- - Support various data sources
A few simple configurations can allow you to smoothly connect to multiple data sources, such as FTP, OBS, HBase, and Kafka. No extra coding is required.
- - One-click operation
One-click cluster application, capacity expansion, and restart from small-scale testing to large-scale rollout
- - User-defined snapshot policies
Trigger backup snapshots manually or configure an automated schedule.
-
-
-
-
-
diff --git a/docs/css/umn/css_04_0002.html b/docs/css/umn/css_04_0002.html
deleted file mode 100644
index b6bcaea3..00000000
--- a/docs/css/umn/css_04_0002.html
+++ /dev/null
@@ -1,26 +0,0 @@
-
-
-Scenarios
-CSS can be used to build search boxes for websites and apps to improve user experience. You can also build a log analysis platform with it, facilitating data-driven O&M and business operations. CSS vector search can help you quickly build smart applications, such as AI-based image search, recommendation, and semantic search.
-
Site Search
CSS can be used to search for website content by keyword as well as search for and recommend commodities on e-commerce sites.
-
- Real-time search: When site content is updated, you can find the updated content in your search within minutes, or even just seconds.
- Categorized statistics: You can apply search filters to sort products by category.
- Custom highlight style: You can define how the search results are highlighted.
-
Figure 1 Site search
-
-
All-Scenario Log Analysis
Analyze the logs of Elastic Load Balance (ELB), servers, containers, and applications. In CSS, the Kafka message buffer queue is used to balance loads in peak and off-peak hours. Logstash is used for data extraction, transformation, and loading (ETL). Elasticsearch retrieves and analyzes data. The analysis results are visualized by Kibana and presented to you.
-
- High cost-effectiveness: CSS separates cold and hot storage, and decouples computing and storage resources, achieving high performance and reducing costs by over 30%.
- Ease of use: Perform queries in a GUI editor. Easily create reports using drag-and-drop components.
- Powerful processing capability: CSS can import hundreds of terabytes of data per day, and can process petabytes of data.
-
Figure 2 All-scenario log analysis
-
-
Database Query Acceleration
CSS can be used to accelerate database queries. E-commerce and logistics companies have to respond to a huge number of concurrent order queries within a short period of time. Relational databases, although strong in transaction atomicity, are weak at handling such highly concurrent queries and analysis, and can rely on CSS to enhance their OLTP and OLAP capabilities.
-
- High performance: Retrieve data from hundreds of millions of records within milliseconds. Text, time, numeric, and spatial data types are supported.
- High scalability: CSS can be scaled to have over 200 data nodes and over 1000 columns.
 - Zero service interruption: The rolling restart and dual-copy mechanisms avoid service interruption in the case of specification changes or configuration updates.
-
-
Vector Search
When you search for unstructured data, such as images, videos, and corpuses, the nearest neighbors or approximate nearest neighbors are searched based on feature vectors. This has the following advantages:
-
- Efficient and reliable: The vector search engine provides optimal search performance and distributed DR capabilities.
- Abundant indexes: Multiple indexing algorithms and similarity measurement methods are available and can meet diverse needs.
- Easy learning: CSS is fully compatible with the open-source Elasticsearch ecosystem.
-
Figure 3 Vector search
-
-
-
-
diff --git a/docs/css/umn/css_04_0004.html b/docs/css/umn/css_04_0004.html
deleted file mode 100644
index 063372a7..00000000
--- a/docs/css/umn/css_04_0004.html
+++ /dev/null
@@ -1,62 +0,0 @@
-
-
-Related Services
-Figure 1 shows the relationships between CSS and other services.
-
Figure 1 Relationships between CSS and other services
-
-
Table 1 Relationships between CSS and other services

Service | Description
Virtual Private Cloud (VPC) | CSS clusters are created in the subnets of a VPC. VPCs provide a secure, isolated, and logical network environment for your clusters.
Elastic Cloud Server (ECS) | In a CSS cluster, each node represents an ECS. When you create a cluster, ECSs are automatically created.
Elastic Volume Service (EVS) | CSS uses EVS to store index data. When you create a cluster, EVS disks are automatically created for cluster data storage.
Object Storage Service (OBS) | Snapshots of CSS clusters are stored in OBS buckets.
Identity and Access Management (IAM) | IAM authenticates access to CSS.
Cloud Eye | CSS uses Cloud Eye to monitor cluster metrics, such as the disk usage and cluster health status, in real time.
Cloud Trace Service (CTS) | With CTS, you can record operations associated with CSS for later query, audit, and backtracking.
Key Management Service (KMS) | If disk encryption is enabled for a CSS cluster, the key provided by KMS is used to encrypt and decrypt the disk data.
-
-
-
-
-
diff --git a/docs/css/umn/css_04_0005.html b/docs/css/umn/css_04_0005.html
deleted file mode 100644
index 52ec5297..00000000
--- a/docs/css/umn/css_04_0005.html
+++ /dev/null
@@ -1,36 +0,0 @@
-
-
-Constraints
-Restrictions on Clusters and Nodes
The following table describes restrictions on clusters and nodes in CSS.
-
-
Table 1 Restrictions on Elasticsearch clusters and nodes

Cluster and Node | Restriction
Maximum number of nodes in a cluster | 32
Minimum number of nodes in a cluster | 1
-
-
-
-
-
-
Restrictions on Browsers
- You are advised to use the following browsers to access the CSS management console:
- Google Chrome 36.0 or later
- Mozilla Firefox 35.0 or later
- - You are advised to use the following browsers to access Kibana integrated in CSS:
- Google Chrome 36.0 or later
- Mozilla Firefox 35.0 or later
-
-
-
-
-
diff --git a/docs/css/umn/css_04_0007.html b/docs/css/umn/css_04_0007.html
deleted file mode 100644
index c3d4d2ee..00000000
--- a/docs/css/umn/css_04_0007.html
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
-Product Components
-CSS supports Kibana and Cerebro.
-
Kibana
Kibana is an open-source data analytics and visualization platform that works with Elasticsearch. You can use Kibana to search for and view data stored in Elasticsearch indexes and display data in charts and maps. For details about Kibana, visit https://www.elastic.co/guide/en/kibana/current/index.html.
-
By default, the Elasticsearch cluster of CSS provides the access channel to Kibana. You can quickly access Kibana without installing it. CSS is compatible with Kibana visualizations and Elasticsearch statistical and analysis capabilities.
-
- Over 10 data presentation modes
- Nearly 20 data statistics methods
- Classification in various dimensions, such as time and tag
-
-
Cerebro
Cerebro is an open-source Elasticsearch web admin tool built using Scala, Play Framework, AngularJS, and Bootstrap. It lets you manage clusters through a visualized interface, for example, by executing REST requests, modifying Elasticsearch configurations, and monitoring disks, cluster loads, and memory usage in real time.
-
By default, the Elasticsearch cluster of CSS provides the access channel to Cerebro. You can quickly access Cerebro without installing it. CSS is fully compatible with the open-source Cerebro and adapts to the latest 0.8.4 version.
-
- Elasticsearch visualized and real-time load monitoring
- Elasticsearch visualized data management
-
-
-
-
diff --git a/docs/css/umn/css_04_0010.html b/docs/css/umn/css_04_0010.html
deleted file mode 100644
index 7947f807..00000000
--- a/docs/css/umn/css_04_0010.html
+++ /dev/null
@@ -1,43 +0,0 @@
-
-
-Advantages
-CSS has the following features and advantages.
-
Efficient and Easy to Use
You can get insights from terabyte-scale data in milliseconds. In addition, you can use the visualized platform for data display and analysis.
-
-
Flexible and Scalable
You can request resources as needed and perform capacity expansion online with zero service interruption.
-
-
Easy O&M
CSS is a fully managed, out-of-the-box service. You can start using it in just a few clicks, without having to manage clusters yourself.
-
-
Kernel Enhancement
- Vector search
When you search for unstructured data, such as images, videos, and corpuses, the nearest neighbors or approximate nearest neighbors are searched based on feature vectors.
- - Decoupled storage and compute
CSS provides an API for freezing indexes. Hot data stored on SSD can be dumped to OBS to reduce data storage costs and decouple compute from storage.
- - Flow control
CSS can control traffic at the node level. You can configure the blacklist and whitelist, the maximum concurrent HTTPS connections, and the maximum HTTP connections for a node. Each function has an independent control switch.
- - Large query isolation
CSS allows you to separately manage large queries. You can isolate query requests that consume a large amount of memory or take a long period of time.
- - Index monitoring
CSS monitors various metrics of the running status and change trend of cluster indexes to measure service usage and handle potential risks in a timely manner, ensuring that clusters can run stably.
- - Enhanced monitoring
CSS supports enhanced cluster monitoring. It can monitor the P99 latency of cluster search requests and the HTTP status codes of clusters.
-
-
-
High Reliability
You can choose to trigger snapshots manually or on a periodic basis for backup and restore snapshots to the current or other clusters. Snapshots of a cluster can be restored to another cluster to implement cluster data migration.
-
- Automatic backup using snapshots
CSS provides the backup function. You can enable the automatic backup function on the CSS management console and set the backup period based on the actual requirements.
-Automatic backup backs up the index data of a cluster by creating cluster snapshots. For the first backup, you are advised to back up all index data.
-CSS allows you to store the snapshot data of Elasticsearch instances to OBS, thereby achieving cross-region backup with the cross-region replication function of OBS.
-
-
-
-
High Security
CSS ensures secure running of data and services from the following aspects:
-
- Network isolation
The network is divided into two planes, service plane and management plane. The two planes are deployed and isolated physically to ensure the security of the service and management networks.
-- Service plane: refers to the network plane of the cluster. It provides service channels for users and delivers data definition, index, and search capabilities.
- Management plane: refers to the management console. It is used to manage CSS.
- VPC security groups or isolated networks ensure the security of hosts.
- - Access control
- Using the network access control list (ACL), you can permit or deny the network traffic entering and exiting the subnets.
- Internal security infrastructure (including the network firewall, intrusion detection system, and protection system) can monitor all network traffic that enters or exits the VPC through the IPsec VPN.
- User authentication and index-level authentication are supported. CSS also supports interconnection with third-party user management systems.
- - Data security
- In CSS, the multi-replica mechanism is used to ensure user data security.
- Communication between the client and server can be encrypted using SSL.
- - Operation audit
Cloud Trace Service (CTS) can be used to perform auditing on key logs and operations.
-
-
-
High Availability
To prevent data loss and minimize the cluster downtime in case of service interruption, CSS supports cross-AZ cluster deployment. When creating a cluster, you can select two or three AZs in the same region. The system will automatically allocate nodes to these AZs. If an AZ is faulty, the remaining AZs can still run properly, significantly enhancing cluster availability and improving service stability.
-
-
-
-
diff --git a/docs/css/umn/css_04_0012.html b/docs/css/umn/css_04_0012.html
deleted file mode 100644
index 8c18449e..00000000
--- a/docs/css/umn/css_04_0012.html
+++ /dev/null
@@ -1,29 +0,0 @@
-
-
-Basic Concepts
-Cluster
CSS provides functions on a per cluster basis. A cluster represents an independent search service that consists of multiple nodes.
-
-
Index
An index stores Elasticsearch data. It is a logical space in which one or more shards are grouped.
-
-
Shard
An index can potentially store a large amount of data that can exceed the hardware limits of a single node. To solve this problem, Elasticsearch provides the ability to subdivide your index into multiple pieces called shards. When you create an index, you can simply define the number of shards that you want. Each shard is in itself a fully-functional and independent "index" that can be hosted on any node in the cluster.
-
You need to specify the number of shards before creating an index and cannot change the number after the index is successfully created.
-
-
Replica
A replica is a copy of the actual storage index in a shard. It can be understood as a backup of the shard. Replicas help prevent single points of failure (SPOFs). You can increase or decrease the number of replicas based on your service requirements.
-
-
Document
An entity for Elasticsearch storage. Equivalent to the row in the RDB, the document is the basic unit that can be indexed.
-
-
Document Type
Similar to a table in the RDB, type is used to distinguish between different data.
-
In versions earlier than Elasticsearch 7.x, each index can contain multiple document types. Elasticsearch defines a type for each document.
-
Elasticsearch 7.x and later versions only support documents of the _doc type.
-
-
Mapping
A mapping is used to restrict the type of a field and can be automatically created based on data. It is similar to the schema in the database.
-
-
Field
The field is the minimum unit of a document. It is similar to the column in the database.
-
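To see how these concepts fit together, here is a minimal sketch that creates an index with explicit shard, replica, and mapping settings (the index and field names are illustrative):
PUT /my_index
{
  "settings": {
    "number_of_shards": 3,
    "number_of_replicas": 1
  },
  "mappings": {
    "properties": {
      "name": { "type": "text" },
      "created_at": { "type": "date" }
    }
  }
}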
-
-
-
diff --git a/docs/css/umn/css_04_0014.html b/docs/css/umn/css_04_0014.html
deleted file mode 100644
index c99c961e..00000000
--- a/docs/css/umn/css_04_0014.html
+++ /dev/null
@@ -1,362 +0,0 @@
-
-
-Permissions Management
-If you need to assign different permissions to employees in your organization to access your CSS resources, IAM is a good choice for fine-grained permissions management. IAM provides identity authentication, permissions management, and access control.
-
If your account already meets your requirements, you do not need to create independent IAM users for permission management and can skip this section. This does not affect other functions of CSS.
-
With IAM, you can use your account to create IAM users for your employees and assign permissions to the users to control their access to your resources. IAM is free of charge. You pay only for the resources you purchase.
-
Permissions Management
New IAM users do not have any permissions assigned by default. You need to first add them to one or more groups and attach policies or roles to these groups. The users then inherit permissions from the groups and can perform specified operations on cloud services based on the permissions they have been assigned.
-
CSS is a project-level service deployed in specific physical regions. Therefore, CSS permissions are assigned to projects in specific regions and only take effect in these regions. If you want the permissions to take effect in all regions, you need to assign the permissions to projects in each region. When accessing CSS, the users need to switch to a region where they have been authorized to use cloud services.
-
You can use roles and policies to grant users permissions.
-
- Roles are a type of coarse-grained authorization mechanism that defines permissions related to user responsibilities. There are only a limited number of service-level roles for granting permissions to users. When using roles to grant permissions, you need to also assign dependency roles. Roles are not ideal for fine-grained authorization and secure access control.
- Policies are a type of fine-grained authorization mechanism that defines the permissions for performing operations on specific cloud resources under certain conditions. This mechanism allows for more flexible authorization. Policies allow you to meet requirements for more secure access control. For example, CSS administrators can only grant CSS users the permissions needed for managing a particular type of CSS resources.
-
Table 1 lists all the system-defined roles and policies supported by CSS.
-
- Elasticsearch Administrator depends on the roles of other services to execute its permissions. Therefore, if you assign the Elasticsearch Administrator role to a user, assign its dependency roles at the same time.
- CSS FullAccess and CSS ReadOnlyAccess can be used to control the resources that users can access. For example, if you want your software developers to use CSS resources but not delete them or perform any high-risk operations, you can create IAM users for these software developers and assign them only the permissions required for using CSS resources.
-
-
Table 1 CSS system permissions

Role/Policy Name: Elasticsearch Administrator
Type: System-defined role
Description: Full permissions for CSS. This role depends on the Tenant Guest and Server Administrator roles in the same project.
Dependency:
- Tenant Guest: A global role, which must be assigned in the global project.
- Server Administrator: A project-level role, which must be assigned in the same project.

Role/Policy Name: CSS FullAccess
Type: System-defined policy
Description: Full CSS permissions granted through policies. Users with these permissions can perform all operations on CSS. Some functions depend on corresponding permissions. To use certain functions, you need to enable the dependent permissions in the same project.
Dependency: The VPCEndpoint Administrator system role is required for accessing a cluster through a VPC endpoint. Some operations depend on the following permissions:
- Automatically create an agency: iam:agencies:createAgency
- View the agency list: iam:agencies:listAgencies, iam:permissions:listRolesForAgencyOnDomain, iam:permissions:listRolesForAgencyOnProject, iam:permissions:listRolesForAgency
- Display enterprise projects and predefined tags on the console: eps:enterpriseProjects:list, tms:predefineTags:list
- Use the snapshot, word dictionary, and log management functions: obs:bucket:Get*, obs:bucket:List*, obs:object:List*, obs:object:Get*, obs:bucket:HeadBucket, obs:object:PutObject, obs:object:DeleteObject

Role/Policy Name: CSS ReadOnlyAccess
Type: System-defined policy
Description: Read-only permissions for CSS. Users with these permissions can only view CSS data. Some functions depend on corresponding permissions. To use certain functions, you need to enable the dependent permissions in global services.
Dependency: Some operations depend on the following permissions:
- View the agency list: iam:agencies:listAgencies, iam:permissions:listRolesForAgencyOnDomain, iam:permissions:listRolesForAgencyOnProject, iam:permissions:listRolesForAgency
- Display enterprise projects and predefined tags on the console: eps:enterpriseProjects:list, tms:predefineTags:list
- Use the snapshot, word dictionary, and log management functions: obs:bucket:Get*, obs:bucket:List*, obs:object:List*, obs:object:Get*, obs:bucket:HeadBucket
-
-
-
-
-
Table 2 lists the common operations supported by each system permission of CSS. Choose the appropriate system permissions according to this table.
-
-
Table 2 Common operations supported by each system-defined policy

Operation | CSS FullAccess | CSS ReadOnlyAccess | Elasticsearch Administrator | Remarks
Creating a cluster | √ | x | √ | -
Querying a cluster list | √ | √ | √ | -
Querying cluster details | √ | √ | √ | -
Deleting a cluster | √ | x | √ | -
Restarting a cluster | √ | x | √ | -
Expanding cluster capacity | √ | x | √ | -
Adding instances and expanding instance storage capacity | √ | x | √ | -
Querying tags of a specified cluster | √ | √ | √ | -
Querying all tags | √ | √ | √ | -
Loading a custom word dictionary | √ | x | √ | Depends on OBS and IAM permissions
Querying the status of a custom word dictionary | √ | √ | √ | -
Deleting a custom word dictionary | √ | x | √ | -
Automatically setting basic configurations of a cluster snapshot | √ | x | √ | Depends on OBS and IAM permissions
Modifying basic configurations of a cluster snapshot | √ | x | √ | Depends on OBS and IAM permissions
Setting the automatic snapshot creation policy | √ | x | √ | -
Querying the automatic snapshot creation policy | √ | √ | √ | -
Manually creating a snapshot | √ | x | √ | -
Querying the snapshot list | √ | √ | √ | -
Restoring a snapshot | √ | x | √ | -
Deleting a snapshot | √ | x | √ | -
Disabling the snapshot function | √ | x | √ | -
Modifying specifications | √ | x | √ | -
Scaling in clusters | √ | x | √ | -
-
-
-
-
-
-
-
diff --git a/docs/css/umn/css_04_0019.html b/docs/css/umn/css_04_0019.html
deleted file mode 100644
index 2db27c4c..00000000
--- a/docs/css/umn/css_04_0019.html
+++ /dev/null
@@ -1,12 +0,0 @@
-
-
-Quotas
-CSS uses the following resources:
-
- Instance
- CPU
- Memory (GB)
- Disk quantity
- Disk size (GB)
-
-
-
diff --git a/docs/css/umn/en-us_formulaimage_0000001667002558.png b/docs/css/umn/en-us_formulaimage_0000001575321958.png
similarity index 100%
rename from docs/css/umn/en-us_formulaimage_0000001667002558.png
rename to docs/css/umn/en-us_formulaimage_0000001575321958.png
diff --git a/docs/css/umn/en-us_formulaimage_0000001714802349.png b/docs/css/umn/en-us_formulaimage_0000001575640870.png
similarity index 100%
rename from docs/css/umn/en-us_formulaimage_0000001714802349.png
rename to docs/css/umn/en-us_formulaimage_0000001575640870.png
diff --git a/docs/css/umn/en-us_formulaimage_0000001666842858.png b/docs/css/umn/en-us_formulaimage_0000001625680497.png
similarity index 100%
rename from docs/css/umn/en-us_formulaimage_0000001666842858.png
rename to docs/css/umn/en-us_formulaimage_0000001625680497.png
diff --git a/docs/css/umn/en-us_formulaimage_0000001714802345.png b/docs/css/umn/en-us_formulaimage_0000001626000845.png
similarity index 100%
rename from docs/css/umn/en-us_formulaimage_0000001714802345.png
rename to docs/css/umn/en-us_formulaimage_0000001626000845.png
diff --git a/docs/css/umn/en-us_image_0000001667002358.png b/docs/css/umn/en-us_image_0000001223434560.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001667002358.png
rename to docs/css/umn/en-us_image_0000001223434560.png
diff --git a/docs/css/umn/en-us_image_0000001666842622.png b/docs/css/umn/en-us_image_0000001223594508.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001666842622.png
rename to docs/css/umn/en-us_image_0000001223594508.png
diff --git a/docs/css/umn/en-us_image_0000001519064934.png b/docs/css/umn/en-us_image_0000001519064934.png
new file mode 100644
index 00000000..68651e9f
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001519064934.png differ
diff --git a/docs/css/umn/en-us_image_0000001519225242.png b/docs/css/umn/en-us_image_0000001519225242.png
new file mode 100644
index 00000000..42dcd9b9
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001519225242.png differ
diff --git a/docs/css/umn/en-us_image_0000001519541498.png b/docs/css/umn/en-us_image_0000001519541498.png
new file mode 100644
index 00000000..1f2bec4b
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001519541498.png differ
diff --git a/docs/css/umn/en-us_image_0000001569480321.png b/docs/css/umn/en-us_image_0000001569480321.png
new file mode 100644
index 00000000..1a5abf74
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001569480321.png differ
diff --git a/docs/css/umn/en-us_image_0000001569656909.png b/docs/css/umn/en-us_image_0000001569656909.png
new file mode 100644
index 00000000..07a34ea4
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001569656909.png differ
diff --git a/docs/css/umn/en-us_image_0000001667002486.png b/docs/css/umn/en-us_image_0000001575311218.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001667002486.png
rename to docs/css/umn/en-us_image_0000001575311218.png
diff --git a/docs/css/umn/en-us_image_0000001714802265.png b/docs/css/umn/en-us_image_0000001575312654.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714802265.png
rename to docs/css/umn/en-us_image_0000001575312654.png
diff --git a/docs/css/umn/en-us_image_0000001666842594.png b/docs/css/umn/en-us_image_0000001575313618.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001666842594.png
rename to docs/css/umn/en-us_image_0000001575313618.png
diff --git a/docs/css/umn/en-us_image_0000001666842674.png b/docs/css/umn/en-us_image_0000001575316322.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001666842674.png
rename to docs/css/umn/en-us_image_0000001575316322.png
diff --git a/docs/css/umn/en-us_image_0000001667002462.png b/docs/css/umn/en-us_image_0000001575319174.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001667002462.png
rename to docs/css/umn/en-us_image_0000001575319174.png
diff --git a/docs/css/umn/en-us_image_0000001667002546.png b/docs/css/umn/en-us_image_0000001575470374.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001667002546.png
rename to docs/css/umn/en-us_image_0000001575470374.png
diff --git a/docs/css/umn/en-us_image_0000001667002502.png b/docs/css/umn/en-us_image_0000001575471322.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001667002502.png
rename to docs/css/umn/en-us_image_0000001575471322.png
diff --git a/docs/css/umn/en-us_image_0000001666842766.png b/docs/css/umn/en-us_image_0000001575471938.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001666842766.png
rename to docs/css/umn/en-us_image_0000001575471938.png
diff --git a/docs/css/umn/en-us_image_0000001667002610.png b/docs/css/umn/en-us_image_0000001575475526.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001667002610.png
rename to docs/css/umn/en-us_image_0000001575475526.png
diff --git a/docs/css/umn/en-us_image_0000001666842746.png b/docs/css/umn/en-us_image_0000001575478486.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001666842746.png
rename to docs/css/umn/en-us_image_0000001575478486.png
diff --git a/docs/css/umn/en-us_image_0000001714922197.png b/docs/css/umn/en-us_image_0000001575482950.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714922197.png
rename to docs/css/umn/en-us_image_0000001575482950.png
diff --git a/docs/css/umn/en-us_image_0000001714922093.png b/docs/css/umn/en-us_image_0000001575631554.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714922093.png
rename to docs/css/umn/en-us_image_0000001575631554.png
diff --git a/docs/css/umn/en-us_image_0000001714921949.png b/docs/css/umn/en-us_image_0000001575631754.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714921949.png
rename to docs/css/umn/en-us_image_0000001575631754.png
diff --git a/docs/css/umn/en-us_image_0000001666842898.png b/docs/css/umn/en-us_image_0000001575631898.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001666842898.png
rename to docs/css/umn/en-us_image_0000001575631898.png
diff --git a/docs/css/umn/en-us_image_0000001666842670.png b/docs/css/umn/en-us_image_0000001575631966.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001666842670.png
rename to docs/css/umn/en-us_image_0000001575631966.png
diff --git a/docs/css/umn/en-us_image_0000001714921933.png b/docs/css/umn/en-us_image_0000001575632538.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714921933.png
rename to docs/css/umn/en-us_image_0000001575632538.png
diff --git a/docs/css/umn/en-us_image_0000001667002402.png b/docs/css/umn/en-us_image_0000001575635254.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001667002402.png
rename to docs/css/umn/en-us_image_0000001575635254.png
diff --git a/docs/css/umn/en-us_image_0000001666842726.png b/docs/css/umn/en-us_image_0000001575635862.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001666842726.png
rename to docs/css/umn/en-us_image_0000001575635862.png
diff --git a/docs/css/umn/en-us_image_0000001666842902.jpg b/docs/css/umn/en-us_image_0000001575637658.jpg
similarity index 100%
rename from docs/css/umn/en-us_image_0000001666842902.jpg
rename to docs/css/umn/en-us_image_0000001575637658.jpg
diff --git a/docs/css/umn/en-us_image_0000001714802245.png b/docs/css/umn/en-us_image_0000001575638082.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714802245.png
rename to docs/css/umn/en-us_image_0000001575638082.png
diff --git a/docs/css/umn/en-us_image_0000001666842866.png b/docs/css/umn/en-us_image_0000001575642526.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001666842866.png
rename to docs/css/umn/en-us_image_0000001575642526.png
diff --git a/docs/css/umn/en-us_image_0000001714802117.png b/docs/css/umn/en-us_image_0000001575791646.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714802117.png
rename to docs/css/umn/en-us_image_0000001575791646.png
diff --git a/docs/css/umn/en-us_image_0000001667002574.png b/docs/css/umn/en-us_image_0000001575802422.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001667002574.png
rename to docs/css/umn/en-us_image_0000001575802422.png
diff --git a/docs/css/umn/en-us_image_0000001667002434.png b/docs/css/umn/en-us_image_0000001575802426.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001667002434.png
rename to docs/css/umn/en-us_image_0000001575802426.png
diff --git a/docs/css/umn/en-us_image_0000001714922229.png b/docs/css/umn/en-us_image_0000001578525026.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714922229.png
rename to docs/css/umn/en-us_image_0000001578525026.png
diff --git a/docs/css/umn/en-us_image_0000001667002374.png b/docs/css/umn/en-us_image_0000001578844214.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001667002374.png
rename to docs/css/umn/en-us_image_0000001578844214.png
diff --git a/docs/css/umn/en-us_image_0000001666842614.png b/docs/css/umn/en-us_image_0000001579004138.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001666842614.png
rename to docs/css/umn/en-us_image_0000001579004138.png
diff --git a/docs/css/umn/en-us_image_0000001714922185.png b/docs/css/umn/en-us_image_0000001583151098.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714922185.png
rename to docs/css/umn/en-us_image_0000001583151098.png
diff --git a/docs/css/umn/en-us_image_0000001714922189.png b/docs/css/umn/en-us_image_0000001583151102.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714922189.png
rename to docs/css/umn/en-us_image_0000001583151102.png
diff --git a/docs/css/umn/en-us_image_0000001714802137.png b/docs/css/umn/en-us_image_0000001583310486.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714802137.png
rename to docs/css/umn/en-us_image_0000001583310486.png
diff --git a/docs/css/umn/en-us_image_0000001714802393.png b/docs/css/umn/en-us_image_0000001583470414.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714802393.png
rename to docs/css/umn/en-us_image_0000001583470414.png
diff --git a/docs/css/umn/en-us_image_0000001714922221.png b/docs/css/umn/en-us_image_0000001583470442.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714922221.png
rename to docs/css/umn/en-us_image_0000001583470442.png
diff --git a/docs/css/umn/en-us_image_0000001714802389.png b/docs/css/umn/en-us_image_0000001606771374.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714802389.png
rename to docs/css/umn/en-us_image_0000001606771374.png
diff --git a/docs/css/umn/en-us_image_0000001667002382.png b/docs/css/umn/en-us_image_0000001606924426.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001667002382.png
rename to docs/css/umn/en-us_image_0000001606924426.png
diff --git a/docs/css/umn/en-us_image_0000001606925014.png b/docs/css/umn/en-us_image_0000001606925014.png
new file mode 100644
index 00000000..56bd7575
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001606925014.png differ
diff --git a/docs/css/umn/en-us_image_0000001606925022.png b/docs/css/umn/en-us_image_0000001606925022.png
new file mode 100644
index 00000000..e2186310
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001606925022.png differ
diff --git a/docs/css/umn/en-us_image_0000001666842702.png b/docs/css/umn/en-us_image_0000001607164138.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001666842702.png
rename to docs/css/umn/en-us_image_0000001607164138.png
diff --git a/docs/css/umn/en-us_image_0000001714802405.png b/docs/css/umn/en-us_image_0000001607164742.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714802405.png
rename to docs/css/umn/en-us_image_0000001607164742.png
diff --git a/docs/css/umn/en-us_image_0000001607405894.png b/docs/css/umn/en-us_image_0000001607405894.png
new file mode 100644
index 00000000..d5648897
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001607405894.png differ
diff --git a/docs/css/umn/en-us_image_0000001607447036.png b/docs/css/umn/en-us_image_0000001607447036.png
new file mode 100644
index 00000000..ea9d0cc7
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001607447036.png differ
diff --git a/docs/css/umn/en-us_image_0000001607591784.png b/docs/css/umn/en-us_image_0000001607591784.png
new file mode 100644
index 00000000..a523992f
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001607591784.png differ
diff --git a/docs/css/umn/en-us_image_0000001607766004.png b/docs/css/umn/en-us_image_0000001607766004.png
new file mode 100644
index 00000000..36cbf08c
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001607766004.png differ
diff --git a/docs/css/umn/en-us_image_0000001607890430.png b/docs/css/umn/en-us_image_0000001607890430.png
new file mode 100644
index 00000000..c5b18177
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001607890430.png differ
diff --git a/docs/css/umn/en-us_image_0000001607915032.png b/docs/css/umn/en-us_image_0000001607915032.png
new file mode 100644
index 00000000..62bd806b
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001607915032.png differ
diff --git a/docs/css/umn/en-us_image_0000001607933650.png b/docs/css/umn/en-us_image_0000001607933650.png
new file mode 100644
index 00000000..a2abd35e
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001607933650.png differ
diff --git a/docs/css/umn/en-us_image_0000001607935630.png b/docs/css/umn/en-us_image_0000001607935630.png
new file mode 100644
index 00000000..61a00481
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001607935630.png differ
diff --git a/docs/css/umn/en-us_image_0000001666842834.png b/docs/css/umn/en-us_image_0000001625669589.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001666842834.png
rename to docs/css/umn/en-us_image_0000001625669589.png
diff --git a/docs/css/umn/en-us_image_0000001714802113.png b/docs/css/umn/en-us_image_0000001625671361.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714802113.png
rename to docs/css/umn/en-us_image_0000001625671361.png
diff --git a/docs/css/umn/en-us_image_0000001625673045.png b/docs/css/umn/en-us_image_0000001625673045.png
new file mode 100644
index 00000000..23d55f96
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001625673045.png differ
diff --git a/docs/css/umn/en-us_image_0000001666842890.png b/docs/css/umn/en-us_image_0000001625674753.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001666842890.png
rename to docs/css/umn/en-us_image_0000001625674753.png
diff --git a/docs/css/umn/en-us_image_0000001714922113.png b/docs/css/umn/en-us_image_0000001625790509.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714922113.png
rename to docs/css/umn/en-us_image_0000001625790509.png
diff --git a/docs/css/umn/en-us_image_0000001714922157.png b/docs/css/umn/en-us_image_0000001625791329.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714922157.png
rename to docs/css/umn/en-us_image_0000001625791329.png
diff --git a/docs/css/umn/en-us_image_0000001625794717.png b/docs/css/umn/en-us_image_0000001625794717.png
new file mode 100644
index 00000000..f78b9bf7
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001625794717.png differ
diff --git a/docs/css/umn/en-us_image_0000001714802241.png b/docs/css/umn/en-us_image_0000001625797681.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714802241.png
rename to docs/css/umn/en-us_image_0000001625797681.png
diff --git a/docs/css/umn/en-us_image_0000001714802369.png b/docs/css/umn/en-us_image_0000001625802121.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714802369.png
rename to docs/css/umn/en-us_image_0000001625802121.png
diff --git a/docs/css/umn/en-us_image_0000001666842786.png b/docs/css/umn/en-us_image_0000001625870985.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001666842786.png
rename to docs/css/umn/en-us_image_0000001625870985.png
diff --git a/docs/css/umn/en-us_image_0000001667002482.png b/docs/css/umn/en-us_image_0000001625871637.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001667002482.png
rename to docs/css/umn/en-us_image_0000001625871637.png
diff --git a/docs/css/umn/en-us_image_0000001714922161.png b/docs/css/umn/en-us_image_0000001625875305.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714922161.png
rename to docs/css/umn/en-us_image_0000001625875305.png
diff --git a/docs/css/umn/en-us_image_0000001714802249.png b/docs/css/umn/en-us_image_0000001625878165.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714802249.png
rename to docs/css/umn/en-us_image_0000001625878165.png
diff --git a/docs/css/umn/en-us_image_0000001714802261.png b/docs/css/umn/en-us_image_0000001625991489.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714802261.png
rename to docs/css/umn/en-us_image_0000001625991489.png
diff --git a/docs/css/umn/en-us_image_0000001714922097.png b/docs/css/umn/en-us_image_0000001625991493.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714922097.png
rename to docs/css/umn/en-us_image_0000001625991493.png
diff --git a/docs/css/umn/en-us_image_0000001667002614.png b/docs/css/umn/en-us_image_0000001625991693.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001667002614.png
rename to docs/css/umn/en-us_image_0000001625991693.png
diff --git a/docs/css/umn/en-us_image_0000001714802089.png b/docs/css/umn/en-us_image_0000001625992469.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714802089.png
rename to docs/css/umn/en-us_image_0000001625992469.png
diff --git a/docs/css/umn/en-us_image_0000001666842894.png b/docs/css/umn/en-us_image_0000001625995085.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001666842894.png
rename to docs/css/umn/en-us_image_0000001625995085.png
diff --git a/docs/css/umn/en-us_image_0000001666842750.png b/docs/css/umn/en-us_image_0000001625998045.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001666842750.png
rename to docs/css/umn/en-us_image_0000001625998045.png
diff --git a/docs/css/umn/en-us_image_0000001714922225.png b/docs/css/umn/en-us_image_0000001628524809.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714922225.png
rename to docs/css/umn/en-us_image_0000001628524809.png
diff --git a/docs/css/umn/en-us_image_0000001714922233.png b/docs/css/umn/en-us_image_0000001633030485.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714922233.png
rename to docs/css/umn/en-us_image_0000001633030485.png
diff --git a/docs/css/umn/en-us_image_0000001633111029.png b/docs/css/umn/en-us_image_0000001633111029.png
new file mode 100644
index 00000000..1c730028
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001633111029.png differ
diff --git a/docs/css/umn/en-us_image_0000001666842850.png b/docs/css/umn/en-us_image_0000001633311021.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001666842850.png
rename to docs/css/umn/en-us_image_0000001633311021.png
diff --git a/docs/css/umn/en-us_image_0000001633709689.png b/docs/css/umn/en-us_image_0000001633709689.png
new file mode 100644
index 00000000..1075b5af
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001633709689.png differ
diff --git a/docs/css/umn/en-us_image_0000001634029125.png b/docs/css/umn/en-us_image_0000001634029125.png
new file mode 100644
index 00000000..9fb55b6d
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001634029125.png differ
diff --git a/docs/css/umn/en-us_image_0000001634149053.png b/docs/css/umn/en-us_image_0000001634149053.png
new file mode 100644
index 00000000..fb1e28ac
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001634149053.png differ
diff --git a/docs/css/umn/en-us_image_0000001634149089.png b/docs/css/umn/en-us_image_0000001634149089.png
new file mode 100644
index 00000000..35ad1a9e
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001634149089.png differ
diff --git a/docs/css/umn/en-us_image_0000001637133729.png b/docs/css/umn/en-us_image_0000001637133729.png
new file mode 100644
index 00000000..35ad1a9e
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001637133729.png differ
diff --git a/docs/css/umn/en-us_image_0000001714921993.png b/docs/css/umn/en-us_image_0000001641018664.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714921993.png
rename to docs/css/umn/en-us_image_0000001641018664.png
diff --git a/docs/css/umn/en-us_image_0000001655964997.png b/docs/css/umn/en-us_image_0000001655964997.png
new file mode 100644
index 00000000..aa77ecc9
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001655964997.png differ
diff --git a/docs/css/umn/en-us_image_0000001656084945.png b/docs/css/umn/en-us_image_0000001656084945.png
new file mode 100644
index 00000000..e48d2711
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001656084945.png differ
diff --git a/docs/css/umn/en-us_image_0000001656204689.png b/docs/css/umn/en-us_image_0000001656204689.png
new file mode 100644
index 00000000..9fb55b6d
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001656204689.png differ
diff --git a/docs/css/umn/en-us_image_0000001656284657.png b/docs/css/umn/en-us_image_0000001656284657.png
new file mode 100644
index 00000000..9fb55b6d
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001656284657.png differ
diff --git a/docs/css/umn/en-us_image_0000001656284673.png b/docs/css/umn/en-us_image_0000001656284673.png
new file mode 100644
index 00000000..8ea4540f
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001656284673.png differ
diff --git a/docs/css/umn/en-us_image_0000001656290797.png b/docs/css/umn/en-us_image_0000001656290797.png
new file mode 100644
index 00000000..f392edbf
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001656290797.png differ
diff --git a/docs/css/umn/en-us_image_0000001656848929.png b/docs/css/umn/en-us_image_0000001656848929.png
new file mode 100644
index 00000000..8fa5923b
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001656848929.png differ
diff --git a/docs/css/umn/en-us_image_0000001656851577.png b/docs/css/umn/en-us_image_0000001656851577.png
new file mode 100644
index 00000000..1079bf68
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001656851577.png differ
diff --git a/docs/css/umn/en-us_image_0000001714922041.png b/docs/css/umn/en-us_image_0000001656902246.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714922041.png
rename to docs/css/umn/en-us_image_0000001656902246.png
diff --git a/docs/css/umn/en-us_image_0000001667002438.png b/docs/css/umn/en-us_image_0000001656902250.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001667002438.png
rename to docs/css/umn/en-us_image_0000001656902250.png
diff --git a/docs/css/umn/en-us_image_0000001714802217.png b/docs/css/umn/en-us_image_0000001656902254.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714802217.png
rename to docs/css/umn/en-us_image_0000001656902254.png
diff --git a/docs/css/umn/en-us_image_0000001656931909.png b/docs/css/umn/en-us_image_0000001656931909.png
new file mode 100644
index 00000000..8dba5865
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001656931909.png differ
diff --git a/docs/css/umn/en-us_image_0000001667002442.png b/docs/css/umn/en-us_image_0000001657061586.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001667002442.png
rename to docs/css/umn/en-us_image_0000001657061586.png
diff --git a/docs/css/umn/en-us_image_0000001667002454.png b/docs/css/umn/en-us_image_0000001657061590.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001667002454.png
rename to docs/css/umn/en-us_image_0000001657061590.png
diff --git a/docs/css/umn/en-us_image_0000001714922065.png b/docs/css/umn/en-us_image_0000001657061594.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714922065.png
rename to docs/css/umn/en-us_image_0000001657061594.png
diff --git a/docs/css/umn/en-us_image_0000001657091157.png b/docs/css/umn/en-us_image_0000001657091157.png
new file mode 100644
index 00000000..dfb4ad73
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001657091157.png differ
diff --git a/docs/css/umn/en-us_image_0000001657091853.png b/docs/css/umn/en-us_image_0000001657091853.png
new file mode 100644
index 00000000..62bd806b
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001657091853.png differ
diff --git a/docs/css/umn/en-us_image_0000001657221737.png b/docs/css/umn/en-us_image_0000001657221737.png
new file mode 100644
index 00000000..aa4bc783
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001657221737.png differ
diff --git a/docs/css/umn/en-us_image_0000001657244165.png b/docs/css/umn/en-us_image_0000001657244165.png
new file mode 100644
index 00000000..d69f486c
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001657244165.png differ
diff --git a/docs/css/umn/en-us_image_0000001657249665.png b/docs/css/umn/en-us_image_0000001657249665.png
new file mode 100644
index 00000000..75f95f92
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001657249665.png differ
diff --git a/docs/css/umn/en-us_image_0000001666842626.png b/docs/css/umn/en-us_image_0000001666842626.png
deleted file mode 100644
index 802ffdd2..00000000
Binary files a/docs/css/umn/en-us_image_0000001666842626.png and /dev/null differ
diff --git a/docs/css/umn/en-us_image_0000001666842658.png b/docs/css/umn/en-us_image_0000001666842658.png
deleted file mode 100644
index 799b6098..00000000
Binary files a/docs/css/umn/en-us_image_0000001666842658.png and /dev/null differ
diff --git a/docs/css/umn/en-us_image_0000001666842734.png b/docs/css/umn/en-us_image_0000001666842734.png
deleted file mode 100644
index 8f892b0b..00000000
Binary files a/docs/css/umn/en-us_image_0000001666842734.png and /dev/null differ
diff --git a/docs/css/umn/en-us_image_0000001667002386.png b/docs/css/umn/en-us_image_0000001667002386.png
deleted file mode 100644
index 3fa11985..00000000
Binary files a/docs/css/umn/en-us_image_0000001667002386.png and /dev/null differ
diff --git a/docs/css/umn/en-us_image_0000001667002450.png b/docs/css/umn/en-us_image_0000001667002450.png
deleted file mode 100644
index 2ebdb21e..00000000
Binary files a/docs/css/umn/en-us_image_0000001667002450.png and /dev/null differ
diff --git a/docs/css/umn/en-us_image_0000001696678850.png b/docs/css/umn/en-us_image_0000001696678850.png
new file mode 100644
index 00000000..6b11888b
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001696678850.png differ
diff --git a/docs/css/umn/en-us_image_0000001696838310.png b/docs/css/umn/en-us_image_0000001696838310.png
new file mode 100644
index 00000000..582b1618
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001696838310.png differ
diff --git a/docs/css/umn/en-us_image_0000001696838318.png b/docs/css/umn/en-us_image_0000001696838318.png
new file mode 100644
index 00000000..e4fcbfa9
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001696838318.png differ
diff --git a/docs/css/umn/en-us_image_0000001714802205.png b/docs/css/umn/en-us_image_0000001705061713.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714802205.png
rename to docs/css/umn/en-us_image_0000001705061713.png
diff --git a/docs/css/umn/en-us_image_0000001666842730.png b/docs/css/umn/en-us_image_0000001705061717.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001666842730.png
rename to docs/css/umn/en-us_image_0000001705061717.png
diff --git a/docs/css/umn/en-us_image_0000001714802229.png b/docs/css/umn/en-us_image_0000001705061721.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714802229.png
rename to docs/css/umn/en-us_image_0000001705061721.png
diff --git a/docs/css/umn/en-us_image_0000001714922057.png b/docs/css/umn/en-us_image_0000001705220953.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714922057.png
rename to docs/css/umn/en-us_image_0000001705220953.png
diff --git a/docs/css/umn/en-us_image_0000001666842722.png b/docs/css/umn/en-us_image_0000001705220957.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001666842722.png
rename to docs/css/umn/en-us_image_0000001705220957.png
diff --git a/docs/css/umn/en-us_image_0000001666842710.png b/docs/css/umn/en-us_image_0000001705227645.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001666842710.png
rename to docs/css/umn/en-us_image_0000001705227645.png
diff --git a/docs/css/umn/en-us_image_0000001714802213.png b/docs/css/umn/en-us_image_0000001705958261.png
similarity index 100%
rename from docs/css/umn/en-us_image_0000001714802213.png
rename to docs/css/umn/en-us_image_0000001705958261.png
diff --git a/docs/css/umn/en-us_image_0000001714802085.png b/docs/css/umn/en-us_image_0000001714802085.png
deleted file mode 100644
index cb722fe7..00000000
Binary files a/docs/css/umn/en-us_image_0000001714802085.png and /dev/null differ
diff --git a/docs/css/umn/en-us_image_0000001714802105.png b/docs/css/umn/en-us_image_0000001714802105.png
deleted file mode 100644
index da72c4d6..00000000
Binary files a/docs/css/umn/en-us_image_0000001714802105.png and /dev/null differ
diff --git a/docs/css/umn/en-us_image_0000001714802109.png b/docs/css/umn/en-us_image_0000001714802109.png
deleted file mode 100644
index f2a02c8d..00000000
Binary files a/docs/css/umn/en-us_image_0000001714802109.png and /dev/null differ
diff --git a/docs/css/umn/en-us_image_0000001714802149.png b/docs/css/umn/en-us_image_0000001714802149.png
deleted file mode 100644
index b8f00cb2..00000000
Binary files a/docs/css/umn/en-us_image_0000001714802149.png and /dev/null differ
diff --git a/docs/css/umn/en-us_image_0000001714802201.png b/docs/css/umn/en-us_image_0000001714802201.png
deleted file mode 100644
index 24188e7a..00000000
Binary files a/docs/css/umn/en-us_image_0000001714802201.png and /dev/null differ
diff --git a/docs/css/umn/en-us_image_0000001714802297.png b/docs/css/umn/en-us_image_0000001714802297.png
deleted file mode 100644
index 01638645..00000000
Binary files a/docs/css/umn/en-us_image_0000001714802297.png and /dev/null differ
diff --git a/docs/css/umn/en-us_image_0000001714802397.png b/docs/css/umn/en-us_image_0000001714802397.png
deleted file mode 100644
index fd2acf53..00000000
Binary files a/docs/css/umn/en-us_image_0000001714802397.png and /dev/null differ
diff --git a/docs/css/umn/en-us_image_0000001714921929.png b/docs/css/umn/en-us_image_0000001714921929.png
deleted file mode 100644
index 3af09777..00000000
Binary files a/docs/css/umn/en-us_image_0000001714921929.png and /dev/null differ
diff --git a/docs/css/umn/en-us_image_0000001714921945.png b/docs/css/umn/en-us_image_0000001714921945.png
deleted file mode 100644
index 5400e67b..00000000
Binary files a/docs/css/umn/en-us_image_0000001714921945.png and /dev/null differ
diff --git a/docs/css/umn/en-us_image_0000001714922005.png b/docs/css/umn/en-us_image_0000001714922005.png
deleted file mode 100644
index 33445a92..00000000
Binary files a/docs/css/umn/en-us_image_0000001714922005.png and /dev/null differ
diff --git a/docs/css/umn/en-us_image_0000001744598325.png b/docs/css/umn/en-us_image_0000001744598325.png
new file mode 100644
index 00000000..3e15dbf9
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001744598325.png differ
diff --git a/docs/css/umn/en-us_image_0000001744678489.jpg b/docs/css/umn/en-us_image_0000001744678489.jpg
new file mode 100644
index 00000000..fac6892b
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001744678489.jpg differ
diff --git a/docs/css/umn/en-us_image_0000001758618249.png b/docs/css/umn/en-us_image_0000001758618249.png
new file mode 100644
index 00000000..0d21733f
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001758618249.png differ
diff --git a/docs/css/umn/en-us_image_0000001768547780.png b/docs/css/umn/en-us_image_0000001768547780.png
new file mode 100644
index 00000000..68651e9f
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001768547780.png differ
diff --git a/docs/css/umn/en-us_image_0000001771562782.png b/docs/css/umn/en-us_image_0000001771562782.png
new file mode 100644
index 00000000..6fcea43c
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001771562782.png differ
diff --git a/docs/css/umn/en-us_image_0000001815267817.png b/docs/css/umn/en-us_image_0000001815267817.png
new file mode 100644
index 00000000..1a5abf74
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001815267817.png differ
diff --git a/docs/css/umn/en-us_image_0000001815267821.png b/docs/css/umn/en-us_image_0000001815267821.png
new file mode 100644
index 00000000..42dcd9b9
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001815267821.png differ
diff --git a/docs/css/umn/en-us_image_0000001818277097.png b/docs/css/umn/en-us_image_0000001818277097.png
new file mode 100644
index 00000000..6fa89034
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001818277097.png differ
diff --git a/docs/css/umn/en-us_image_0000001823414260.png b/docs/css/umn/en-us_image_0000001823414260.png
new file mode 100644
index 00000000..8b68ca90
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001823414260.png differ
diff --git a/docs/css/umn/en-us_image_0000001823619658.png b/docs/css/umn/en-us_image_0000001823619658.png
new file mode 100644
index 00000000..2c0e523b
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001823619658.png differ
diff --git a/docs/css/umn/en-us_image_0000001823780106.png b/docs/css/umn/en-us_image_0000001823780106.png
new file mode 100644
index 00000000..ad260324
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001823780106.png differ
diff --git a/docs/css/umn/en-us_image_0000001824400680.png b/docs/css/umn/en-us_image_0000001824400680.png
new file mode 100644
index 00000000..5a5418b2
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001824400680.png differ
diff --git a/docs/css/umn/en-us_image_0000001824562672.png b/docs/css/umn/en-us_image_0000001824562672.png
new file mode 100644
index 00000000..4589d008
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001824562672.png differ
diff --git a/docs/css/umn/en-us_image_0000001870298901.png b/docs/css/umn/en-us_image_0000001870298901.png
new file mode 100644
index 00000000..8b68ca90
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001870298901.png differ
diff --git a/docs/css/umn/en-us_image_0000001870333105.png b/docs/css/umn/en-us_image_0000001870333105.png
new file mode 100644
index 00000000..a4cf1d41
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001870333105.png differ
diff --git a/docs/css/umn/en-us_image_0000001870378393.png b/docs/css/umn/en-us_image_0000001870378393.png
new file mode 100644
index 00000000..fe0eb6b8
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001870378393.png differ
diff --git a/docs/css/umn/en-us_image_0000001871319057.png b/docs/css/umn/en-us_image_0000001871319057.png
new file mode 100644
index 00000000..18765b94
Binary files /dev/null and b/docs/css/umn/en-us_image_0000001871319057.png differ
diff --git a/docs/css/umn/en-us_topic_0000001476817894.html b/docs/css/umn/en-us_topic_0000001476817894.html
new file mode 100644
index 00000000..e4ad9a18
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001476817894.html
@@ -0,0 +1,12 @@
+
+
+How Do I Obtain the Security Certificate of CSS?
+The security certificate (CloudSearchService.cer) can be downloaded only for security clusters that have enabled HTTPS access.
+
- Log in to the CSS management console.
- In the navigation pane, choose Clusters. The cluster list is displayed.
- Click the name of a cluster to go to the cluster details page.
- On the Configuration page, click Download Certificate next to Security Mode.
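To verify the downloaded certificate, you can use it with curl when calling the cluster over HTTPS. A minimal sketch, assuming a PEM-encoded copy of the certificate and placeholder values for the node IP address and username:
curl --cacert ./CloudSearchService.pem -u admin 'https://<node-private-ip>:9200/_cat/indices?v'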
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001476817902.html b/docs/css/umn/en-us_topic_0000001476817902.html
new file mode 100644
index 00000000..fb758719
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001476817902.html
@@ -0,0 +1,22 @@
+
+
+Clusters
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001476817906.html b/docs/css/umn/en-us_topic_0000001476817906.html
new file mode 100644
index 00000000..1a5006cb
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001476817906.html
@@ -0,0 +1,16 @@
+
+
+How Do I Convert the Format of a CER Security Certificate?
+The security certificate (CloudSearchService.cer) can be downloaded only for security clusters that have enabled HTTPS access. Most software supports certificates in the .pem or .jks format. You need to convert the format of the CSS security certificate.
+
+
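A minimal sketch of typical conversion commands, assuming the standard OpenSSL and JDK keytool utilities are installed and a DER-encoded certificate (drop -inform der if the file is already PEM-encoded); newname is a user-defined name:
openssl x509 -inform der -in CloudSearchService.cer -out newname.pem
keytool -import -alias newname -keystore ./newname.jks -file ./CloudSearchService.cer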
In the preceding commands, newname indicates the user-defined certificate name.
+
After the command is executed, set the certificate password and confirm the password as prompted. Securely store the password. It will be used for accessing the cluster.
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001476817910.html b/docs/css/umn/en-us_topic_0000001476817910.html
new file mode 100644
index 00000000..7a7a0f54
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001476817910.html
@@ -0,0 +1,20 @@
+
+
+How Do I Set the search.max_buckets Parameter for an ES Cluster?
+Function
If aggregation results on shards exceed the maximum number of buckets that can be returned (default: 10,000), increase the limit by changing the value of search.max_buckets.
+
+
Solution
Run the following command on the
Dev Tools page of Kibana:
PUT _cluster/settings
+{
+ "persistent": {
+ "search.max_buckets": 20000
+ }
+}
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001476817914.html b/docs/css/umn/en-us_topic_0000001476817914.html
new file mode 100644
index 00000000..f9599ab4
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001476817914.html
@@ -0,0 +1,22 @@
+
+
+Can I Restore a Deleted Cluster?
+Yes. You can use a snapshot stored in OBS to restore a cluster. A deleted cluster that has no snapshots in OBS cannot be restored. Exercise caution when deleting a cluster.
+
To restore a deleted cluster, perform the following steps:
+
- Log in to the CSS management console.
- In the navigation pane on the left, choose Clusters. On the displayed Clusters page, click Create Cluster in the upper right corner to create a cluster and enable the snapshot function. Set the OBS bucket and backup path to those of the cluster to be restored.
To restore a deleted cluster to an existing cluster, set the OBS bucket and backup path to those of the deleted cluster.
+
To restore a deleted cluster to a new cluster, ensure they are in the same region. The new cluster version must be the same as or later than that of the deleted cluster. The number of nodes in the new cluster must be greater than half of that in the deleted cluster. Otherwise, the cluster may fail to be restored.
+
+ - If the status of the new cluster changes to Available, click the cluster name to go to the Cluster Information page.
- In the navigation pane on the left, choose Cluster Snapshots.
In the snapshot management list, you can view the snapshot information of the deleted cluster. If no information is displayed, wait for several minutes and refresh the page.
+ - Locate the target snapshot and click Restore in the Operation column. The Restore page is displayed.
- On the Restore page, set restoration parameters.
Index: Enter the name of the index you want to restore. If you do not specify any index name, data of all indexes will be restored. The value can contain 0 to 1,024 characters. Uppercase letters, spaces, and certain special characters (including "\<|>/?) are not allowed.
+Rename Pattern: Enter a regular expression. Indexes that match the regular expression are restored. The default value index_(.+) indicates restoring data of all indices. The value contains 0 to 1,024 characters. Uppercase letters, spaces, and certain special characters (including "\<|>/?,) are not allowed.
+Rename Replacement: Enter the index renaming rule. The default value restored_index_$1 indicates that restored_ is added in front of the names of all restored indexes. The value can contain 0 to 1,024 characters. Uppercase letters, spaces, and certain special characters (including "\<|>/?,) are not allowed. You can set Rename Replacement only if you have specified Rename Pattern.
+Cluster: Select the cluster that you want to restore. You can select the current cluster or others. However, you can only restore the snapshot to clusters whose status is Available. If the status of the current cluster is Unavailable, you cannot restore the snapshot to the current cluster. If you select another cluster and two or more indexes in the cluster have the same name, data of all indices with the same name as the name you specify will be overwritten. Therefore, exercise caution when you set the parameters.
+Figure 1 Restoring a snapshot
+ - Click OK. If restoration succeeds, Task Status of the snapshot in the snapshot list will change to Restoration succeeded, and the index data is generated again according to the snapshot information.
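The console restore operation above roughly maps to the standard Elasticsearch snapshot restore API. A minimal Kibana Dev Tools sketch, assuming a snapshot repository named my-obs-repo and a snapshot named snapshot-1 (both placeholders):
POST _snapshot/my-obs-repo/snapshot-1/_restore
{
  "indices": "my_index",
  "rename_pattern": "index_(.+)",
  "rename_replacement": "restored_index_$1"
}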
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001476817918.html b/docs/css/umn/en-us_topic_0000001476817918.html
new file mode 100644
index 00000000..4c197cc7
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001476817918.html
@@ -0,0 +1,21 @@
+
+
+
+ Ports
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001476977542.html b/docs/css/umn/en-us_topic_0000001476977542.html
new file mode 100644
index 00000000..cab7be28
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001476977542.html
@@ -0,0 +1,20 @@
+
+
+Why Does My ECS Fail to Connect to a Cluster?
+Perform the following steps to troubleshoot this problem:
+
- Check whether the ECS instance and cluster are in the same VPC.
- If they are, go to 2.
- If they are not, create an ECS instance and ensure that the ECS instance is in the same VPC as the cluster.
+ - Check the security group rules of the cluster to confirm that TCP port 9200 is allowed, either explicitly or within an allowed port range, in both the inbound and outbound directions.
- If it is allowed, go to 3.
- If it is not allowed, switch to the VPC management console and configure the security group rule of the cluster to allow port 9200 in both the outbound and inbound directions.
+ - Check whether the ECS instance has been added to a security group.
- If the instance has been added to a security group, check whether the security group configuration rules are appropriate. You can view the Security Group information on the Basic Information tab page of the cluster. Then, go to step 4.
Figure 1 Viewing security group information
+ - If the instance has not been added to the security group, go to the VPC page from the ECS instance details page, select a security group, and add the ECS to the group.
+ - Check whether the ECS instance can connect to the cluster.
ssh <Private network address and port number of a node>
If the cluster contains multiple nodes, check whether the ECS can be connected to each node in the cluster.
+
+
+- If the connection is normal, the network is running properly.
- If the connection still fails, contact technical support.
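To quickly test whether the cluster's REST port responds from the ECS, a hedged example with the node private IP as a placeholder (for a security cluster with HTTPS enabled, additionally pass -u <username> and the cluster certificate):
curl -XGET 'http://<node-private-ip>:9200'
A normal response is a small JSON document containing the node name and the Elasticsearch version.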
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001476977546.html b/docs/css/umn/en-us_topic_0000001476977546.html
new file mode 100644
index 00000000..c087c97d
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001476977546.html
@@ -0,0 +1,17 @@
+
+
+How Does CSS Ensure Data and Service Security?
+CSS uses network isolation, in addition to various host and data security measures.
+
- Network isolation
The entire network is divided into two planes: service plane and management plane. The two planes are deployed and isolated physically to ensure the security of the service and management networks.
+- Service plane: This is the network plane of the cluster. It provides service channels for users and delivers data definitions, indexing, and search capabilities.
- Management plane: This is the management console, where you manage CSS.
+ - Host security
CSS provides the following security measures:
+- The VPC security group ensures the security of the hosts in a VPC.
- Network access control lists (ACLs) allow you to control what data can enter or exit your network.
- The internal security infrastructure (including the network firewall, intrusion detection system, and protection system) monitors all network traffic that enters or exits the VPC through an IPsec VPN.
+ - Data security
Multiple replicas, cross-AZ deployment of clusters, and third-party (OBS) backup of index data ensure the security of user data.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001476977550.html b/docs/css/umn/en-us_topic_0000001476977550.html
new file mode 100644
index 00000000..d6778ef8
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001476977550.html
@@ -0,0 +1,12 @@
+
+
+Can I Install Search Guard on CSS?
+CSS does not currently support installation of Search Guard.
+
CSS provides clusters in security mode, which have the same functions as Search Guard.
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001476977554.html b/docs/css/umn/en-us_topic_0000001476977554.html
new file mode 100644
index 00000000..f7d0e4b9
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001476977554.html
@@ -0,0 +1,11 @@
+
+
+What Are the Impacts If an Elasticsearch Cluster Has Too Many Shards?
+- A large number of shards in a cluster slows down shard creation.
- If automatic index creation is enabled, slow index creation may cause a large number of write requests to pile up in memory or may even result in a cluster breakdown.
- If there are too many shards and you cannot properly monitor workloads, the number of records in a single shard may exceed the threshold, and write requests may be denied.
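Before tuning, you can check how many shards the cluster currently holds. The following standard queries can be run in Kibana Dev Tools (a sketch; no CSS-specific APIs are assumed):
GET _cluster/health?filter_path=active_primary_shards,active_shards
GET _cat/shards?v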
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001476977558.html b/docs/css/umn/en-us_topic_0000001476977558.html
new file mode 100644
index 00000000..8b727fb2
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001476977558.html
@@ -0,0 +1,19 @@
+
+
+How Do I Configure the Threshold for CSS Slow Query Logs?
+The slow query log settings of CSS are the same as those of Elasticsearch. You can configure slow query logs via the _settings API. For example, you can run the following command in Kibana to set the index level:
+
PUT /my_index/_settings
+{
+ "index.search.slowlog.threshold.query.warn": "10s",
+ "index.search.slowlog.threshold.fetch.debug": "500ms",
+ "index.indexing.slowlog.threshold.index.info": "5s"
+}
+
- If a query takes longer than 10 seconds, a WARN log will be generated.
- If retrieval takes longer than 500 milliseconds, a DEBUG log will be generated.
- If an indexing operation takes longer than 5 seconds, an INFO log will be generated.
+
For details, visit the official website: https://www.elastic.co/guide/cn/elasticsearch/guide/current/logging.html
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001476977562.html b/docs/css/umn/en-us_topic_0000001476977562.html
new file mode 100644
index 00000000..3e2c19e4
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001476977562.html
@@ -0,0 +1,18 @@
+
+
+Can I Change the Number of Shards to Four with Two Replicas When There Is One Shard Set in the JSON File?
+Once an index is created, the number of primary shards cannot be changed.
+
You can run the following command in Kibana to change the number of replicas:
+
PUT /indexname/_settings
+{
+"number_of_replicas":1 //Number of replicas
+}
+
In the preceding command, indexname indicates the index name. Set this parameter based on site requirements.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477137526.html b/docs/css/umn/en-us_topic_0000001477137526.html
new file mode 100644
index 00000000..d9f38e6e
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477137526.html
@@ -0,0 +1,16 @@
+
+
+Does the Value i of node.roles Indicate an Ingest Node?
+Function
If the value of node.roles of a client node is i, is this client node an ingest node?
- Are there coordinating only nodes in clusters? Are the client requests distributed to coordinating nodes?
- Are ingest nodes in the idle state when there are no ingest requests?
+
+
+
Solution
If the value of node.roles of a client node is i, the ingest node mode is enabled.
+
- The coordinating only nodes of Elasticsearch are called client nodes in CSS. If a cluster has no client nodes, client requests will be distributed to all nodes.
- An ingest node preprocesses and converts data, functioning like the Logstash part of an ELK stack. Even if there are no ingest requests, ingest nodes are not idle.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477137530.html b/docs/css/umn/en-us_topic_0000001477137530.html
new file mode 100644
index 00000000..f7b84649
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477137530.html
@@ -0,0 +1,43 @@
+
+
+
+ Functions
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477137534.html b/docs/css/umn/en-us_topic_0000001477137534.html
new file mode 100644
index 00000000..af0751c3
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477137534.html
@@ -0,0 +1,32 @@
+
+
+General Consulting
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477137538.html b/docs/css/umn/en-us_topic_0000001477137538.html
new file mode 100644
index 00000000..1de81837
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477137538.html
@@ -0,0 +1,11 @@
+
+
+What Is the Relationship Between the Filebeat Version and Cluster Version?
+- Non-security mode: no restrictions.
- Cluster in security mode: The Filebeat OSS version must match the cluster version. For details on how to download the Filebeat OSS version, see Past Releases of Elastic Stack Software.
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477137542.html b/docs/css/umn/en-us_topic_0000001477137542.html
new file mode 100644
index 00000000..62cfa3ab
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477137542.html
@@ -0,0 +1,12 @@
+
+
+Can I Upgrade a Cluster from an Earlier Version to a Later Version?
+A cluster cannot be directly upgraded. You can purchase a cluster of a later version and migrate your data to it.
+
- Creating a Cluster: Create a cluster of a later version in the region where your current cluster is deployed.
- Migrating a Cluster: Migrate your cluster by backing data up and restoring indexes.
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477137546.html b/docs/css/umn/en-us_topic_0000001477137546.html
new file mode 100644
index 00000000..c45bf09a
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477137546.html
@@ -0,0 +1,17 @@
+
+
+How Can I Manage CSS?
+You can use any of the following three methods to manage CSS or call the search engine APIs. In each case, you construct a request message and send it to the cluster.
- curl
curl is a command-line tool used to transfer data to or from a given URL. It serves as an HTTP client that can send HTTP requests to the HTTP server and receive response messages. You can also use curl to debug APIs. For more information about curl, visit https://curl.haxx.se/.
+ - Code
You can call APIs through code to assemble, send, and process request messages.
+ - REST client
Both Mozilla Firefox and Google Chrome provide a graphical browser plugin, the REST client, which you can use to send and process requests.
+– For Mozilla Firefox, see Firefox REST Client.
+– For Google Chrome, see Postman.
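As an example of the curl method above, the following hedged command lists the indexes of a non-security cluster; the node private IP is a placeholder, and a security cluster additionally requires -u <username> and HTTPS with the cluster certificate:
curl -XGET 'http://<node-private-ip>:9200/_cat/indices?v'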
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477297334.html b/docs/css/umn/en-us_topic_0000001477297334.html
new file mode 100644
index 00000000..89caabd3
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477297334.html
@@ -0,0 +1,29 @@
+
+
+How Do I Update the CSS Lifecycle Policy?
+The CSS lifecycle is implemented using the Index State Management (ISM) of Open Distro. For details about how to configure policies related to the ISM template, see the Open Distro documentation.
+
- When a policy is created, the system writes a record to the .opendistro-ism-config index. In the record, _id is the policy name, and the content is the policy definition.
Figure 1 Writing a data record
+ - After a policy is bound to an index, the system writes another record to the .opendistro-ism-config index. The following figure shows the initial status of a record.
Figure 2 Initial data status
+ - Run the explain command. Only a policy ID will be returned.
GET _opendistro/_ism/explain/data2
+{
+ "data2" : {
+ "index.opendistro.index_state_management.policy_id" : "policy1"
+ }
+}
+Open Distro will execute an initialization process to fill the policy content in the record. The following figure shows the initialized data.
+Figure 3 Initialized data
+After the initialization, min_index_age in the policy will be copied.
+
An index that has already been initialized uses its own copy of the policy, so updating the policy does not take effect on that index.
+
+
- After the policy is modified, call the change_policy API to update the policy.
POST _opendistro/_ism/change_policy/data1
+{
+ "policy_id": "policy1"
+}
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477297338.html b/docs/css/umn/en-us_topic_0000001477297338.html
new file mode 100644
index 00000000..517bb7d7
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477297338.html
@@ -0,0 +1,11 @@
+
+
+Can I Associate My EIP If I Want to Access the Cluster from the Internet?
+No. To access a cluster from the Internet, see Public IP Address Access.
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477297346.html b/docs/css/umn/en-us_topic_0000001477297346.html
new file mode 100644
index 00000000..fd47c8ce
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477297346.html
@@ -0,0 +1,11 @@
+
+
+Can I Use x-pack-sql-jdbc to Access CSS Clusters and Query Data?
+No. Currently, CSS does not integrate the x-pack component.
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477297350.html b/docs/css/umn/en-us_topic_0000001477297350.html
new file mode 100644
index 00000000..52611362
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477297350.html
@@ -0,0 +1,12 @@
+
+
+What Can the Disk Space of a CSS Cluster Be Used For?
+You can store the following logs and files:
+
- Log files: Elasticsearch logs
- Data files: Elasticsearch index files
- Other files: cluster configuration files
- OS: 5% storage space reserved for the OS by default
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477297354.html b/docs/css/umn/en-us_topic_0000001477297354.html
new file mode 100644
index 00000000..cec3c467
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477297354.html
@@ -0,0 +1,25 @@
+
+
+What Data Compression Algorithms Does CSS Use?
+CSS supports two data compression algorithms: LZ4 (by default) and best_compression.
+
- LZ4 algorithm
LZ4 is the default compression algorithm of Elasticsearch. This algorithm can compress and decompress data quickly, but its compression ratio is low.
+LZ4 scans data with a 4-byte window, which slides 1 byte forward at a time, and compresses duplicate data. This algorithm suits scenarios where a large amount of data is read but only a small amount is written.
+ - best_compression algorithm
This algorithm is suitable when a large amount of data is written and index storage cost matters, such as log and time series analysis, because it can greatly reduce the index storage cost.
+
+
Run the following command to switch the default compression algorithm (LZ4) to best_compression:
PUT index-1
+{
+ "settings": {
+ "index": {
+ "codec": "best_compression"
+ }
+ }
+}
+
+
The LZ4 algorithm compresses and decompresses data quickly, whereas the best_compression algorithm achieves a higher compression ratio at the cost of speed.
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477297358.html b/docs/css/umn/en-us_topic_0000001477297358.html
new file mode 100644
index 00000000..3b6fab80
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477297358.html
@@ -0,0 +1,13 @@
+
+
+How Do I Set the Number of Index Replicas to 0 in Batches?
+- Log in to the Kibana page of the cluster. In the navigation pane, choose Dev Tools.
- Modify and run the PUT /*/_settings{"number_of_replicas":0} command.
Do not directly run the preceding command, because the asterisk (*) may match security indexes. You are advised to specify the index required for the batch operation. Example: PUT /test*/_settings{"number_of_replicas":0}
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477297362.html b/docs/css/umn/en-us_topic_0000001477297362.html
new file mode 100644
index 00000000..4348d536
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477297362.html
@@ -0,0 +1,11 @@
+
+
+Can a New Cluster Use the IP Address of the Old Cluster?
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477419712.html b/docs/css/umn/en-us_topic_0000001477419712.html
new file mode 100644
index 00000000..91551284
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477419712.html
@@ -0,0 +1,17 @@
+
+
+Enhanced Cluster Monitoring
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477419716.html b/docs/css/umn/en-us_topic_0000001477419716.html
new file mode 100644
index 00000000..9dfeb586
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477419716.html
@@ -0,0 +1,74 @@
+
+
+Cluster Planning for Vector Retrieval
+Off-heap memory is used for index construction and query in vector retrieval, so the required cluster capacity depends on the index type and the off-heap memory size. You can estimate the off-heap memory required for a full index build to select proper cluster specifications. Because vector retrieval is memory-intensive, CSS disables the vector search plug-in by default for clusters with 8 GB of memory or less.
+
There are different methods for estimating the size of off-heap memory required by different types of indexes. The calculation formulas are as follows:
- GRAPH Index

+
If you need to update indexes in real time, consider the off-heap memory overhead required for vector index construction and automatic merging. The actual mem_needs required is at least 1.5 to 2 times the original estimate.
+
+ - PQ Index

 - FLAT and IVF Indexes

+
+
+
Table 1 Parameter description
Parameter
+ |
+Description
+ |
+
+
+dim
+ |
+Vector dimensions
+ |
+
+neighbors
+ |
+Number of neighbors of a graph node. The default value is 64.
+ |
+
+dim_size
+ |
+Number of bytes required by each dimension. The default value is four bytes in the float type.
+ |
+
+num
+ |
+Total number of vectors
+ |
+
+delta
+ |
+Metadata size. This parameter can be left blank.
+ |
+
+frag_num
+ |
+Number of vector segments during quantization and coding. If this parameter is not specified when an index is created, the value is determined by the vector dimension dim.
+if dim <= 256:
+ frag_num = dim / 4
+elif dim <= 512:
+ frag_num = dim / 8
+else :
+ frag_num = 64
+ |
+
+frag_size
+ |
+Size of the center point during quantization and coding. The default value is 1. If the value of frag_num is greater than 256, the value of frag_size is 2.
+ |
+
+
+
+
+
+
These calculation methods can estimate the size of off-heap memory required by a complete vector index. To determine cluster specifications, you also need to consider the heap memory overhead of each node.
+
Heap memory allocation policy: The size of the heap memory of each node is half of the node physical memory, and the maximum size is 31 GB.
+
For example, if you create a Graph index for the SIFT10M dataset, set dim to 128, dim_size to 4, neighbors to default value 64, and num to 10 million, the off-heap memory required by the Graph index is as follows:
+

+
Considering the heap memory overhead, a node with 8 vCPUs and 16 GB of memory is recommended. If real-time writes or updates are required, provision more memory.
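For reference, assuming the GRAPH off-heap estimate is approximately (dim × dim_size + neighbors × 4) × num (an assumption for illustration, not the exact formula from this guide), this example works out to (128 × 4 + 64 × 4) × 10,000,000 bytes ≈ 7.7 GB of off-heap memory, which is consistent with the 16 GB recommendation once up to half of the memory is reserved for the heap.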
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477419720.html b/docs/css/umn/en-us_topic_0000001477419720.html
new file mode 100644
index 00000000..28f24327
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477419720.html
@@ -0,0 +1,107 @@
+
+
+Access Statistics and Traffic Control Information Query
+Flow control can be implemented via an independent API.
+
Procedure
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run the commands to query traffic control information.
- Check the traffic control status of all nodes.
GET /_nodes/stats/filter/v2
+ - View traffic control details of all nodes.
GET /_nodes/stats/filter/v2?detail
+ - View the traffic control status of a specific node.
GET /_nodes/{nodeId}/stats/filter/v2
+{nodeId} indicates the ID of the node you want to check.
+Example response:
+{
+ "_nodes" : {
+ "total" : 1,
+ "successful" : 1,
+ "failed" : 0
+ },
+ "cluster_name" : "css-xxxx",
+ "nodes" : {
+ "d3qnVIpPTtSoadkV0LQEkA" : {
+ "name" : "css-xxxx-ess-esn-1-1",
+ "host" : "192.168.x.x",
+ "timestamp" : 1672236425112,
+ "flow_control" : {
+ "http" : {
+ "current_connect" : 52,
+ "rejected_concurrent" : 0,
+ "rejected_rate" : 0,
+ "rejected_black" : 0,
+ "rejected_breaker" : 0
+ },
+ "access_items" : [
+ {
+ "remote_address" : "10.0.0.x",
+ "search_count" : 0,
+ "bulk_count" : 0,
+ "other_count" : 4
+ }
+ ],
+ "holding_requests" : 0
+ }
+ }
+ }
+}
+
+Table 1 Response parameters
Parameter
+ |
+Description
+ |
+
+
+current_connect
+ |
+Number of HTTP connections of a node, which is recorded even if flow control is disabled. This value is equal to the current_open value of GET /_nodes/stats/http API. It includes the current client connections of nodes.
+ |
+
+rejected_concurrent
+ |
+Number of concurrent connections rejected during HTTP flow control. This value is not cleared when HTTP flow control is disabled.
+ |
+
+rejected_rate
+ |
+Number of new connections rejected during HTTP flow control. This value is not cleared when HTTP flow control is disabled.
+ |
+
+rejected_black
+ |
+Number of requests rejected based on the blacklist during HTTP flow control. This value is not cleared when HTTP flow control is disabled.
+ |
+
+rejected_breaker
+ |
+Number of rejected new connections after one-click traffic blocking is enabled.
+ |
+
+remote_address
+ |
+IP addresses and the number of requests.
+ |
+
+search_count
+ |
+Number of times that a client accessed a database using _search and _msearch.
+ |
+
+bulk_count
+ |
+Number of times that a client accessed a database using _bulk.
+ |
+
+other_count
+ |
+Number of times that a client accessed a database using other requests.
+ |
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477419724.html b/docs/css/umn/en-us_topic_0000001477419724.html
new file mode 100644
index 00000000..3f96d552
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477419724.html
@@ -0,0 +1,161 @@
+
+
+Deploying a Cross-AZ Cluster
+To prevent data loss and minimize the cluster downtime in case of service interruption, CSS supports cross-AZ cluster deployment. When creating a cluster, you can select two or three AZs in the same region. The system will automatically allocate nodes to these AZs.
+
Allocating Nodes
If you select two or three AZs when creating a cluster, CSS automatically enables the cross-AZ HA function and properly allocates nodes to different AZs. Table 1 describes how the nodes are allocated.
+
- When creating a cluster, ensure that the number of selected nodes is no less than the number of AZs. Otherwise, cross-AZ deployment is not supported.
- If you enable master nodes when deploying a cross-AZ cluster, the master nodes will also be distributed to different AZs.
- The node quantity difference between any two AZs is no more than one.
+
+
+
Table 1 Number of nodes and AZ distribution
Nodes | Single AZ (AZ1) | Two AZs (AZ1/AZ2) | Three AZs (AZ1/AZ2/AZ3)
1 | 1 | Not supported | Not supported
2 | 2 | 1/1 | Not supported
3 | 3 | 2/1 | 1/1/1
4 | 4 | 2/2 | 2/1/1
... | ... | ... | ...
+
+
+
+
+
+
Setting Replicas
Setting replicas enables clusters to effectively use the HA capability of AZs.
+
- In two-AZ deployment, if one AZ becomes unavailable, the other AZ continues to provide services. In this case, at least one replica is required. Elasticsearch has one replica by default. You can retain the default value if you do not require higher read performance.
- In three-AZ deployment, if one AZ becomes unavailable, the other AZs continue to provide services. In this case, at least one replica is required. Elasticsearch has one replica by default. If you need more replicas to improve the cluster's ability to handle queries, modify settings to change the number of replicas.
You can run the following command to modify the number of index replicas:
+curl -XPUT -H 'Content-Type: application/json' http://ip:9200/{index_name}/_settings -d '{"number_of_replicas":2}'
+Alternatively, run the following command to specify the number of replicas in the template:
+curl -XPUT -H 'Content-Type: application/json' http://ip:9200/_template/templatename -d '{"template": "*", "settings": {"number_of_replicas": 2}}'
+
+
- ip: private network address
- index_name: index name
- number_of_replicas: number of replicas after modification. The value in the preceding command indicates that two replicas are required.
+
+
+
Possible Service Interruptions
The following table describes the possible service interruptions when an AZ of a two- or three-AZ cluster is faulty.
+
+
Table 2 Possible service interruptions
AZs
+ |
+Master Nodes
+ |
+Service Interruption Analysis
+ |
+
+
+2
+ |
+0
+ |
+- When the number of nodes is even:
- If half of the data nodes are faulty, a node in the faulty AZ needs to be replaced before a master node can be elected.
+ - When the number of nodes is odd:
- If the faulty AZ contains one more node than the normal AZ, a node in the faulty AZ needs to be replaced before a master node can be elected. For details about how to replace nodes, contact technical support.
- If the faulty AZ contains one less node than the normal AZ, services are not interrupted and a master node can be elected.
+
+ |
+
+2
+ |
+3
+ |
+There is a 50% probability of service interruption. Two dedicated master nodes are allocated to one AZ and the third to the other AZ:
+- If the AZ with one dedicated master node becomes faulty, a master node can still be elected in the AZ that has two dedicated master nodes.
- If the AZ with two dedicated master nodes becomes faulty, the remaining AZ has only one dedicated master node and a master cannot be elected. In this case, services will be interrupted and you need to contact technical support.
+ |
+
+3
+ |
+0
+ |
+If you configure four nodes across three AZs, each AZ has at least one node, and a failure of the AZ with two nodes will interrupt services. You are advised not to configure four nodes when selecting three AZs.
+In other cases, service interruption generally does not occur.
+ |
+
+3
+ |
+3
+ |
+Service interruption does not occur.
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477419728.html b/docs/css/umn/en-us_topic_0000001477419728.html
new file mode 100644
index 00000000..8994565d
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477419728.html
@@ -0,0 +1,17 @@
+
+
+Logging In to Kibana
+After creating a CSS cluster, you can log in to Kibana through the console or public network.
+
Kibana Usage Restrictions
You can customize the username, role name, and tenant name in Kibana.
+
+
Procedure
- Logging in to the console
- Log in to the CSS management console.
- On the Clusters page, locate the target cluster and click Access Kibana in the Operation column to go to the Kibana login page.
- Non-security cluster: The Kibana console is displayed.
- Security cluster: Enter the username and password on the login page and click Log In to go to the Kibana console. The default username is admin and the password is the one specified during cluster creation.
+ - After the login is successful, you can access the Elasticsearch cluster through Kibana.
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477419736.html b/docs/css/umn/en-us_topic_0000001477419736.html
new file mode 100644
index 00000000..788c5437
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477419736.html
@@ -0,0 +1,104 @@
+
+
+Memory Flow Control
+Context
Elasticsearch provides a circuit breaker, which will terminate requests or return the error code 429 if the memory usage exceeds its threshold. However, the circuit breaker rejects a request only after the node reads the entire request, which occupies heap memory. To prevent a request from being fully received by a node before the request is rejected, you can control the client traffic based on the real-time status of the node heap memory.
+
+
Parameter Description
The following table describes memory flow control parameters.
+
+
Table 1 Memory flow control parameters
Parameter
+ |
+Type
+ |
+Description
+ |
+
+
+flowcontrol.memory.enabled
+ |
+Boolean
+ |
+Whether to enable memory flow control. After this function is enabled, the memory usage is continuously monitored. The value can be:
+- true
- false (default value)
+ |
+
+flowcontrol.memory.heap_limit
+ |
+String
+ |
+Maximum global heap memory usage of a node. If the value of this parameter is exceeded, traffic backpressure is performed.
+Value range: 10%–100%
+Default value: 90%
+ |
+
+flowcontrol.holding.in_flight_factor
+ |
+Float
+ |
+Backpressure release factor. The principle is similar to that of the circuit breaker parameter network.breaker.inflight_requests.overhead. When the memory usage reaches the limit, a larger value indicates stronger backpressure. The write traffic will be limited.
+Value range: ≥ 0.5
+Default value: 1.0
+ |
+
+flowcontrol.holding.max
+ |
+TimeValue
+ |
+Maximum delay of each request. If the delay exceeds this value, the backpressure on the request is released or the request connection is closed, depending on the flowcontrol.holding.max_strategy setting.
+Value range: ≥ 15s
+Default value: 60s
+ |
+
+flowcontrol.holding.max_strategy
+ |
+String
+ |
+Policy after the maximum delay time is exceeded. The value can be:
+- keep (default value): If the heap memory is still high, continue the backpressure. The server determines when to execute the request based on the real-time memory.
- soft: The requests will be executed even if the heap memory is still high. The inFlight circuit breaker will determine whether to execute or reject the requests.
- hard: If the heap memory is still high, requests will be discarded and the client connection of the requests will be disconnected.
+ |
+
+flowcontrol.memory.once_free_max
+ |
+String
+ |
+Maximum memory that can be opened at a time for a suspended request queue. This parameter is used to prevent a cluster from being entirely suspended due to temporary low memory under high pressure.
+Value range: 1 to 50
+Default value: 10%
+ |
+
+flowcontrol.memory.nudges_gc
+ |
+Boolean
+ |
+Whether to trigger garbage collection to ensure write stability when the write pressure is too high. (The backpressure connection pool is checked every second. The write pressure is regarded high if all the existing connections are blocked and new write requests cannot be released.) The value can be:
+- true (default value)
- false
+ |
+
+
+
+
+
- flowcontrol.memory.enabled and flowcontrol.memory.heap_limit are the most important parameters. enabled indicates the memory flow control switch, and heap_limit indicates the heap memory threshold of a node.
- The default value 90% of flowcontrol.memory.heap_limit is a conservative threshold. When the heap memory usage is greater than 90%, the system stops reading large requests that exceed 64 KB from the client until the heap memory decreases. If the heap memory decreases to 85%, the maximum client data that can be read is 5% of the maximum heap memory. If the heap memory usage has been higher than 90% for a long time, client connection requests cannot be read. In this case, the GC algorithm is triggered to perform garbage collection until the heap memory usage is lower than the threshold.
- Generally, you can set the flowcontrol.memory.heap_limit threshold to 80% or less to ensure that the node has certain heap memory for operations besides data writing, such as Elasticsearch query and segment merge.
+
+
+
Procedure
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run commands to enable or disable memory flow control.
- Enable memory flow control
PUT /_cluster/settings
+{
+ "persistent": {
+ "flowcontrol.memory.enabled": true,
+ "flowcontrol.memory.heap_limit": "80%"
+ }
+}
+ - Disable cluster memory flow control
PUT /_cluster/settings
+{
+ "persistent": {
+ "flowcontrol.memory.enabled": false
+ }
+}
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477419740.html b/docs/css/umn/en-us_topic_0000001477419740.html
new file mode 100644
index 00000000..09732675
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477419740.html
@@ -0,0 +1,15 @@
+
+
+Monitoring
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477419744.html b/docs/css/umn/en-us_topic_0000001477419744.html
new file mode 100644
index 00000000..49be2a8c
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477419744.html
@@ -0,0 +1,92 @@
+
+
+CPU Flow Control
+Context
CPU flow control can be implemented based on the CPU usage of a node.
+
You can configure a CPU usage threshold for a node to prevent it from breaking down under heavy traffic, and determine the threshold based on the expected traffic. If the CPU usage of a node exceeds the configured threshold, CPU flow control discards excess requests to the node to protect the cluster. Traffic within the node or passing through the Elasticsearch monitoring APIs is not affected.
+
The following table describes CPU flow control parameters.
+
+
Table 1 CPU flow control parameters
Parameter
+ |
+Type
+ |
+Description
+ |
+
+
+flowcontrol.cpu.enabled
+ |
+Boolean
+ |
+Whether to enable CPU flow control. If this function is enabled, the node access performance may be affected.
+Value: true or false
+Default value: false
+ |
+
+flowcontrol.cpu.percent_limit
+ |
+Integer
+ |
+Maximum CPU usage of a node.
+Value range: 0–100
+Default value: 90
+ |
+
+flowcontrol.cpu.allow_path
+ |
+List
+ |
+Path whitelist for CPU flow control. The paths specified in the allow_path whitelist are not under CPU flow control.
+The default value is null.
+A path can contain up to 32 characters. A maximum of 10 request paths can be configured. Wildcard characters are supported. For example, if this parameter is set to auto_*/_search, all the search requests of the indexes prefixed with auto_ are not under the flow control.
+ |
+
+flowcontrol.cpu.*.filter_path
+ |
+String
+ |
+Paths under CPU flow control.
+Maximum length: 32 characters
+Example:
+"flowcontrol.cpu.search.filter_path": "/index/_search",
+"flowcontrol.cpu.search.limit": 60,
+The default value is **, indicating all paths. If limit is configured and filter_path is not, it indicates that all the paths, except those in the whitelist, are under control. The whitelist takes precedence over the single-path rule. If a path is specified in both allow_path and filter_path, the requests from the path will be allowed.
+For example, if both filter_path and allow_path are set to abc/_search, abc/_search will not be under flow control.
+ |
+
+flowcontrol.cpu.*.limit
+ |
+Integer
+ |
+CPU threshold of request paths. If the CPU usage exceeds the threshold, flow control will be triggered.
+Value range: 0–100
+Default value: 90
+ |
+
+
+
+
+
+
Procedure
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run commands to enable or disable CPU flow control.
- Enabling CPU flow control
PUT /_cluster/settings
+{
+ "persistent": {
+ "flowcontrol.cpu.enabled": true,
+ "flowcontrol.cpu.percent_limit": 80,
+ "flowcontrol.cpu.allow_path": ["index/_search"]
+ }
+}
+ - Disabling CPU flow control
PUT /_cluster/settings
+{
+ "persistent": {
+ "flowcontrol.cpu.enabled": false
+ }
+}
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477419748.html b/docs/css/umn/en-us_topic_0000001477419748.html
new file mode 100644
index 00000000..ff72bfa1
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477419748.html
@@ -0,0 +1,70 @@
+
+
+P99 Latency Monitoring
+Context
Open-source Elasticsearch monitoring only covers the average latency of search requests, which cannot fully reflect the actual search performance of a cluster. To enhance monitoring, CSS allows you to monitor the P99 latency of search requests in a cluster.
+
+
Prerequisites
Currently, only clusters of version 7.6.2 and 7.10.2 support P99 latency monitoring.
+
+
Obtaining Monitoring Information
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation tree on the left, choose Dev Tools and run the following command to check the P99 latency of the current cluster:
GET /search/stats/percentile
+Example response:
+{
+ "overall" : {
+ "1.0" : 2.0,
+ "5.0" : 2.0,
+ "25.0" : 6.5,
+ "50.0" : 19.5,
+ "75.0" : 111.0,
+ "95.0" : 169.0,
+ "99.0" : 169.0,
+ "max" : 169.0,
+ "min" : 2.0
+ },
+ "last_one_day" : {
+ "1.0" : 2.0,
+ "5.0" : 2.0,
+ "25.0" : 6.5,
+ "50.0" : 19.5,
+ "75.0" : 111.0,
+ "95.0" : 169.0,
+ "99.0" : 169.0,
+ "max" : 169.0,
+ "min" : 2.0
+ },
+ "latest" : {
+ "1.0" : 26.0,
+ "5.0" : 26.0,
+ "25.0" : 26.0,
+ "50.0" : 26.0,
+ "75.0" : 26.0,
+ "95.0" : 26.0,
+ "99.0" : 26.0,
+ "max" : 26.0,
+ "min" : 26.0
+ }
+}
+
- In the response, overall indicates all the statistics that have been collected since the cluster startup, last_one_day indicates the statistics collected in the last day, and latest indicates the statistics that have been collected since the last reset.
- The calculated P99 latency is an estimation. It is more precise than the P50 latency.
- The P99 latency of a cluster is cleared and recalculated if the cluster is restarted.
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477419752.html b/docs/css/umn/en-us_topic_0000001477419752.html
new file mode 100644
index 00000000..9097ca1e
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477419752.html
@@ -0,0 +1,21 @@
+
+
+Creating a User and Granting Permissions
+This section describes how to use a group to grant permissions to a user. Figure 1 shows the process for granting permissions.
+
CSS has two types of user permissions: CSS administrator permission and read-only permission.
+
Prerequisites
Before assigning permissions to user groups, make sure you have learned about the system policies listed in Permissions Management.
+
+
Process Flow
Figure 1 Process of granting CSS permissions
+
- Create a user group and assign permissions.
Create a user group on the IAM console, and assign the CSS permission to the group.
+ - Create an IAM user and add it to a user group.
Create a user on the IAM console and add the user to the group created in step 1 (Create a user group and assign permissions).
+ - Log in and verify permissions.
Log in to the console as the created user, switch to the authorized region, and verify the permissions.
+- Choose Service List > Cloud Search Service. Then click Create Cluster on the CSS console. If the cluster cannot be created (assuming that the current permissions include only CSS ReadOnlyAccess), the CSS ReadOnlyAccess policy has already taken effect.
- Choose any other service from Service List. (Assume that the current policy contains only CSS ReadOnlyAccess.) If a message appears indicating insufficient permissions to access the service, the CSS ReadOnlyAccess policy has already taken effect.
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477419760.html b/docs/css/umn/en-us_topic_0000001477419760.html
new file mode 100644
index 00000000..e749265e
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477419760.html
@@ -0,0 +1,44 @@
+
+
+Features
+Enhanced aggregation is a service-aware optimization feature that improves the aggregation and analysis capability for observable services.
+
Currently, only clusters of version 7.10.2 support enhanced aggregation.
+
Working Principles
In large-scale dataset aggregation and analysis scenarios, data grouping and aggregation takes a lot of time. Improving the grouping aggregation capability depends on the following key features:
+
- Sorting key: Data is stored in sequence based on the sorting key.
- Clustering key: It is contained in the sorting key. Data is clustered based on the clustering key.
+
In the case of data clustering, enhanced aggregation uses vectorization technology to process data in batches, improving aggregation performance.
+
+
Table 1 Feature parametersParameter
+ |
+Description
+ |
+
+
+index.search.turbo.enabled
+ |
+Indicates whether to enable the feature. The default value is true.
+ |
+
+index.sort.field
+ |
+Sorting key
+ |
+
+index.cluter.field
+ |
+Clustering key
+ |
+
+
+
+
+
+
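+As an illustration of how these parameters might be set, the following sketch creates a hypothetical log index whose sorting key is (region, timestamp) and whose clustering key is region, so that the clustering key is contained in the sorting key as described above. The field names are assumptions, and index.sort.order is a standard Elasticsearch index-sorting setting added for completeness; verify the exact syntax for your cluster version.
+PUT /demo_logs
+{
+  "settings": {
+    "index.search.turbo.enabled": true,
+    "index.sort.field": ["region", "timestamp"],
+    "index.sort.order": ["asc", "desc"],
+    "index.cluter.field": "region"
+  },
+  "mappings": {
+    "properties": {
+      "region": { "type": "keyword" },
+      "timestamp": { "type": "date" }
+    }
+  }
+}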
Features
Based on different service requirements, enhanced aggregation can be used in the following three scenarios:
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477419764.html b/docs/css/umn/en-us_topic_0000001477419764.html
new file mode 100644
index 00000000..5247ace1
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477419764.html
@@ -0,0 +1,86 @@
+
+
+Accessing a Cluster from a Kibana Public Network
+For CSS clusters that have security mode enabled, you can enable Kibana public access. After the configuration is complete, an IP address will be provided to access Kibana of this cluster over the Internet.
+
You can configure Kibana public access during cluster creation, or after a cluster in security mode is created.
+
- You can enable Security Mode for clusters of version 6.5.4 and later versions.
- Kibana public access cannot be configured for Elasticsearch clusters created in security mode before this function was rolled out (before June 2020).
- The whitelist for Kibana public network access depends on the ELB whitelist. After you update the whitelist, the new settings take effect immediately for new connections. For existing persistent connections using the IP addresses that have been removed from the whitelist, the new settings take effect about 1 minute after these connections are stopped.
+
+
Configuring Kibana Public Access When Creating a Cluster
- Log in to the CSS management console.
- Click Create Cluster in the upper right corner. The Create Cluster page is displayed.
- On the Create Cluster page, enable Security Mode.
- Set Advanced Settings to Custom, enable Kibana Public Access, and set parameters.
+
Table 1 Kibana public access parametersParameter
+ |
+Description
+ |
+
+
+Bandwidth
+ |
+Bandwidth for accessing Kibana with the public IP address
+Value range: 1 to 100
+Unit: Mbit/s
+ |
+
+Access Control
+ |
+If you disable this function, all IP addresses can access Kibana through the public IP address. If you enable this function, only the IP addresses and IP address ranges in the whitelist can access Kibana through the public IP address.
+ |
+
+Whitelist
+ |
+IP address or IP address range allowed to access a cluster. Use commas (,) to separate multiple addresses. This parameter can be configured only when Access Control is enabled.
+You are advised to enable this function.
+ |
+
+
+
+
+After the cluster is created, click the cluster name to go to the Basic Information page. On the Kibana Public Access page, you can view the Kibana public IP address.
+
+
+
Configuring Kibana Public Access for an Existing Cluster
You can enable, disable, modify, and view Kibana public access for an existing cluster that has security mode enabled.
+
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, click the name of the target cluster.
- Click the Kibana Public Access tab. Turn on the Kibana Public Access switch to enable the Kibana public access function.
- On the displayed page, set parameters.
+
Table 2 Kibana public access parametersParameter
+ |
+Description
+ |
+
+
+Bandwidth
+ |
+Bandwidth for accessing Kibana with the public IP address
+Value range: 1 to 100
+Unit: Mbit/s
+ |
+
+Access Control
+ |
+If you disable this function, all IP addresses can access Kibana through the public IP address. If you enable this function, only the IP addresses and IP address ranges in the whitelist can access Kibana through the public IP address.
+ |
+
+Whitelist
+ |
+IP address or IP address range allowed to access a cluster. Use commas (,) to separate multiple addresses. This parameter can be configured only when Access Control is enabled.
+You are advised to enable this function.
+ |
+
+
+
+
+ - After you set the parameters, click OK.
+
+
Modifying Kibana Public Access
For clusters with Kibana public access configured, you can modify the bandwidth and access control settings, or disable this function.
+
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, click the name of the target cluster.
- Click the Kibana Public Access tab to modify the Kibana public access configuration.
- Modifying bandwidth
Click Modify on the right of Bandwidth. On the Modify Bandwidth page, modify the bandwidth and click OK.
+ - Modifying access control
Click Modify on the right of Access Control. On the Modify Access Control page, set Access Control and Whitelist, and click OK.
+ - Disabling Kibana public access
Toggle off the Kibana Public Access switch.
+
+
+
+
Accessing Kibana with the Public IP Address
After configuring Kibana public access, you will obtain a public IP address that you can use to access Kibana of this cluster.
+
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, click the name of the target cluster.
- Click the Kibana Public Access tab to obtain the Kibana public IP address.
- Use this IP address to access Kibana of this cluster through the Internet.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477419768.html b/docs/css/umn/en-us_topic_0000001477419768.html
new file mode 100644
index 00000000..bc6f26b2
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477419768.html
@@ -0,0 +1,13 @@
+
+
+Permissions Management
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477419772.html b/docs/css/umn/en-us_topic_0000001477419772.html
new file mode 100644
index 00000000..31db0bdc
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477419772.html
@@ -0,0 +1,63 @@
+
+
+HTTP Status Code Monitoring
+Context
When an external system accesses Elasticsearch through the HTTP protocol, a response and the corresponding status code are returned. The open-source Elasticsearch server does not collect status codes, so users cannot monitor the status of Elasticsearch APIs or cluster requests. CSS allows you to monitor the HTTP status codes of clusters.
+
+
Prerequisites
Currently, only clusters of versions 7.6.2 and 7.10.2 support HTTP status code monitoring.
+
+
Obtaining Status Codes
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation tree on the left, choose Dev Tools.
- On the console page of Dev Tools, run commands based on the cluster version.
- For clusters of version 7.6.2, run the following command to obtain the status code statistics:
GET /_nodes/http_stats
+Example response:
+{
+ "_nodes" : {
+ "total" : 1,
+ "successful" : 1,
+ "failed" : 0 },
+ "cluster_name" : "css-8362",
+ "nodes" : {
+ "F9IFdQPARaOJI7oL7HOXtQ" : {
+ "http_code" : {
+ "200" : 114,
+ "201" : 5,
+ "429" : 0,
+ "400" : 7,
+ "404" : 0,
+ "405" : 0
+ }
+ }
+ }
+ }
+ - For clusters of version 7.10.2, run the following command to obtain the status code statistics:
GET _nodes/stats/http
+Example response:
+{
+// ...
+ "cluster_name" : "css-2985",
+ "nodes" : {
+// ...
+ "omvR9_W-TsGApraMApREjA" : {
+
+// ...
+ "http" : {
+ "current_open" : 4,
+ "total_opened" : 37,
+ "http_code" : {
+ "200" : 25,
+ "201" : 7,
+ "429" : 0,
+ "400" : 3,
+ "404" : 0,
+ "405" : 0
+ }
+ }
+ }
+ }
+}
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477419776.html b/docs/css/umn/en-us_topic_0000001477419776.html
new file mode 100644
index 00000000..2ef23796
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477419776.html
@@ -0,0 +1,37 @@
+
+
+Connecting User-Built Kibana to an Elasticsearch Cluster
+To interconnect user-built Kibana with CSS Elasticsearch clusters, the following conditions must be met:
+
- The local environment must support access from external networks.
- Kibana is built on an ECS in the same VPC as Elasticsearch, and can be accessed from the local public network.
- Only Kibana images of the OSS version can be connected to Elasticsearch on CSS.
+
Example of a Kibana configuration file:
+
- Security mode:
elasticsearch.username: "***"
+elasticsearch.password: "***"
+elasticsearch.ssl.verificationMode: none
+server.ssl.enabled: false
+server.rewriteBasePath: false
+server.port: 5601
+logging.dest: /home/Ruby/log/kibana.log
+pid.file: /home/Ruby/run/kibana.pid
+server.host: 192.168.xxx.xxx
+elasticsearch.hosts: https://10.0.0.xxx:9200
+elasticsearch.requestHeadersWhitelist: ["securitytenant","Authorization"]
+opendistro_security.multitenancy.enabled: true
+opendistro_security.multitenancy.tenants.enable_global: true
+opendistro_security.multitenancy.tenants.enable_private: true
+opendistro_security.multitenancy.tenants.preferred: ["Private", "Global"]
+opendistro_security.multitenancy.enable_filter: false
+
+ - Non-security mode
server.port: 5601
+logging.dest: /home/Ruby/log/kibana.log
+pid.file: /home/Ruby/run/kibana.pid
+server.host: 192.168.xxx.xxx
+elasticsearch.hosts: http://10.0.0.xxx:9200
+
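+Before starting the user-built Kibana, it can help to confirm from the ECS that the Elasticsearch endpoint configured in elasticsearch.hosts is reachable. A minimal check using cURL (the address, username, and password are the placeholders from the configuration examples above):
+# Security mode: HTTPS with certificate verification skipped, matching verificationMode: none above
+curl -u username:password -k 'https://10.0.0.xxx:9200'
+# Non-security mode: plain HTTP
+curl 'http://10.0.0.xxx:9200'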
+
+
+
diff --git a/docs/css/umn/css_01_0183.html b/docs/css/umn/en-us_topic_0000001477419788.html
similarity index 96%
rename from docs/css/umn/css_01_0183.html
rename to docs/css/umn/en-us_topic_0000001477419788.html
index 14302a97..4bf3f7ba 100644
--- a/docs/css/umn/css_01_0183.html
+++ b/docs/css/umn/en-us_topic_0000001477419788.html
@@ -1,8 +1,8 @@
-
+
Sample Code for Two-Way Authentication During the Access to a Cluster
-This section provides the sample code for two-way authentication during the access to a cluster from a Java client.
-
ESSecuredClientWithCerDemo Code
1
+This section provides the sample code for two-way authentication during the access to a cluster from a Java client.
+ ESSecuredClientWithCerDemo Code 1
2
3
4
@@ -211,7 +211,7 @@
-SecuredHttpClientConfigCallback Code 1
+SecuredHttpClientConfigCallback Code 1
2
3
4
@@ -332,7 +332,7 @@
-pom.xml Code<?xml version="1.0" encoding="UTF-8"?>
+pom.xml Code<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
@@ -372,11 +372,11 @@
</dependencies>
</project>
-
+
diff --git a/docs/css/umn/en-us_topic_0000001477579336.html b/docs/css/umn/en-us_topic_0000001477579336.html
new file mode 100644
index 00000000..6797b1da
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477579336.html
@@ -0,0 +1,79 @@
+
+
+Configuring Cluster Monitoring
+You can use Cloud Eye to monitor the created clusters. After configuring the cluster monitoring, you can log in to the Cloud Eye management console to view cluster metrics.
+ The procedure for configuring cluster monitoring:
+ - Creating Alarm Rules: Customize alarm rules for the monitoring metrics. Once a metric exceeds the threshold, the system will notify you by sending emails or HTTP/HTTPS requests.
- Configuring Monitoring Metrics: Configure monitoring metrics for a cluster or a node in the cluster.
- Viewing Monitoring Metrics: View the statistics of the monitoring metrics in specific periods.
+ Prerequisites- The cluster is in the Available or Processing status.
- The cluster has been running properly for more than 10 minutes.
+
+ Recommended Monitoring Metrics- Cluster CPU and JVM usage. You are advised to configure the following monitoring metrics: average JVM heap usage, maximum JVM heap usage, average CPU usage, and maximum CPU usage.
- Cluster write and query latency and throughput. You are advised to configure the following monitoring metrics: average index latency, average index rate, average search latency, and average QPS.
- Cluster write and query queue and rejected tasks. You are advised to configure the following monitoring metrics: tasks in write queue, tasks in search queue, rejected tasks in write queue, and rejected tasks in search queue.
+
+ Creating Alarm Rules- Log in to the Cloud Eye console.
- In the navigation pane on the left, choose Alarm Management > Alarm Rules.
- In the Resource Type column, select Cloud Search Service as criteria to search for alarm rules that meet the requirements.
If no alarm rules are available, create one by referring to the "Creating an Alarm Rule" section. For details about how to set Resource Type and Dimension, see Table 1.
+
+Table 1 Alarm rule configuration parameterParameter
+ |
+Description
+ |
+Remark
+ |
+
+
+Resource Type
+ |
+Type of the resource that the alarm rule is created for
+ |
+Select Cloud Search Service.
+ |
+
+Dimension
+ |
+Metric dimension of the selected resource type
+ |
+CSS supports two dimensions. Select a dimension as required.
+- CSS Clusters: Alarm rules are specified by cluster.
- CSS Clusters - CSS Instances: Alarm rules are specified by node in a cluster.
+ |
+
+
+
+
+
+
+ Configuring Monitoring Metrics- Create a monitoring panel by referring to the "Creating a Dashboard" section. If an available monitoring panel has been created, skip this step.
- Add CSS monitoring graphs by referring to the "Adding a Graph" section.
For details about how to set Resource Type and Dimension, see Table 2.
+
+Table 2 Graph configuration parameterParameter
+ |
+Description
+ |
+Remark
+ |
+
+
+Resource Type
+ |
+Type of the resource to be monitored
+ |
+Select Cloud Search Service.
+ |
+
+Dimension
+ |
+Metric dimension
+ |
+CSS supports two dimensions. Select a dimension as required.
+- CSS Clusters: Monitoring is executed by cluster.
- CSS Clusters - CSS Instances: Monitoring is executed by node in a cluster.
+ |
+
+
+
+
+
+
+ Viewing Monitoring Metrics- Log in to the CSS management console.
- Choose Clusters. Locate the target cluster and choose More > View Metric in the Operation column.
- Select a time range.
- View the monitoring metrics.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477579340.html b/docs/css/umn/en-us_topic_0000001477579340.html
new file mode 100644
index 00000000..d3026fdd
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477579340.html
@@ -0,0 +1,33 @@
+
+
+Cluster Management
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477579352.html b/docs/css/umn/en-us_topic_0000001477579352.html
new file mode 100644
index 00000000..83006936
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477579352.html
@@ -0,0 +1,99 @@
+
+
+HTTP/HTTPS Flow Control
+ContextYou can run commands in Kibana to enable or disable HTTP/HTTPS flow control for your cluster. The command parameters are as follows.
+
+ Table 1 HTTP/HTTPS flow control parametersParameter
+ |
+Type
+ |
+Description
+ |
+
+
+flowcontrol.http.enabled
+ |
+Boolean
+ |
+Whether to enable HTTP/HTTPS flow control. This function is disabled by default. Enabling it may affect node access performance.
+Value: true or false
+Default value: false
+ |
+
+flowcontrol.http.allow
+ |
+List<String>
+ |
+IP address whitelist.
+It can contain multiple IP addresses and masks, or an IP address list. Use commas (,) to separate multiple values. Example: xx.xx.xx.xx/24,xx.xx.xx.xx/24 or xx.xx.xx.xx,xx.xx.xx.xx.
+The default value is null.
+ |
+
+flowcontrol.http.deny
+ |
+List<String>
+ |
+IP address blacklist.
+Multiple IP addresses and masks or an IP address list can be configured. Use commas (,) to separate multiple IP addresses and masks.
+The default value is null.
+ |
+
+flowcontrol.http.concurrent
+ |
+Integer
+ |
+Maximum concurrent HTTP/HTTPS connections.
+Default value: Number of available cores on a node x 400
+ |
+
+flowcontrol.http.newconnect
+ |
+Integer
+ |
+Maximum new connections that can be created for HTTP/HTTPS requests per second.
+Default value: Number of available cores on a node x 200
+ |
+
+flowcontrol.http.warmup_period
+ |
+Integer
+ |
+Time required for the HTTP/HTTPS connection setup rate to ramp up to the maximum. For example, if flowcontrol.http.newconnect is set to 100 and flowcontrol.http.warmup_period is set to 5000ms, the allowed connection setup rate increases gradually and reaches 100 new connections per second after 5 seconds.
+Value range: 0–10000
+Unit: ms
+Default value: 0
+ |
+
+
+
+
+
+ Procedure- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run commands to enable or disable HTTP/HTTPS flow control.
- Enabling HTTP/HTTPS flow control for a node
PUT /_cluster/settings
+{
+ "persistent": {
+ "flowcontrol.http.enabled": true,
+ "flowcontrol.http.allow": ["192.168.0.1/24", "192.168.2.1/24"],
+ "flowcontrol.http.deny": "192.168.1.1/24",
+ "flowcontrol.http.concurrent": 1000,
+ "flowcontrol.http.newconnect": 1000,
+ "flowcontrol.http.warmup_period": 0
+ }
+}
+ If all parameters are set to null, they will be restored to default values.
+
+ - Disabling HTTP/HTTPS flow control for a node
PUT /_cluster/settings
+{
+ "persistent": {
+ "flowcontrol.http.enabled": false
+ }
+}
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477579356.html b/docs/css/umn/en-us_topic_0000001477579356.html
new file mode 100644
index 00000000..7a7e3409
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477579356.html
@@ -0,0 +1,73 @@
+
+
+Request Sampling
+ContextRequest sampling can record the access IP addresses, the number of accessed nodes, request paths, request URLs, and request bodies, which can be used to obtain the IP addresses and paths of clients that have sent a large number of access requests.
+ The following table describes request sampling parameters.
+
+ Table 1 Request sampling parametersParameter
+ |
+Type
+ |
+Description
+ |
+
+
+flowcontrol.statics.enabled
+ |
+Boolean
+ |
+Whether to enable request sampling. Request sampling may affect node performance.
+Value: true or false
+Default value: false
+ |
+
+flowcontrol.statics.threshold
+ |
+Integer
+ |
+Number of recent access requests whose statistics are collected. The value 100 indicates that statistics will be collected on the 100 IP addresses and 100 URLs that are most frequently accessed.
+Minimum value: 10
+Maximum value: 1000
+Default value: 100
+ |
+
+flowcontrol.statics.sample_frequency
+ |
+Integer
+ |
+Path sampling frequency. If this parameter is set to 100, samples are collected from every 100 requests.
+Minimum value: 50
+Default value: 100
+ |
+
+
+
+
+ - The IP address statistics and URL sampling statistics are cached based on their access time. If the cache space reaches the threshold (flowcontrol.statics.threshold), the records of the earliest access will be deleted.
- In URL sampling, an access path is uniquely identified by its URL hash.
+
+
+ Procedure- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run commands to enable or disable sampling.
- Enabling sampling
PUT /_cluster/settings
+{
+ "persistent": {
+ "flowcontrol.statics.enabled": true,
+ "flowcontrol.statics.threshold": 100,
+ "flowcontrol.statics.sample_frequency": 50
+ }
+}
+ - Disabling sampling
PUT /_cluster/settings
+{
+ "persistent": {
+ "flowcontrol.statics.enabled": false
+ }
+}
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477579364.html b/docs/css/umn/en-us_topic_0000001477579364.html
new file mode 100644
index 00000000..b1edaee4
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477579364.html
@@ -0,0 +1,26 @@
+
+
+One-click Traffic Blocking
+You can block all connections in one click, except the connections that pass through O&M APIs, to handle unexpected traffic bursts and quickly recover your cluster.
+ Procedure- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run commands to enable or disable one-click traffic blocking.
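+The exact commands are not listed here. As a hedged sketch only, assuming the blocking switch follows the flowcontrol.* settings pattern used elsewhere in this guide and is named flowcontrol.break.enabled (verify the parameter name for your cluster version):
+# Enable one-click traffic blocking (assumed parameter name)
+PUT /_cluster/settings
+{
+  "persistent": {
+    "flowcontrol.break.enabled": true
+  }
+}
+# Disable one-click traffic blocking (assumed parameter name)
+PUT /_cluster/settings
+{
+  "persistent": {
+    "flowcontrol.break.enabled": false
+  }
+}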
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477579368.html b/docs/css/umn/en-us_topic_0000001477579368.html
new file mode 100644
index 00000000..f1a461d4
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477579368.html
@@ -0,0 +1,67 @@
+
+
+Viewing the Cluster Runtime Status and Storage Capacity Status
+On the Dashboard page of the CSS management console, you can view information about the status and storage capacity of existing clusters.
+
+ Table 1 Cluster status descriptionStatus
+ |
+Description
+ |
+
+
+Available
+ |
+The cluster is running properly and is providing services.
+ |
+
+Abnormal
+ |
+The cluster creation failed or the cluster is unavailable.
+If a cluster is in the unavailable status, you can delete the cluster or use snapshots created when the cluster is available to restore data to other clusters. However, operations such as expanding cluster capacity, accessing Kibana, creating snapshots, and restoring snapshots to the cluster are not allowed. When a cluster is in the unavailable status, data importing is not recommended to avoid data loss. You can view the cluster metrics or restart the cluster. However, the operations may fail. If the operations fail, contact technical support in a timely manner.
+ |
+
+Processing
+ |
+The cluster is being restarted, scaled, backed up, or recovered.
+ |
+
+Creating
+ |
+The cluster is being created.
+ |
+
+
+
+
+
+ Table 2 Cluster storage capacity status descriptionStatus
+ |
+Description
+ |
+
+
+Normal
+ |
+The storage capacity usage of all nodes in a cluster is less than 50%.
+ |
+
+Warning
+ |
+The storage capacity usage of any node in a cluster is greater than or equal to 50% and less than 80%.
+ |
+
+Danger
+ |
+The storage capacity usage of any node in a cluster is greater than or equal to 80%. You are advised to increase the storage space of the cluster to achieve normal data search or analysis.
+ |
+
+Abnormal
+ |
+The cluster storage capacity usage is unknown. For example, if the status of a cluster is Abnormal due to faults, the storage space status of the cluster will be Abnormal.
+ |
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477579372.html b/docs/css/umn/en-us_topic_0000001477579372.html
new file mode 100644
index 00000000..74828d4f
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477579372.html
@@ -0,0 +1,100 @@
+
+
+HTTP/HTTPS Flow Control
+You can run commands in Kibana to enable or disable HTTP/HTTPS flow control for your cluster.
+ Procedure- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run commands to enable or disable HTTP/HTTPS flow control.
- Enabling HTTP/HTTPS flow control for a node
PUT /_cluster/settings
+{
+ "persistent": {
+ "flowcontrol.http.enabled": true,
+ "flowcontrol.http.allow": ["192.168.0.1/24", "192.168.2.1/24"],
+ "flowcontrol.http.deny": "192.168.1.1/24",
+ "flowcontrol.http.concurrent": 1000,
+ "flowcontrol.http.newconnect": 1000,
+ "flowcontrol.http.warmup_period": 0
+ }
+}
+ If all parameters are set to null, they will be restored to default values.
+
+ - Disabling HTTP/HTTPS flow control for a node
PUT /_cluster/settings
+{
+ "persistent": {
+ "flowcontrol.http.enabled": false
+ }
+}
+
+
+For details about the parameters in this command, see Table 1.
+
+Table 1 HTTP/HTTPS flow control parametersParameter
+ |
+Type
+ |
+Description
+ |
+
+
+flowcontrol.http.enabled
+ |
+Boolean
+ |
+Whether to enable HTTP/HTTPS flow control. This function is disabled by default. Enabling it may affect node access performance.
+Value: true or false
+Default value: false
+ |
+
+flowcontrol.http.allow
+ |
+List<String>
+ |
+IP address whitelist.
+It can contain multiple IP addresses and masks, or an IP address list. Use commas (,) to separate multiple values. Example: xx.xx.xx.xx/24,xx.xx.xx.xx/24 or xx.xx.xx.xx,xx.xx.xx.xx.
+The default value is null.
+ |
+
+flowcontrol.http.deny
+ |
+List<String>
+ |
+IP address blacklist.
+Multiple IP addresses and masks or an IP address list can be configured. Use commas (,) to separate multiple IP addresses and masks.
+The default value is null.
+ |
+
+flowcontrol.http.concurrent
+ |
+Integer
+ |
+Maximum concurrent HTTP/HTTPS connections.
+Default value: Number of available cores on a node x 400
+ |
+
+flowcontrol.http.newconnect
+ |
+Integer
+ |
+Maximum new connections that can be created for HTTP/HTTPS requests per second.
+Default value: Number of available cores on a node x 200
+ |
+
+flowcontrol.http.warmup_period
+ |
+Integer
+ |
+Time required for the HTTP/HTTPS connection setup rate to ramp up to the maximum. For example, if flowcontrol.http.newconnect is set to 100 and flowcontrol.http.warmup_period is set to 5000ms, the allowed connection setup rate increases gradually and reaches 100 new connections per second after 5 seconds.
+Value range: 0–10000
+Unit: ms
+Default value: 0
+ |
+
+
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477579376.html b/docs/css/umn/en-us_topic_0000001477579376.html
new file mode 100644
index 00000000..37914109
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477579376.html
@@ -0,0 +1,23 @@
+
+
+Instructions
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477579380.html b/docs/css/umn/en-us_topic_0000001477579380.html
new file mode 100644
index 00000000..34a78b64
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477579380.html
@@ -0,0 +1,17 @@
+
+
+Managing Indexes
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477579388.html b/docs/css/umn/en-us_topic_0000001477579388.html
new file mode 100644
index 00000000..8b269c1d
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477579388.html
@@ -0,0 +1,219 @@
+
+
+Procedure
+The large query isolation and global timeout features are disabled by default. If you enable them, the configuration will take effect immediately. Perform the following steps to configure the features:
+ - Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster, and click Access Kibana in the Operation column.
- In the navigation pane of Kibana on the left, choose Dev Tools. Run the following command to enable large query isolation and global timeout features:
PUT _cluster/settings
+{
+ "persistent": {
+ "search.isolator.enabled": true,
+ "search.isolator.time.enabled": true
+ }
+}
+Each of the two features has an independent switch and the following parameters.
+
+Table 1 Parameters for large query isolation and global timeout durationSwitch
+ |
+Parameter
+ |
+Description
+ |
+
+
+search.isolator.enabled
+ |
+search.isolator.memory.task.limit
+search.isolator.time.management
+ |
+Thresholds of a shard query task. A query task exceeding one of these thresholds is regarded as a large query task.
+ |
+
+search.isolator.memory.pool.limit
+search.isolator.memory.heap.limit
+search.isolator.count.limit
+ |
+Resource usage thresholds in the isolation pool. If the resource usage of a query task exceeds one of these thresholds, the task will be intercepted.
+ NOTE: search.isolator.memory.heap.limit defines the limit on the heap memory consumed by write, query, and other operations of a node. If the limit is exceeded, large query tasks in the isolation pool will be interrupted.
+
+ |
+
+search.isolator.strategy
+search.isolator.strategy.ratio
+ |
+Policy for selecting a query task in the isolation pool.
+ |
+
+search.isolator.time.enabled
+ |
+search.isolator.time.limit
+ |
+Global timeout interval of query tasks.
+ |
+
+
+
+
+ - Configure the large query isolation and global timeout duration separately.
- Configure the thresholds of a shard query task. A query task exceeding one of these thresholds is regarded as a large query task.
PUT _cluster/settings
+{
+ "persistent": {
+ "search.isolator.memory.task.limit": "50MB",
+ "search.isolator.time.management": "10s"
+ }
+}
+
+Table 2 Parameter descriptionParameter
+ |
+Data Type
+ |
+Description
+ |
+
+
+search.isolator.memory.task.limit
+ |
+String
+ |
+Threshold of the memory requested by a query task to perform aggregation or other operations. If the requested memory exceeds the threshold, the task will be isolated and observed.
+Value range: 0b to the maximum heap memory of a node
+Default value: 50MB
+ NOTE: You can run the following command to query the current heap memory and the maximum heap memory of a cluster:
+ GET _cat/nodes?&h=id,ip,port,r,ramPercent,ramCurrent,heapMax,heapCurrent
+
+ |
+
+search.isolator.time.management
+ |
+String
+ |
+Threshold of the duration of a query, timed from when cluster resources start being used for the query. If the duration of a query exceeds the threshold, the query will be isolated and observed.
+Value range: ≥ 0ms
+Default value: 10s
+ |
+
+
+
+
+ - Configure the resource usage thresholds in the isolation pool. If the resource usage of a query task exceeds one of these thresholds, the task will be intercepted.
PUT _cluster/settings
+{
+ "persistent": {
+ "search.isolator.memory.pool.limit": "50%",
+ "search.isolator.memory.heap.limit": "90%",
+ "search.isolator.count.limit": 1000
+ }
+}
+
+Table 3 Parameter descriptionParameter
+ |
+Data Type
+ |
+Description
+ |
+
+
+search.isolator.memory.pool.limit
+ |
+String
+ |
+Threshold of the heap memory percentage of the current node. If the total memory requested by large query tasks in the isolation pool exceeds the threshold, the interrupt control program will be triggered to cancel one of the tasks.
+Value range: 0.0 to 100.0%
+Default value: 50%
+ |
+
+search.isolator.memory.heap.limit
+ |
+String
+ |
+Heap memory threshold of the current node. If the heap memory of the node exceeds the threshold, the interrupt control program will be triggered to cancel a large query task in the isolation pool.
+Value range: 0.0 to 100.0%
+Default value: 90%
+ |
+
+search.isolator.count.limit
+ |
+Integer
+ |
+Threshold of the number of large query tasks in the current node isolation pool. If the number of observed query tasks exceeds the threshold, the interrupt control program will be triggered to stop accepting new large queries. New large query requests will be directly canceled.
+Value range: 10–50000
+Default value: 1000
+ |
+
+
+
+
+ In addition to the search.isolator.memory.pool.limit and search.isolator.count.limit parameters, you can configure search.isolator.memory.task.limit and search.isolator.time.management to control the number of query tasks that enter the isolation pool.
+
+ - Policy for selecting a query task in the isolation pool.
PUT _cluster/settings
+{
+ "persistent": {
+ "search.isolator.strategy": "fair",
+ "search.isolator.strategy.ratio": "0.5%"
+ }
+}
+
+Parameter
+ |
+Data Type
+ |
+Description
+ |
+
+
+search.isolator.strategy
+ |
+String
+ |
+Policy for selecting large queries when the interrupt control program is triggered. The selected query will be interrupted.
+ NOTE: The large query isolation pool is checked every second until the heap memory is within the safe range.
+
+Values: fair, mem-first, or time-first- mem-first: The query task that uses the most heap memory in the isolation pool is interrupted.
- time-first: The query task that has been running for the longest time in the isolation pool is interrupted.
- fair: If the difference between the heap memory of shard queries is smaller than Maximum_heap_memory x search.isolator.strategy.ratio, the query that takes the longest time should be interrupted. Otherwise, the query that uses the most heap memory is interrupted.
+
+Default value: fair
+ |
+
+search.isolator.strategy.ratio
+ |
+String
+ |
+Threshold of the fair policy. This parameter takes effect only if search.isolator.strategy is set to fair. If the difference between the memory usage of large query tasks does not exceed the threshold, the query that takes the longest time is interrupted. If the difference exceeds the threshold, the query that uses the most memory is interrupted.
+Value range: 0.0 to 100.0%
+Default value: 1%
+ |
+
+
+
+
+ - Configure the global timeout duration of query tasks.
PUT _cluster/settings
+{
+ "persistent": {
+ "search.isolator.time.limit": "120s"
+ }
+}
+
+Parameter
+ |
+Data Type
+ |
+Description
+ |
+
+
+search.isolator.time.limit
+ |
+String
+ |
+Global query timeout duration. If this function is enabled, all the query tasks that exceed the specified duration will be canceled.
+Value range: ≥ 0ms
+Default value: 120s
+ |
+
+
+
+
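+To confirm which isolation and timeout settings are currently applied, you can read them back from the cluster settings. A small verification sketch using the standard cluster settings API (flat_settings is a generic Elasticsearch request parameter); the search.isolator.* values configured above appear under persistent:
+GET _cluster/settings?flat_settings=true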
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477579396.html b/docs/css/umn/en-us_topic_0000001477579396.html
new file mode 100644
index 00000000..79f88eed
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477579396.html
@@ -0,0 +1,15 @@
+
+
+Deleting a Cluster
+You can delete clusters that you no longer need.
+ - If you delete a cluster, the cluster service data will be cleared. Exercise caution when performing this operation.
- The snapshots of a cluster stored in OBS are not deleted with the cluster. You can restore a deleted cluster using its snapshots stored in the OBS bucket.
+
+ Procedure- Log in to the CSS management console.
- In the navigation tree on the left, select a cluster type. The cluster list page is displayed.
- Locate the target cluster and click More > Delete in the Operation column.
- In the displayed dialog box, enter the name of the cluster to be deleted and click OK.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477579404.html b/docs/css/umn/en-us_topic_0000001477579404.html
new file mode 100644
index 00000000..4b794fd2
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477579404.html
@@ -0,0 +1,23 @@
+
+
+Replacing a Specified Node
+If a node in the cluster is faulty, you can create a new node with the same specifications to replace it. During the replacement of a specified node, data of that node will be migrated in advance and will not be lost.
+ PrerequisitesThe target cluster is available and has no tasks in progress.
+
+ Constraints- Only one node can be replaced at a time.
- The ID, IP address, specifications, and AZ of the new node will be the same as those of the original one.
- The configurations you modified manually will not be retained after node replacement. For example, if you have manually added a return route to the original node, you need to add it to the new node again after the node replacement is complete.
- If the node you want to replace is a data node (ess) or cold data node (ess-cold), pay attention to the following precautions:
- Before a data node or cold data node is replaced, its data needs to be migrated to other nodes. To properly store the data, ensure the maximum sum of replicas and primary shards of an index is smaller than the total number of data nodes (ess and ess-cold nodes) in the cluster. The node replacement duration depends heavily on the migration speed.
- Clusters whose version is earlier than 7.6.2 cannot have closed indexes. Otherwise, data nodes or cold data nodes cannot be replaced.
- The AZ of the node to be replaced must have two or more data nodes (including ess and ess-cold).
- If the cluster of the node to be replaced does not have a master node (ess-master), the number of available data nodes (including ess and ess-cold) in the cluster must be greater than or equal to 3.
- The preceding precautions do not apply if you are replacing a master node (ess-master) or client node (ess-client).
- The precautions 1 to 4 do not apply if you are replacing a faulty node, regardless of its type. Faulty nodes are not included in _cat/nodes.
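+Before replacing a data node, you can check the node roles and the shard and replica configuration of your indexes against the constraints above using standard read-only _cat APIs, for example:
+# List nodes with their roles to count data nodes (ess/ess-cold)
+GET _cat/nodes?v&h=name,ip,node.role
+# List indexes with their primary shard and replica counts and status
+GET _cat/indices?v&h=index,pri,rep,status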
+
+
+ Procedure- Log in to the CSS management console.
- In the navigation pane, choose a cluster type. The cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- On the Modify Configuration page, click the Replace Node tab.
- On the Replace Node tab page, set the following parameters:
+
- Whether to perform data migration: If this option is selected, data migration is performed. If the target node has disabled indexes or indexes that have no replicas, this option must be selected.
- Select the node to be replaced in the data node table.
+ - Click Submit.
- Click Back to Cluster List to switch to the Clusters page. The Task Status is Upgrading. When Cluster Status changes to Available, the node has been successfully replaced.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477579408.html b/docs/css/umn/en-us_topic_0000001477579408.html
new file mode 100644
index 00000000..5aa14aa0
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477579408.html
@@ -0,0 +1,76 @@
+
+
+Enabling Index Monitoring
+- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- Choose Dev Tools in the navigation pane on the left and run the following command to enable index monitoring:
PUT _cluster/settings
+{
+ "persistent": {
+ "css.monitoring.index.enabled": "true"
+ }
+}
+ - (Optional) To monitor a specific index, run the following command on the Dev Tools page of Kibana:
PUT _cluster/settings
+{
+ "persistent": {
+ "css.monitoring.index.enabled": "true",
+ "css.monitoring.index.interval": "30s",
+ "css.monitoring.index.indices": ["index_name"],
+ "css.monitoring.history.duration": "3d"
+ }
+}
+
+Table 1 Parameter descriptionParameter
+ |
+Data Type
+ |
+Description
+ |
+
+
+css.monitoring.index.enabled
+ |
+Boolean
+ |
+Whether to enable index monitoring. If this parameter is set to true, the monitoring will be enabled.
+Default value: false
+ |
+
+css.monitoring.index.interval
+ |
+Time
+ |
+Interval for collecting index monitoring data.
+Minimum value: 1s
+Default value: 10s
+ |
+
+css.monitoring.index.indices
+ |
+String
+ |
+Name of an index to be monitored. By default, all indexes are monitored. You can configure specific indexes or a type of indexes to monitor.
+Example:
+- ""css.monitoring.index.indices": ["index_name"]" indicates only index_name is monitored.
- "css.monitoring.index.indices": ["log_*"] indicates that only indexes starting with log_ are monitored.
- "css.monitoring.index.indices": ["index1", "index2"] indicates that index1 and index2 are monitored.
+Default value: * (indicating that all indexes are monitored)
+ |
+
+css.monitoring.history.duration
+ |
+Time
+ |
+Retention period of monitoring data storage. The default period is a week.
+Minimum value: 1d
+Default value: 7d
+ |
+
+
+
+
+ Indexes starting with monitoring-eye-css-* are regarded as monitoring indexes and will not be monitored.
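+The note above implies that the collected metrics are stored in internal indexes prefixed with monitoring-eye-css-. As a hedged sketch (the exact index naming and document fields depend on the cluster version, and the timestamp sort field is an assumption), you can inspect recent monitoring documents with a standard search:
+GET monitoring-eye-css-*/_search
+{
+  "size": 5,
+  "sort": [ { "timestamp": { "order": "desc", "unmapped_type": "date" } } ]
+}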
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477579412.html b/docs/css/umn/en-us_topic_0000001477579412.html
new file mode 100644
index 00000000..b776e215
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477579412.html
@@ -0,0 +1,38 @@
+
+
+Accessing a Cluster Using a VPC Endpoint
+If the VPC endpoint service is enabled, you can use the private domain name or node IP address generated by the endpoint to access the cluster. When the service is enabled, a VPC endpoint is created by default, and you can select Private Domain Name Creation as required. VPC endpoint creation requires specific permissions. For details, see "VPCEP Permissions".
+ VPC Endpoint uses a shared load balancer for intranet access. If your workloads require quick access, you are advised to connect a dedicated load balancer to the cluster. For details, see Connecting to a Dedicated Load Balancer.
+ Public IP address access and the VPC endpoint service share a load balancer. If you have configured a public access whitelist, access to the cluster through VPCEP (from both public and private IP addresses) is also restricted by that whitelist. In this case, add the IP address range 198.19.128.0/17 to the public access whitelist to allow traffic through VPCEP.
+
+ Enabling the VPC Endpoint Service- Log in to the CSS management console.
- Click Create Cluster in the upper right corner.
- On the Create Cluster page, set Advanced Settings to Custom. Enable the VPC endpoint service.
- Private Domain Name Creation: If you enable this function, the system automatically creates a private domain name for you, which you can use to access the cluster.
- VPC Endpoint Service Whitelist: You can add an authorized account ID to the VPC endpoint service whitelist. Then you can access the cluster using the private domain name or the node IP address.
- You can click Add to add multiple accounts.
- Click Delete in the Operation column to delete the accounts that are not allowed to access the cluster.
+ - If the authorized account ID is set to *, all users are allowed to access the cluster.
- You can view authorized account IDs on the My Credentials page.
+
+
+
+ Managing VPC Endpoint ServiceYou can enable the VPC endpoint service while creating a cluster, and also enable it by performing the following steps after cluster creation.
+ - Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, click the name of the target cluster.
- Click the VPC Endpoint Service tab, and turn on the button next to VPC Endpoint Service.
In the displayed dialog box, you can determine whether to enable the private domain name. Click Yes to enable the VPC endpoint service.
+
+ - (Optional) Click Modify next to VPC Endpoint Service Whitelist to update the existing whitelist.
- Manage VPC endpoints.
The VPC Endpoint Service page displays all VPC endpoints connected to the current VPC endpoint service.
+Figure 1 Managing VPC endpoints
+Click Accept or Reject in the Operation column to change the node status. If you reject the connection with a VPC endpoint, you cannot access the cluster through the private domain name generated by that VPC endpoint.
+
+
+ Accessing the Cluster Using the Private Domain Name or Node IP Address- Obtain the private domain name or node IP address.
Log in to the CSS console, click the target cluster name and go to the Cluster Information page. Click the VPC Endpoint Service tab and view the private domain name.
+
+ - Run the cURL command to execute the API or call the API by using a program before accessing the cluster. For details about Elasticsearch operations and APIs, see the Elasticsearch Reference.
The ECS must meet the following requirements:
+- Sufficient disk space is allocated for the ECS.
- The ECS and the cluster are generally required to be in the same VPC. However, after the VPC endpoint service is enabled, you can access the cluster from the ECS even when the cluster is not in the same VPC as the ECS.
- The security group of the ECS must be the same as that of the cluster.
If this requirement is not met, modify the ECS security group or configure the inbound and outbound rules of the ECS security group to allow the ECS security group to be accessed by all security groups of the cluster. For details, see Configuring Security Group Rules.
+ - Configure security group rule settings of the target CSS cluster. Set Protocol to TCP and Port Range to 9200 or a port range including port 9200 for both the outbound and inbound directions.
+- If the cluster you access does not have the security mode enabled, run the following command:
curl 'http://vpcep-7439f7f6-2c66-47d4-b5f3-790db4204b8d.region01.xxxx.com:9200/_cat/indices'
+ - If the cluster you access has the security mode enabled, access the cluster using HTTPS and add the username and password to the cURL command with the -u option.
curl -u username:password -k 'https://vpcep-7439f7f6-2c66-47d4-b5f3-790db4204b8d.region01.xxxx.com:9200/_cat/indices'
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477739336.html b/docs/css/umn/en-us_topic_0000001477739336.html
new file mode 100644
index 00000000..1681b856
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477739336.html
@@ -0,0 +1,104 @@
+
+
+Managing Logs
+CSS provides log backup and search functions to help you locate faults. You can back up cluster logs to OBS buckets and download required log files to analyze and locate faults.
+ Log Query- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, click the name of the target cluster. The cluster information page is displayed.
- In the navigation pane on the left, choose Log Management.
- Query logs on the log management page.
Select the node, log type, and log level you want to query, and then click the search icon. The query result is displayed.
+When you search for logs, the latest 10,000 logs are matched. A maximum of 100 logs are displayed.
+
+
+ Enabling Log Backup- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, click the name of the target cluster. The cluster information page is displayed.
- Click the Logs tab and toggle on the Log Management switch.
- In the Edit Log Backup Configuration dialog box, set the parameters.
In the displayed dialog box, an OBS bucket and an IAM agency are automatically created for log backup. You can change the default values by referring to Table 1.
+If the Log Management function has been enabled for the cluster, you can click the edit icon on the right of Log Backup Configuration and modify the configuration in the displayed Edit Log Backup Configuration dialog box. For details, see Table 1.
+
+Table 1 Parameters for configuring log backupParameter
+ |
+Description
+ |
+Remarks
+ |
+
+
+OBS Bucket
+ |
+Select an OBS bucket from the drop-down list for storing logs. You can also click Create Bucket on the right to create an OBS bucket.
+ |
+The OBS bucket and the cluster must be in the same region.
+ NOTE: To let an IAM user access an OBS bucket, you need to grant the GetBucketStoragePolicy, GetBucketLocation, ListBucket, and ListAllMyBuckets permissions to the user.
+
+ |
+
+Backup Path
+ |
+Storage path of logs in the OBS bucket
+ |
+The backup path configuration rules are as follows: - The backup path cannot contain the following characters: \:*?"<>|
- The backup path cannot start with a slash (/).
- The backup path cannot start or end with a period (.).
- The total length of the backup path cannot exceed 1,023 characters.
+
+ |
+
+IAM Agency
+ |
+IAM agency authorized by the current account for CSS to access or maintain data stored in the OBS bucket. You can also click Create IAM Agency on the right to create an IAM agency.
+ |
+The IAM agency must meet the following requirements:
+- Agency Type must be Cloud service.
- Set Cloud Service to CSS.
- Mandatory policies: Tenant Administrator
+ |
+
+
+
+
+ - Back up logs.
- Automatically backing up logs
Click the icon on the right of Auto Backup to enable the auto backup function.
+After the automatic backup function is enabled, set the backup start time in the Configure Auto Backup dialog box. When the scheduled time arrives, the system will back up logs automatically.
+After the automatic backup function is enabled, you can click the edit icon on the right of the parameter to change the backup start time.
+ - Manually backing up logs
On the Log Backup tab page, click Back Up. On the displayed page, click Yes to start backup.
+If Task Status in the log backup list is Successful, the backup is successful.
+ All logs in the cluster are copied to a specified OBS path. You can view or download log files from the path of the OBS bucket.
+
+
+ - Search for logs.
On the Log Search page, select the target node, log type, and log level, and click the search icon. The search results are displayed.
+When you search for logs, the latest 10,000 logs are matched. A maximum of 100 logs are displayed.
+
+
+ Viewing LogsAfter backing up logs, you can click Backup Path to go to the OBS console and view the logs.
+ Backed up logs mainly include deprecation logs, run logs, index slow logs, and search slow logs. Table 2 lists the storage types of the OBS bucket.
+
+ Table 2 Log typesLog Name
+ |
+Description
+ |
+
+
+clustername_deprecation.log
+ |
+Deprecation log
+ |
+
+clustername_index_indexing_slowlog.log
+ |
+Index slow log
+ |
+
+clustername_index_search_slowlog.log
+ |
+Search slow log
+ |
+
+clustername.log
+ |
+Elasticsearch run log
+ |
+
+clustername_access.log
+ |
+Access log
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477739344.html b/docs/css/umn/en-us_topic_0000001477739344.html
new file mode 100644
index 00000000..1ff625f2
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477739344.html
@@ -0,0 +1,346 @@
+
+
+Creating a Cluster in Security Mode
+This section describes how to create an Elasticsearch cluster in security mode.
+ Public IP address access and Kibana public access can be used only after security mode is enabled.
+
+ Context- When creating a cluster, the number of nodes that can be added varies according to the node type. For details, see Table 1.
+
Table 1 Number of nodes in different typesNode Type
+ |
+Number
+ |
+
+
+ess
+ |
+ess: 1-32
+ |
+
+ess, ess-master
+ |
+ess: 1-200
+ess-master: an odd number ranging from 3 to 9
+ |
+
+ess, ess-client
+ |
+ess: 1-32
+ess-client: 1-32
+ |
+
+ess, ess-cold
+ |
+ess: 1-32
+ess-cold: 1-32
+ |
+
+ess, ess-master, ess-client
+ |
+ess: 1-200
+ess-master: an odd number ranging from 3 to 9
+ess-client: 1-32
+ |
+
+ess, ess-master, ess-cold
+ |
+ess: 1-200
+ess-master: an odd number ranging from 3 to 9
+ess-cold: 1-32
+ |
+
+ess, ess-client, ess-cold
+ |
+ess: 1-32
+ess-client: 1-32
+ess-cold: 1-32
+ |
+
+ess, ess-master, ess-client, ess-cold
+ |
+ess: 1-200
+ess-master: an odd number ranging from 3 to 9
+ess-client: 1-32
+ess-cold: 1-32
+ |
+
+Details about the four node types: - ess: the default node type that is mandatory for cluster creation. The other three node types are optional.
- ess-master: master node
- ess-client: client node
- ess-cold: cold data node
+
+ |
+
+
+
+
+
+
+ Procedure- Log in to the CSS management console.
- On the Dashboard page, click Create Cluster in the upper right corner. The Create page is displayed.
Alternatively, choose Clusters > Elasticsearch in the navigation tree on the left. Click Create Cluster in the upper right corner. The Create page is displayed.
+ - Specify Region and AZ.
+
Table 2 Parameter description for Region and AZParameter
+ |
+Description
+ |
+
+
+Region
+ |
+Select a region for the cluster from the drop-down list on the right. Currently, only eu-de and eu-nl are supported.
+ |
+
+AZ
+ |
+Select AZs associated with the cluster region.
+You can select a maximum of three AZs. For details, see Deploying a Cross-AZ Cluster.
+ |
+
+
+
+
+ - Configure basic cluster information.
+
Table 3 Description of basic parametersParameter
+ |
+Description
+ |
+
+
+Version
+ |
+Select a cluster version from the drop-down list box.
+ |
+
+Name
+ |
+Cluster name, which contains 4 to 32 characters. Only letters, numbers, hyphens (-), and underscores (_) are allowed and the value must start with a letter.
+ NOTE: After a cluster is created, you can modify the cluster name as required. Click the name of a cluster to be modified. On the displayed Basic Information page, click the edit icon next to the cluster name. After the modification is completed, click the confirm icon to save the modification. If you want to cancel the modification, click the cancel icon.
+
+ |
+
+
+
+
+ - Configure cluster specifications.
+
Table 4 Parameter descriptionParameter
+ |
+Description
+ |
+
+
+Nodes
+ |
+Number of nodes in a cluster. Select a number from 1 to 32. You are advised to configure three or more nodes to ensure high availability of the cluster.
+- If neither a master node nor client node is enabled, the nodes specified by this parameter are used to serve as both the master node and client node. Nodes provide the cluster management, data storage, cluster access, and data analysis functions. To ensure data stability in the cluster, it is recommended that you set this parameter to a value no less than 3.
- If only the master node function is enabled, nodes specified by this parameter are used to store data and provide functions of client nodes.
- If both the master and client node functions are enabled, the nodes specified by this parameter are only used for storing data.
- If only the client node function is enabled, nodes specified by this parameter are used to store data and provide functions of the master node.
+ |
+
+CPU Architecture
+ |
+The supported type is determined by the actual regional environment. You can select x86.
+ |
+
+Node Specifications
+ |
+Specifications of nodes in a cluster. You can select a specified specification based on your needs. Each cluster supports only one specification.
+After you select a flavor, the CPU and memory corresponding to the current specification are displayed below the parameter. For example, if you select css.medium.8, then 1 vCPU | 8 GB will be displayed, indicating that the selected node flavor has one vCPU and 8 GB of memory.
+ |
+
+Node Storage Type
+ |
+Select a storage type. Common I/O, High I/O, and Ultra-high I/O are supported.
+ |
+
+Node Storage Capacity
+ |
+Storage space. Its value varies with node specifications.
+The node storage capacity must be a multiple of 20.
+ |
+
+Disk Encryption
+ |
+If you select this option, the nodes in the cluster you create will use encrypted EVS disks to protect data. By default, this option is not selected. Note that you cannot modify this setting after the cluster is created. Therefore, exercise caution when performing the setting.
+After you select this option, you need to select an available key from the Key Name drop-down list. If no key is available, click Create/View Key to go to the KMS management console and create or modify a key. For details, see Creating a CMK.
+Enabling disk encryption has no impact on your operations on a cluster (such as accessing the cluster and importing data to the cluster). However, after you enable disk encryption, operation performance deteriorates by about 10%.
+ NOTE: - If the cluster is in the Available status and the key used for disk encryption is in the Pending deletion or disable status or has been deleted after a cluster is created, cluster scale-out is not allowed. However, other operations on the cluster, such as restarting the cluster, creating snapshots, restoring the cluster, and importing data to the cluster are not affected. In addition, this key cannot be used for cluster creation in the future.
- After a cluster is created, do not delete the key used by the cluster. Otherwise, the cluster will become unavailable.
- You cannot use default master keys whose aliases end with /default in KMS to encrypt snapshots.
+
+ |
+
+Master node
+ |
+The master node manages all nodes in the cluster. If more than 20 nodes are required to store and analyze a large amount of data, you are advised to enable the master node to ensure cluster stability. Otherwise, you are advised to set only the Nodes parameter and use the nodes as both master and client nodes.
+After enabling the master node, specify Node Specifications, Nodes, and Node Storage Type. The value of Nodes must be an odd number greater than or equal to 3. Up to nine nodes are supported. The value of Node Storage Capacity is fixed. You can select a storage type based on your needs.
+ |
+
+Client node
+ |
+The client node allows clients to access clusters and analyze data. If more than 20 nodes are required to store and analyze a large amount of data, you are advised to enable the client node to ensure cluster stability. Otherwise, you are advised to set only the Nodes parameter and use the nodes as both master and client nodes.
+After enabling the client node, specify Node Specifications, Nodes and Node Storage Type. The value of Nodes ranges from 1 to 32. The value of Node Storage Capacity is fixed. You can select a storage type based on your needs.
+ |
+
+Cold data node
+ |
+The cold data node is used to store historical data, for which query responses can be returned in minutes. If you do not quire a quick query response, store historical data on cold data nodes to reduce costs.
+After enabling cold data node, configure Node Specifications, Nodes, Node Storage Type, and Node Storage Capacity. The value of Nodes ranges from 1 to 32. Select Node Storage Type and Node Storage Capacity as required.
+After the cold data node is enabled, CSS automatically adds cold and hot tags to related nodes.
+ |
+
+
+
+
+Figure 1 Configuring host specifications
+ - Set the enterprise project.
When creating a CSS cluster, you can bind an enterprise project to the cluster if you have enabled the enterprise project function. You can select an enterprise project created by the current user from the drop-down list on the right or click View Project Management to go to the Enterprise Project Management console and create a new project or view existing projects.
+ - Click Next: Configure Network. Configure the cluster network.
+
+Table 5 Network configuration parameters
+- VPC: A VPC is a secure, isolated, and logical network environment. Select the target VPC. Click View VPC to enter the VPC management console and view the created VPC names and IDs. If no VPCs are available, create one.
+  NOTE: The VPC must contain CIDRs. Otherwise, cluster creation will fail. By default, a VPC will contain CIDRs.
+- Subnet: A subnet provides dedicated network resources that are isolated from other networks, improving network security. Select the target subnet. You can access the VPC management console to view the names and IDs of existing subnets.
+- Security Group: A security group is a collection of access control rules for ECSs that have the same security protection requirements and are mutually trusted in a VPC. To view more details about the security group, click View Security Group.
+  NOTE:
+  - For cluster access purposes, ensure that the security group allows access to port 9200.
+  - If your cluster version is 7.6.2 or later, ensure that all the ports used for communication between nodes in the same security group are allowed. If such settings cannot be configured, ensure that at least access to port 9300 is allowed.
+  - After port 9300 is enabled, if the cluster disk usage is high, delete expired data to release disk storage space.
+- Security Mode: After the security mode is enabled, communication will be encrypted and authentication will be required for the cluster.
+  - The default administrator account is admin.
+  - Set and confirm the Administrator Password. This password will be required when you access this cluster.
+- HTTPS Access: HTTPS access can be enabled only after the security mode of the cluster is enabled. After HTTPS access is enabled, communication is encrypted when you access the cluster.
+  NOTE: A cluster in security mode uses HTTPS for communication and provides slower read performance than a normal cluster using HTTP. Under high concurrency, its performance may be about 20% lower than that of a normal HTTP cluster. If you want fast read performance and do not need the resource isolation (for indexes, documents, and fields) provided by the security mode, you can disable HTTPS Access. After HTTPS Access is disabled, HTTP is used for cluster communication. In this case, data security cannot be ensured and a public IP address cannot be used.
+- Public IP Address: If HTTPS Access is enabled, you can configure Public Network Access and obtain an IP address for public network access. This IP address can be used to access this security cluster through the public network. For details, see Accessing a Cluster from a Public Network.
+
+
+
+
+Figure 2 Configuring network specifications
+ - Click Next: Configure Advanced Settings. Configure the automatic snapshot creation and other functions.
- Configure Cluster Snapshot. Set basic configuration and snapshot configuration.
The cluster snapshot function is enabled by default. You can also disable this function as required. To store automatic snapshots in OBS, an agency will be created to access OBS. Additional cost will be incurred if snapshots are stored in standard storage.
+
+Table 6 Cluster snapshot parameters
+- OBS bucket: Select an OBS bucket for storing snapshots from the drop-down list box. You can also click Create Bucket on the right to create an OBS bucket. For details, see Creating a Bucket. The created or existing OBS bucket must meet the following requirements:
+  - Storage Class is Standard or Warm.
+  - Region must be the same as that of the created cluster.
+- Backup Path: Storage path of the snapshot in the OBS bucket. The backup path configuration rules are as follows:
+  - The backup path cannot contain the following characters: \:*?"<>|
+  - The backup path cannot start with a slash (/).
+  - The backup path cannot start or end with a period (.).
+  - The backup path cannot contain more than 1,023 characters.
+- IAM Agency: IAM agency authorized by the current account for CSS to access or maintain data stored in OBS. You can also click Create IAM Agency on the right to create an IAM agency. For details, see Creating an Agency. The created or existing IAM agency must meet the following requirements:
+  - Agency Type must be Cloud service.
+  - Cloud Service must be set to CSS.
+  - The agency must have the Tenant Administrator permission for the OBS(S3) project.
+- Snapshot Encryption: Whether to enable snapshot encryption. Enabling snapshot encryption ensures the security of your snapshot data. After snapshot encryption is enabled, select a key from the Key Name drop-down list. If no key is available, click Create/View Key to go to the KMS management console and create or modify a key. For details, see Creating a CMK.
+  - You cannot use default master keys whose aliases end with /default in KMS to encrypt snapshots.
+  - If a snapshot has been stored in the OBS bucket, you cannot modify the parameters used for encrypting the snapshot.
+  - If the key used for encryption is in the Pending deletion or disabled status, you cannot perform backup and restoration operations on the cluster. Specifically, you cannot create new snapshots for the cluster or use existing snapshots to restore clusters. In this case, switch to the KMS management console and change the status of the target key to enabled so that backup and restoration operations are allowed on the cluster.
+  - If you delete the key used for encryption, you cannot perform backup and restoration operations on the cluster, and the deleted key cannot be restored. Therefore, exercise caution when deleting a key. If the key is deleted or is in the Pending deletion or disabled state, automatic snapshot creation is still triggered based on the configured snapshot policy, but all automatic snapshot creation tasks will fail and are displayed in the failed task list in the Failed Tasks dialog box. In such a scenario, you are advised to disable automatic snapshot creation.
+
+
+
+
+
+Table 7 Automatic snapshot creation parameters
+- Snapshot Name Prefix: The snapshot name prefix contains 1 to 32 characters and must start with a lowercase letter. Only lowercase letters, digits, hyphens (-), and underscores (_) are allowed. A snapshot name consists of a snapshot name prefix and a timestamp, for example, snapshot-1566921603720.
+- Time Zone: Time zone for the backup time, which cannot be changed. Specify Backup Start Time based on the time zone.
+- Backup Start Time: The time when the backup starts automatically every day. You can specify this parameter only in full hours, for example, 00:00 or 01:00. The value ranges from 00:00 to 23:00. Select a time from the drop-down list.
+- Retention Period (days): The number of days that snapshots are retained in the OBS bucket. The value ranges from 1 to 90. You can specify this parameter as required. The system automatically deletes expired snapshots every hour at half past the hour.
+
+
+
+
+Figure 3 Setting parameters for automatic snapshot creation
+ - Configure advanced settings for the cluster.
- Default: The VPC Endpoint Service, Kibana Public Access, and Tag functions are disabled by default. You can manually enable these functions after the cluster is created.
- Custom: You can enable the VPC Endpoint Service, Kibana Public Access, and Tag functions as required.
+
+Table 8 Parameters for advanced settings
+- VPC Endpoint Service: After enabling this function, you can obtain a private domain name for accessing the cluster in the same VPC. For details, see Accessing a Cluster Using a VPC Endpoint.
+  NOTE: The VPC endpoint service cannot be enabled for a shared VPC.
+- Kibana Public Access: You can configure this parameter only when security mode is enabled for a cluster. After enabling this function, you can obtain a public IP address for accessing Kibana. For details, see Accessing a Cluster from a Kibana Public Network.
+- Tag: Adding tags to clusters can help you identify and manage your cluster resources. You can customize tags or use tags predefined by Tag Management Service (TMS). For details, see Managing Tags. If your organization has enabled tag policies for CSS, you must comply with the tag policy rules when creating clusters; otherwise, clusters may fail to be created. Contact the organization administrator to learn more about tag policies.
+
+
+
+
+
+ - Click Next: Confirm. Check the configuration and click Next to create a cluster.
- Click Back to Cluster List to switch to the Clusters page. The cluster you created is listed on the displayed page and its status is Creating. If the cluster is successfully created, its status will change to Available.
If the cluster creation fails, create the cluster again.
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477739348.html b/docs/css/umn/en-us_topic_0000001477739348.html
new file mode 100644
index 00000000..f8632f87
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477739348.html
@@ -0,0 +1,396 @@
+
+
+Connecting to a Dedicated Load Balancer
+This section describes how to connect a CSS cluster to a dedicated load balancer.
+(Optional) Preparing a Self-signed Certificate
+If the target ELB listener uses the HTTP protocol, skip this step.
+ Prepare and upload a self-signed certificate.
+ You are advised to use a certificate purchased in Cloud Certificate Manager (CCM) or issued by an authoritative organization.
+
+ - Log in to a Linux client where the OpenSSL tool and JDK are installed.
- Run the following commands to create a self-signed certificate:
+mkdir ca
+mkdir server
+mkdir client
+
+#Use OpenSSL to create a CA certificate.
+cd ca
+#Create the OpenSSL configuration file ca_cert.conf for the CA certificate.
+cat >ca_cert.conf <<EOF
+[ req ]
+distinguished_name = req_distinguished_name
+prompt = no
+
+[ req_distinguished_name ]
+ O = ELB
+EOF
+#Create private key file ca.key for the CA certificate.
+openssl genrsa -out ca.key 2048
+#Create the CSR file ca.csr for the CA certificate.
+openssl req -out ca.csr -key ca.key -new -config ./ca_cert.conf
+#Create a self-signed CA certificate ca.crt.
+openssl x509 -req -in ca.csr -out ca.crt -sha1 -days 5000 -signkey ca.key
+#Convert the CA certificate format to p12.
+openssl pkcs12 -export -clcerts -in ca.crt -inkey ca.key -out ca.p12
+#Convert the CA certificate format to JKS.
+keytool -importkeystore -srckeystore ca.p12 -srcstoretype PKCS12 -deststoretype JKS -destkeystore ca.jks
+
+
+#Use the CA certificate to issue a server certificate.
+cd ../server
+#Create the OpenSSL configuration file server_cert.conf for the server certificate. Change the CN field to the domain name or IP address of the server as required.
+cat >server_cert.conf <<EOF
+[ req ]
+distinguished_name = req_distinguished_name
+prompt = no
+
+[ req_distinguished_name ]
+ O = ELB
+ CN = 127.0.0.1
+EOF
+#Create the private key file server.key for the server certificate.
+openssl genrsa -out server.key 2048
+#Create the CSR request file server.csr for the server certificate.
+openssl req -out server.csr -key server.key -new -config ./server_cert.conf
+#Use the CA certificate to issue the server certificate server.crt.
+openssl x509 -req -in server.csr -out server.crt -sha1 -CAcreateserial -days 5000 -CA ../ca/ca.crt -CAkey ../ca/ca.key
+#Convert the server certificate format to p12.
+openssl pkcs12 -export -clcerts -in server.crt -inkey server.key -out server.p12
+#Convert the server certificate format to JKS.
+keytool -importkeystore -srckeystore server.p12 -srcstoretype PKCS12 -deststoretype JKS -destkeystore server.jks
+
+
+#Use the CA certificate to issue a client certificate.
+cd ../client
+#Create the OpenSSL configuration file client_cert.conf for the client certificate. Change the CN field to the domain name or IP address of the server as required.
+cat >client_cert.conf <<EOF
+[ req ]
+distinguished_name = req_distinguished_name
+prompt = no
+
+[ req_distinguished_name ]
+O = ELB
+CN = 127.0.0.1
+EOF
+#Create private key client.key for the client certificate.
+openssl genrsa -out client.key 2048
+#Create the CSR file client.csr for the client certificate.
+openssl req -out client.csr -key client.key -new -config ./client_cert.conf
+#Use the CA certificate to issue the client certificate client.crt.
+openssl x509 -req -in client.csr -out client.crt -sha1 -CAcreateserial -days 5000 -CA ../ca/ca.crt -CAkey ../ca/ca.key
+#Convert the client certificate to a p12 file that can be identified by the browser.
+openssl pkcs12 -export -clcerts -in client.crt -inkey client.key -out client.p12
+#Convert the client certificate format to JKS.
+keytool -importkeystore -srckeystore client.p12 -srcstoretype PKCS12 -deststoretype JKS -destkeystore client.jks
+
+
+ - Upload the self-signed certificate. For details, see Configuring the Server Certificate and Private Key.
+
+Creating a Dedicated Load Balancer
+- Log in to the ELB management console.
- Create a dedicated load balancer. For details, see Creating a Dedicated Load Balancer. Table 1 describes the parameters required for connecting a CSS cluster with a dedicated load balancer.
+
+Table 1 Parameters for interconnecting a CSS cluster with a dedicated load balancer
+- Type: Load balancer type. Select Dedicated. Example: Dedicated
+- Billed By: Billing mode of the dedicated load balancer. Example: Pay-per-use
+- Region: Region where the CSS cluster is located.
+- IP as Backend Servers: A CSS cluster can be connected only after the cross-VPC backend is enabled. Example: Enabled
+- Network Type: Type of the network used by the load balancer to provide services for external systems. Example: Private IPv4 network
+- VPC: VPC where the load balancer works. This parameter is mandatory no matter which network type is selected. Select the VPC of the CSS cluster.
+- Subnet: Subnet where the load balancer is to be created. This parameter is mandatory no matter which network type is selected. Select the subnet of the CSS cluster.
+- Specifications: You are advised to select Application load balancing (HTTP/HTTPS), which provides better functions and performance. Example: Application load balancing (HTTP/HTTPS), Small I
+
+
+
+
+
+
+Interconnecting with a Load Balancer
+A cluster in security mode with HTTPS access enabled does not support HTTP protocol authentication. If you need to enable HTTP protocol authentication, disable the security mode of the cluster first.
+Before changing the security mode, disable load balancing. After the security mode is changed, enable load balancing again.
+
+ - Log in to the CSS management console.
- On the Clusters page, select the cluster you want to connect to the load balancer and click the cluster name. The cluster basic information page is displayed.
- In the navigation pane, choose Load Balancing. Toggle on the load balancing switch and configure basic load balancing information.
- Load Balancer: Select a created load balancer. You can also click Create Load Balancer to create one.
- Agency: Select an agency name. If no agency is available, click Create Agency to create one. The selected agency must have the ELB Administrator and ELB FullAccess permissions.
Figure 1 Enabling load balancing
+
+ - Click OK. The listener configuration page is displayed.
Figure 2 Creating a listener
+- In the Listener Configuration area, click the configuration icon to configure listener information.
+Figure 3 Configuring a listener
+
+Table 2 Listener configuration information
+- Frontend Protocol: The protocol used by the client and listener to distribute traffic. Select a protocol as required.
+- Frontend Port: The port used by the client and listener to distribute traffic, for example, 9200. Specify this parameter as required.
+- SSL Authentication: Authentication mode for the client to access the server. Select a parsing mode as required.
+- Server Certificate: The server certificate is used for SSL handshake negotiation. The certificate content and private key must be provided. This parameter is mandatory when SSL Authentication is set to Two-way authentication.
+- CA Certificate: Also called the client CA public key certificate, it is used to verify the issuer of a client certificate. When HTTPS two-way authentication is enabled, an HTTPS connection can be established only if the client provides a certificate issued by the specified CA. This parameter is mandatory only when Frontend Protocol is set to HTTPS.
+
+
+
+
+ - (Optional) In the Connection Mode area, you can click Settings next to Access Control to configure the IP addresses or network segments that are allowed to access the system. If you do not set the IP addresses or network segments, all IP addresses are allowed to access the system by default.
+ In the Health Check area, you can view the health check result of each node IP address. The following table describes the health check results.
+- Normal: The IP address of the node is properly connected.
+- Abnormal: The IP address of the node cannot be connected and is unavailable.
+
+
+
+
+
+
+Accessing a Cluster Using the Curl Command
+Run the following commands to check whether the dedicated load balancer can be connected to the cluster.
+
+Table 3 Commands for accessing different clusters
+- Non-security cluster
+  - No authentication: curl http://IP:9200
+  - One-way authentication: curl -k --cert ./client.crt --key ./client.key https://IP:9200
+  - Two-way authentication: curl --cacert ./ca.crt --cert ./client.crt --key ./client.key https://IP:9200
+- Security mode + HTTP
+  - Password authentication: curl http://IP:9200 -u user:pwd
+  - One-way authentication + Password authentication: curl -k --cert ./client.crt --key ./client.key https://IP:9200 -u user:pwd
+  - Two-way authentication + Password authentication: curl --cacert ./ca.crt --cert ./client.crt --key ./client.key https://IP:9200 -u user:pwd
+- Security mode + HTTPS
+  - One-way authentication + Password authentication: curl -k --cert ./client.crt --key ./client.key https://IP:9200 -u user:pwd
+  - Two-way authentication + Password authentication: curl --cacert ./ca.crt --cert ./client.crt --key ./client.key https://IP:9200 -u user:pwd
+
+
+
+
+
+Table 4 Variables
+- IP: ELB IP address
+- user: Username for accessing the CSS cluster
+- pwd: Password of the user
+
+
+
+
+ If the Elasticsearch cluster information is returned, the connection is successful. For example, if a security cluster using the HTTPS protocol is connected to a load balancer using two-way authentication, the information shown in Figure 4 is returned.
+ Figure 4 Accessing a cluster
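+Figure 4 is an image. For reference, the returned cluster information is the standard Elasticsearch root endpoint response, which looks similar to the following illustrative example (the node name, cluster name, UUID, and version are placeholders, not values taken from this document):
+
+{
+  "name" : "css-xxxx-ess-esn-1-1",
+  "cluster_name" : "css-xxxx",
+  "cluster_uuid" : "xxxxxxxxxxxxxxxxxxxxxx",
+  "version" : {
+    "number" : "7.10.2"
+  },
+  "tagline" : "You Know, for Search"
+}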
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477739356.html b/docs/css/umn/en-us_topic_0000001477739356.html
new file mode 100644
index 00000000..f916ec54
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477739356.html
@@ -0,0 +1,18 @@
+
+
+Best Practices
+This section describes how to switch from the primary cluster to the secondary cluster when the primary cluster is faulty.
+Scenario 1: Synchronization of specified indexes has been configured between the primary and secondary clusters.
+(1) Call the API for stopping index synchronization in the secondary cluster. The read and write traffic can then be switched to the secondary cluster.
+(2) After the primary cluster recovers, call the index synchronization API to synchronize data from the secondary cluster back to the primary cluster.
+Scenario 2: A matching pattern for index synchronization has been configured between the primary and secondary clusters.
+(1) Call the API for deleting the created matching pattern for index synchronization in the secondary cluster.
+(2) Call the API for stopping index synchronization in the secondary cluster (using * for matching). The read and write traffic can then be switched to the secondary cluster.
+(3) After the primary cluster recovers, call the index synchronization API to synchronize data from the secondary cluster back to the primary cluster.
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477739360.html b/docs/css/umn/en-us_topic_0000001477739360.html
new file mode 100644
index 00000000..e68f3c37
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477739360.html
@@ -0,0 +1,153 @@
+
+
+Enhanced Cold Data Query Performance
+Context
+When you query data on the Discover page of Kibana for the first time, all data needs to be obtained from OBS because there is no cache. If a large number of documents are returned, it takes a long time to obtain the corresponding time fields and file metadata from OBS. To accelerate these first-time queries on the Discover page, you can cache data locally.
+
+Prerequisites
+This feature is available in Elasticsearch clusters of versions 7.6.2 and 7.10.2 and OpenSearch clusters created after February 2023.
+
+API for Querying Cold Data from the Local Cache
+This API can be used to query cold data from the local cache.
+ Example request:
+ GET /_frozen_stats/local_cache
+GET /_frozen_stats/local_cache/{nodeId}
+ Response example:
+ {
+ "_nodes" : {
+ "total" : 1,
+ "successful" : 1,
+ "failed" : 0
+ },
+ "cluster_name" : "elasticsearch",
+ "nodes" : {
+ "6by3lPy1R3m55Dcq3liK8Q" : {
+ "name" : "node-1",
+ "transport_address" : "127.0.0.1:9300",
+ "host" : "127.0.0.1",
+ "ip" : "127.0.0.1",
+ "local_cache" : {
+ "get_stats" : {
+ "get_total_count" : 562, //Total number of times data was retrieved from the local cold data cache.
+ "get_hit_count" : 562, //Total number of hits in the local cold data cache.
+ "get_miss_count" : 0, //Total number of local cold data cache misses.
+ "get_total_ns" : 43849200, //Total duration for retrieving data from the local cold data cache.
+ "get_avg_ns" : 78023 //Average duration for retrieving data from the local cold data cache.
+ },
+ "load_stats" : {
+ "load_count" : 2, //Number of times cold data was loaded from the local cache
+ "load_total_ms" : 29, //Total duration for loading cold data from the local cache
+ "load_avg_ms" : 14, //Average duration for loading cold data from the local cache
+ "load_fail_count" : 0, //Number of failure times for loading cold data from the local cache
+ "load_overflow_count" : 0 //Number of times the local cold data cache exceeds the cache pool size.
+ },
+ "reload_stats" : {
+ "reload_count" : 0, //Number of times the local cold data cache was regenerated.
+ "reload_total_ms" : 0, //Total duration for regenerating the local cold data cache.
+ "reload_avg_ms" : 0, //Average duration for regenerating the local cold data cache.
+ "reload_fail_count" : 0 //Number of failures in regenerating the local cold data cache.
+ },
+ "init_stats" : {
+ "init_count" : 0, //Number of times the local cold data cache was initialized.
+ "init_total_ms" : 0, //Total duration for initializing the local cold data cache.
+ "init_avg_ms" : 0, //Average duration for initializing the local cold data cache.
+ "init_fail_count" : 0 //Number of failures in initializing the local cold data cache.
+ }
+ }
+ }
+ }
+ }
+
+
+Configuring Parameters
+- low_cost.local_cache.max.capacity
+  Type: Integer; Unit: -; Value range: 10 to 5000 (default: 500); Scope: node; Can be dynamically modified: Yes
+  Description: Maximum number of available cold data caches on a node. Each shard corresponds to a cache object.
+  NOTE:
+  - If the heap memory usage remains high, decrease the value.
+  - If the value of load_overflow_count keeps increasing rapidly, increase the value.
+- index.low_cost.local_cache.threshold
+  Type: Integer; Unit: %; Value range: 0 to 100 (default: 50); Scope: index; Can be dynamically modified: Yes
+  Description: Threshold for enabling the local cache of cold data.
+  NOTE:
+  - If the percentage of date fields is less than the value of this parameter, the cold data of the date type will be cached locally. Otherwise, this parameter is not used.
+  - If the date fields of the current index occupy most of the data volume of the index, you are advised not to use this function.
+- index.low_cost.local_cache.evict_time
+  Type: String; Unit: days; Value range: 1d to 365d (default: 30d); Scope: index; Can be dynamically modified: Yes
+  Description: Wait time before cold data is deleted from the local cache. The value is determined based on index.frozen_date (the time when the index was successfully frozen).
+  NOTE:
+  - For indexes that were frozen in old clusters and do not have index.frozen_date specified, the value of this parameter is determined based on the index creation time.
+  - You are advised to adjust the deletion time based on the disk usage to avoid high disk usage.
+
+
+
+
+
+ Modifying Parameters- Run the following command to modify low_cost.local_cache.max.capacity:
PUT _cluster/settings
+ {
+ "persistent": {
+ "low_cost.local_cache.max.capacity":1000
+ }
+ }
+ - Run the following command to modify index.low_cost.local_cache.threshold:
PUT es_write_pref2-00000000021/_settings
+ {
+ "index.low_cost.local_cache.threshold":20
+ }
+ - Run the following command to modify index.low_cost.local_cache.evict_time:
PUT es_write_pref2-00000000021/_settings
+ {
+ "index.low_cost.local_cache.evict_time":"7d"
+ }
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477739364.html b/docs/css/umn/en-us_topic_0000001477739364.html
new file mode 100644
index 00000000..7fa6343a
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477739364.html
@@ -0,0 +1,43 @@
+
+
+Request Sampling
+Context
+Request sampling can record the access of client IP addresses and the types of requests from the client. Based on the statistics, you can identify the access traffic of client IP addresses and analyze the client write and query requests.
+
+Table 1 Request statistics parameters
+- flowcontrol.log.access.enabled (Boolean): Whether to collect statistics on the IP addresses of clients that recently accessed the Elasticsearch cluster and the number of their requests. The value can be true or false (default).
+- flowcontrol.log.access.count (Integer): Number of client IP addresses that recently accessed a cluster. Value range: 0 to 100. Default value: 10.
+
+
+
+
+- The IP address statistics switch controls whether to collect request type statistics and whether to enable logging.
+- flowcontrol.log.access.enabled controls whether to collect statistics on client requests, including the number of bulk write, search, and msearch requests (a configuration sketch follows this list).
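+As a minimal configuration sketch, assuming these node-level settings are applied through the standard _cluster/settings API in the same way as the cold data cache parameters shown earlier (this section does not state the exact API path), request sampling could be enabled as follows:
+
+PUT _cluster/settings
+{
+  "persistent": {
+    "flowcontrol.log.access.enabled": true,
+    "flowcontrol.log.access.count": 20
+  }
+}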
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477739368.html b/docs/css/umn/en-us_topic_0000001477739368.html
new file mode 100644
index 00000000..6a8e4102
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477739368.html
@@ -0,0 +1,24 @@
+
+
+Changing Specifications
+If the workloads on the data plane of a cluster change, you can change its node specifications as needed.
+ Prerequisites- The target cluster is available and has no tasks in progress.
- The target cluster has sufficient quotas available.
- When changing the node specifications, ensure that all service data has copies so the services will not be interrupted.
Run the GET _cat/indices?v command in Kibana. If the returned rep value is greater than 0, the data has copies. If the returned rep value is 0, the data has no copies. In this case, create a snapshot for the cluster by referring to Manually Creating a Snapshot (see the sample output after this list).
+ - If the data volume is large, it may take a long time to modify the node specifications. You are advised to modify specifications during off-peak hours.
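+The rep column in the command output indicates the number of replicas per index. The following is an illustrative example only (the index name, UUID, and sizes are placeholders, not taken from this document); a rep value of 1 means the index has a copy:
+
+GET _cat/indices?v
+health status index      uuid                   pri rep docs.count docs.deleted store.size pri.store.size
+green  open   my-index-1 aBcDeFgHiJkLmNoPqRsTuV   5   1     129820            0     57.9mb         28.9mb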
+
+ Constraints- The number of nodes and the capacity of node storage cannot be changed. You can add nodes and increase the node storage capacity by referring to Scaling Out a Cluster. For details about how to reduce the number of nodes, see Scaling in a Cluster.
- After decreasing cluster specifications, the cluster performance will deteriorate and service capabilities will be affected. Exercise caution when performing this operation.
- If a cluster has multiple node types, you can change the specifications of only one type at a time. After the change, nodes in other types still maintain their original specifications.
- Kibana is unavailable during specification change.
- During the specification modification, the nodes are stopped and restarted in sequence. It is a rolling process.
+
+ Procedure- Log in to the CSS management console.
- In the navigation pane, choose a cluster type. The cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- On the Modify Configuration page, choose the Scale Cluster tab and click Change Specifications to set parameters.
- Action: select Change specifications.
- Resources: The amount of resources after the change.
- Nodes: Specifications of the default data nodes. Select the required specifications from the Node Specifications drop-down list, and select the node type whose specifications you want to change.
- If a cluster has master nodes, client nodes, or cold data nodes, you can change their specifications.
+ - Click Next.
- Confirm the information and click Submit.
- In the dialog box that is displayed, confirm whether to select Verify index copies and Cluster status check and click OK to start the specifications change.
Index copy verification:
+By default, CSS checks for indexes that do not have copies. You can skip this step, but the lack of index copies may affect services during a cluster specifications change.
+- If you selected Verify index copies and the cluster has no master node, indexes must have at least one copy and the cluster must have at least three nodes.
+- If you selected Verify index copies and the cluster has a master node, indexes must have at least one copy.
+
+Cluster status check:
+The cluster status is checked before the specifications change by default. The specifications of nodes are changed one by one to ensure success and data security. If a cluster is overloaded and services are faulty, the request for a specifications change will not be delivered. In this case, you can disable cluster status check. If you ignore the cluster status check before the specifications change, the cluster may be faulty and services may be interrupted. Exercise caution when performing this operation.
+ - Click Back to Cluster List to switch to the Clusters page. The Cluster Status is Configuration modified. When Cluster Status changes to Available, the cluster specifications have been successfully modified.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477739376.html b/docs/css/umn/en-us_topic_0000001477739376.html
new file mode 100644
index 00000000..55155345
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477739376.html
@@ -0,0 +1,14 @@
+
+
+Context
+The large query isolation feature allows you to separately manage large queries. You can isolate query requests that consume a large amount of memory or take a long period of time. If the heap memory usage of a node is too high, the interrupt control program will be triggered. The program will interrupt a large query based on the policies you configured and cancel the running query tasks of the query.
+ You can also configure a global query timeout duration. Long queries will be intercepted.
+ Currently, only versions 7.6.2 and 7.10.2 support large query isolation.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477739380.html b/docs/css/umn/en-us_topic_0000001477739380.html
new file mode 100644
index 00000000..1deb0b7c
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477739380.html
@@ -0,0 +1,25 @@
+
+
+One-click Traffic Blocking
+You can block all traffic in one click, except the traffic that passes through O&M APIs, to handle unexpected traffic bursts and quickly recover your cluster.
+ - Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run commands to enable or disable one-click traffic blocking.
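+The exact commands are not listed in this section. As a hypothetical sketch only (the setting name flowcontrol.break.enabled is an assumption and is not confirmed by this document), one-click traffic blocking might be toggled through the cluster settings API in Dev Tools:
+
+# Hypothetical example: verify the actual setting name in the flow control documentation for your cluster version.
+PUT _cluster/settings
+{
+  "persistent": {
+    "flowcontrol.break.enabled": true
+  }
+}
+
+Setting the value back to false (or null, to restore the default) would unblock traffic.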
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477739384.html b/docs/css/umn/en-us_topic_0000001477739384.html
new file mode 100644
index 00000000..4c9e8ec2
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477739384.html
@@ -0,0 +1,15 @@
+
+
+Context
+You can store hot data on SSD to achieve the optimal query performance, and store historical data in OBS to reduce data storage costs.
+Application Scenarios
+A large volume of data is written to and stored in SSDs. If historical data is no longer updated (that is, it has turned into cold data) and its QPS decreases, you can call CSS APIs to dump such data from SSDs to OBS buckets. This operation freezes indexes, decoupling compute from storage.
+
+ Constraints- Currently, only Elasticsearch clusters of versions 7.6.2 and 7.10.2 and OpenSearch clusters of version 1.3.6 support decoupled storage and computing.
- The storage-compute decoupling feature depends on OBS. Therefore, you must comply with the restrictions on OBS bandwidth and QPS. If these restrictions are violated, the performance of queries on OBS will deteriorate. For example, the speed of restoring shards and querying data will become slow.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477739388.html b/docs/css/umn/en-us_topic_0000001477739388.html
new file mode 100644
index 00000000..ca4dcfe1
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477739388.html
@@ -0,0 +1,24 @@
+
+
+Viewing the Default Plugin List
+CSS clusters have default plugins. You can view the default plugin information on the console or Kibana.
+ Viewing Plugins on the Console- Log in to the CSS management console.
- In the navigation pane, choose Clusters. Click the target cluster name and go to the Basic Information page of the cluster.
- Click the Plugins tab.
- On the Default page, view default plugins supported by the current version.
+
+Viewing Plugins on Kibana
+- Log in to the CSS management console.
- In the navigation pane, choose Clusters. Locate the target cluster and click Access Kibana in the Operation column to log in to Kibana.
- Non-security cluster: The Kibana console is displayed.
- Security cluster: Enter the username and password on the login page and click Log In to go to the Kibana console. The default username is admin and the password is the administrator password you specified during cluster creation.
+ - Go to Dev Tools and run the following command to view the cluster plugin information:
GET _cat/plugins?v
+The following is an example of the response body:
+name component version
+css-test-ess-esn-1-1 analysis-dynamic-synonym 7.6.2-xxxx-ei-css-v1.0.1
+css-test-ess-esn-1-1 analysis-icu 7.6.2-xxxx-ei-css-v1.1.6
+css-test-ess-esn-1-1 analysis-ik 7.6.2-xxxx-ei-css-v1.0.1
+......
+name indicates the cluster node name, component indicates the plugin name, and version indicates the plugin version.
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477739392.html b/docs/css/umn/en-us_topic_0000001477739392.html
new file mode 100644
index 00000000..314ec95a
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477739392.html
@@ -0,0 +1,37 @@
+
+
+Creating and Managing Indexes
+Clusters of version 7.6.2 or later support index state management (ISM). ISM is a plugin that allows you to automate periodic and administrative operations based on changes in the index age, index size, or number of documents. When using the ISM plugin, you can define policies that automatically handle index rollovers or deletions based on your needs, as shown in the sample policy after the following procedure.
+ The following procedure uses Elasticsearch 7.6.2 as an example. The Kibana UI varies depending on the Kibana version, but their operations are similar.
+
+ Creating an Index Policy- Log in to Kibana and choose IM or Index Management on the left. The Index Management page is displayed.
- Click Create policy to create an index policy.
- Enter a policy ID in the Policy ID text box and enter your policy in the Define policy text box.
Figure 1 Configuring a policy
+ - Click Create.
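+The following is an illustrative sample of what can be entered in the Define policy text box. It follows the Open Distro ISM policy format; the state names and the seven-day age threshold are assumptions chosen for demonstration, not values mandated by CSS. The policy keeps an index in a hot state and deletes it seven days after creation:
+
+{
+  "policy": {
+    "description": "Sample policy: delete indexes seven days after creation",
+    "default_state": "hot",
+    "states": [
+      {
+        "name": "hot",
+        "actions": [],
+        "transitions": [
+          { "state_name": "delete", "conditions": { "min_index_age": "7d" } }
+        ]
+      },
+      {
+        "name": "delete",
+        "actions": [ { "delete": {} } ],
+        "transitions": []
+      }
+    ]
+  }
+}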
+
+Attaching a Policy to an Index
+You can attach a policy to one or more indexes, or add the policy ID to an index template. When you create indexes using that index template pattern, the policy will be attached to all created indexes.
+ - Method 1: Kibana commands
On the Dev Tools page of Kibana, run the following command to associate a policy ID with an index template:
+PUT _template/<template_name>
+{
+ "index_patterns": ["index_name-*"],
+ "settings": {
+ "opendistro.index_state_management.policy_id": "policy_id"
+ }
+}
+- <template_name>: Replace it with the name of a created index template.
- policy_id: Replace it with a custom policy ID.
+For details about how to create an index template, see Index Template.
+ - Method 2: Kibana console
- On the Index Management page of Kibana, choose Indices.
Figure 2 Choosing Indexes
+ - In the Indices list, select the target index to which you want to attach a policy.
- Click Apply policy in the upper right corner.
Figure 3 Adding a policy
+ - Select the policy you created from the Policy ID drop-down list.
Figure 4 Selecting a policy
+ - Click Apply.
After you attach a policy to an index, ISM creates a job that runs every 5 minutes by default, to execute the policy, check conditions, and convert the index to different statuses.
+
+
+
+ Managing Index Policies- Click Managed Indices.
- If you want to change the policy, click Change policy. For details, see Changing Policies.
- To delete a policy, select your policy, and click Remove policy.
- To retry a policy, select your policy, and click Retry policy.
+ For details, see Index State Management.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477739396.html b/docs/css/umn/en-us_topic_0000001477739396.html
new file mode 100644
index 00000000..0c19f7ad
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477739396.html
@@ -0,0 +1,11 @@
+
+
+Getting Started
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477739400.html b/docs/css/umn/en-us_topic_0000001477739400.html
new file mode 100644
index 00000000..eab3086b
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477739400.html
@@ -0,0 +1,226 @@
+
+
+Scenario
+CSS integrates shared load balancers and allows you to bind public network access and enable the VPC Endpoint service. Dedicated load balancers provide more functions and higher performance than shared load balancers. This section describes how to connect a cluster to a dedicated load balancer.
+ Advantages of connecting a cluster to a dedicated load balancer: - A non-security cluster can also use capabilities of the Elastic Load Balance (ELB) service.
- You can use customized certificates for HTTPS bidirectional authentication.
- Layer-7 traffic monitoring and alarm configuration are supported, allowing you to view the cluster status at any time.
+
+ There are eight service forms for clusters in different security modes to connect to dedicated load balancers. Table 1 describes the ELB capabilities for the eight service forms. Table 2 describes the configurations for the eight service forms.
+You are advised not to connect a load balancer that has been bound to a public IP address to a non-security cluster. Access from the public network through such a load balancer may bring security risks, because non-security clusters can be accessed over HTTP without security authentication.
+
+
+Table 1 ELB capabilities for different clusters
+- Non-security cluster
+  - No authentication: ELB load balancing: Yes; ELB traffic monitoring: Yes; ELB two-way authentication: No
+  - One-way authentication or Two-way authentication: ELB load balancing: Yes; ELB traffic monitoring: Yes; ELB two-way authentication: Yes
+- Security mode + HTTP
+  - Password authentication: ELB load balancing: Yes; ELB traffic monitoring: Yes; ELB two-way authentication: No
+  - One-way authentication + Password authentication, or Two-way authentication + Password authentication: ELB load balancing: Yes; ELB traffic monitoring: Yes; ELB two-way authentication: Yes
+- Security mode + HTTPS
+  - One-way authentication + Password authentication, or Two-way authentication + Password authentication: ELB load balancing: Yes; ELB traffic monitoring: Yes; ELB two-way authentication: Yes
+
+
+
+
+
+Table 2 Configuration for interconnecting different clusters with ELB
+- Non-security cluster (health check path: /)
+  - No authentication: ELB listener: HTTP, port 9200, no authentication; backend server group: HTTP, health check port 9200
+  - One-way authentication: ELB listener: HTTPS, port 9200, one-way authentication; backend server group: HTTP, health check port 9200
+  - Two-way authentication: ELB listener: HTTPS, port 9200, two-way authentication; backend server group: HTTP, health check port 9200
+- Security mode + HTTP (health check path: /_opendistro/_security/health)
+  - Password authentication: ELB listener: HTTP, port 9200, no authentication; backend server group: HTTP, health check port 9200
+  - One-way authentication + Password authentication: ELB listener: HTTPS, port 9200, one-way authentication; backend server group: HTTP, health check port 9200
+  - Two-way authentication + Password authentication: ELB listener: HTTPS, port 9200, two-way authentication; backend server group: HTTP, health check port 9200
+- Security mode + HTTPS (health check path: /_opendistro/_security/health)
+  - One-way authentication + Password authentication: ELB listener: HTTPS, port 9200, one-way authentication; backend server group: HTTPS, health check port 9200
+  - Two-way authentication + Password authentication: ELB listener: HTTPS, port 9200, two-way authentication; backend server group: HTTPS, health check port 9200
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477739404.html b/docs/css/umn/en-us_topic_0000001477739404.html
new file mode 100644
index 00000000..2c6078cc
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477739404.html
@@ -0,0 +1,32 @@
+
+
+Cluster Specification Modification
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477739408.html b/docs/css/umn/en-us_topic_0000001477739408.html
new file mode 100644
index 00000000..a286ff58
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477739408.html
@@ -0,0 +1,25 @@
+
+
+Context
+Feature Description
+CSS can control traffic at the node level. You can configure a blacklist and whitelist, the maximum number of concurrent HTTP connections, and the maximum number of new HTTP connections for a node. You can also configure the maximum heap memory used by specific request paths, the maximum CPU usage, and one-click access blocking, and collect statistics on node access IP addresses and URIs. Each function has an independent control switch, which is disabled by default. To restore the default values of parameters, set them to null.
+If flow control is enabled, requests will be blocked at the entry, which relieves the cluster pressure in high-concurrency scenarios and avoids unavailability issues.
+ - HTTP/HTTPS Flow Control
- You can control client IP address access by setting IP addresses and subnets in the HTTP/HTTPS blacklist or whitelist. If an IP address is in the blacklist, the client is disconnected and all its requests are rejected. Whitelist rules take precedence over blacklist rules. If a client IP address exists in both the blacklist and whitelist, the client requests will not be rejected.
- HTTP/HTTPS concurrent connection flow control limits the total number of HTTP connections to a node per second.
- HTTP/HTTPS new connection flow control limits the number of new connections to a node.
+ - Memory Flow Control
Memory flow control limits request paths based on the node heap memory. You can configure memory flow control whitelist, global memory usage threshold, and heap memory threshold for a single path. Global memory flow control threshold takes precedence over the memory threshold of a single path. Paths in the whitelist will not be blocked in memory flow control.
+ - Global Path Whitelist for Flow Control
You can configure the global path whitelist for flow control as required when you need to use custom plug-ins.
+ - Request Sampling
Request sampling can record the number of access requests from client IP addresses and the request paths of sampled users. Based on the statistics, you can identify the access traffic of client IP addresses and analyze the access traffic of request paths.
+ - Flow Control
Flow control provides an independent API for viewing traffic statistics and records the number of times the API is triggered. You can evaluate the flow control threshold and analyze the cluster load based on the statistics.
+ - Access Logs
Access logs record the URLs and bodies of HTTP/HTTPS requests received by nodes within a period of time. You can analyze the current traffic pressure based on the access logs.
+ - CPU Flow Control
You can configure the node CPU usage threshold to limit the accessed traffic on a single node.
+ - One-click Traffic Blocking
One-click access blocking can block all the access traffic of a node, excluding the traffic from Kibana and Elasticsearch monitor APIs.
+
+
+ Constraints- Currently, only versions 7.6.2 and 7.10.2 support the flow control feature.
- Flow control may affect the performance of some nodes.
+ - If flow control is enabled, user requests that exceed the flow control threshold will be rejected.
- Memory flow control and CPU flow control are based on request paths. The length and number of paths cannot be too large, or the cluster performance will be affected.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477899148.html b/docs/css/umn/en-us_topic_0000001477899148.html
new file mode 100644
index 00000000..22cc532f
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477899148.html
@@ -0,0 +1,84 @@
+
+
+CSS Custom Policies
+Custom policies can be created to supplement the system-defined policies of CSS. For the actions supported for custom policies, see section "Permissions Policies and Supported Actions" in the Cloud Search Service API Reference.
+ You can create custom policies in either of the following ways:
+ - Visual editor: Select cloud services, actions, resources, and request conditions. You do not need to have knowledge of the policy syntax.
- JSON: Create a JSON policy or edit based on an existing policy.
+ For details about how to create custom policies, see Creating a Custom Policy. The following section provides examples of common CSS custom policies.
+ IAM permissions and data plane cluster permissions are managed separately. To enable the security capability of the data plane, you need to use the security mode.
+
+ Example Custom Policies To let an IAM user access an OBS bucket, you need to grant the GetBucketStoragePolicy, GetBucketLocation, ListBucket, and ListAllMyBuckets permissions to the user.
+
+ Example 1: Allowing users to create a CSS cluster {
+ "Version": "1.1",
+ "Statement": [
+ {
+ "Action": [
+ "css:cluster:create",
+ "vpc:securityGroups:get",
+ "vpc:securityGroups:create",
+ "vpc:securityGroups:delete",
+ "vpc:securityGroupRules:get",
+ "vpc:securityGroupRules:create",
+ "vpc:securityGroupRules:delete",
+ "vpc:vpcs:list",
+ "vpc:privateIps:list",
+ "vpc:ports:get",
+ "vpc:ports:create",
+ "vpc:ports:update",
+ "vpc:ports:delete",
+ "vpc:quotas:list",
+ "vpc:subnets:get",
+ "ecs:cloudServerFlavors:get",
+ "ecs:serverInterfaces:use",
+ "ecs:cloudServers:addNics",
+ "ecs:quotas:get",
+ "evs:types:get",
+ "evs:quotas:get"
+ ],
+ "Effect": "Allow"
+ }
+ ]
+}
+
+
+ Example 2: Denying cluster deletion
+ A policy with only Deny permissions must be used in conjunction with other policies for it to take effect. If the permissions assigned to a user contain both Allow and Deny, the Deny permissions take precedence over the Allow permissions.
+ The following method can be used if you need to assign permissions of the CSS Admin policy to a user but you want to prevent the user from deleting clusters. Create a custom policy for denying cluster deletion, and attach both policies to the group to which the user belongs. Then, the user can perform all operations on CSS except deleting clusters. The following is an example of a deny policy:
+ {
+ "Version": "1.1",
+ "Statement": [
+ {
+ "Effect": "Deny",
+ "Action": [
+ "css:cluster:delete"
+ ]
+ }
+ ]
+}
+
+ Example 3: Defining permissions for multiple services in a policy
+ A custom policy can contain the actions of multiple services that are of the global or project-level type. The following is an example policy containing actions of multiple services:
+ {
+ "Version": "1.1",
+ "Statement": [
+ {
+ "Action": [
+ "ecs:cloudServers:resize",
+ "ecs:cloudServers:delete",
+ "css:cluster:restart",
+ "css:*:get*",
+ "css:*:list*"
+ ],
+ "Effect": "Allow"
+ }
+ ]
+}
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477899152.html b/docs/css/umn/en-us_topic_0000001477899152.html
new file mode 100644
index 00000000..ddf3db1c
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477899152.html
@@ -0,0 +1,31 @@
+
+
+Flow Control 1.0
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477899160.html b/docs/css/umn/en-us_topic_0000001477899160.html
new file mode 100644
index 00000000..8d4dc4da
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477899160.html
@@ -0,0 +1,31 @@
+
+
+Vector Retrieval
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477899164.html b/docs/css/umn/en-us_topic_0000001477899164.html
new file mode 100644
index 00000000..dd3be38a
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477899164.html
@@ -0,0 +1,85 @@
+
+
+Scaling Out a Cluster
+If the workloads on the data plane of a cluster change, you can scale out the cluster by increasing the number or capacity of its nodes. Services are not interrupted during cluster scale-out.
+ Prerequisites- The target cluster is available and has no tasks in progress.
- The target cluster has sufficient quotas available.
+
+ Constraints- The Node Specifications cannot be modified during scale-out. You can modify Node Specifications by referring to Changing Specifications.
- If you change the number and storage capacity of a specified type of node, nodes in other types will not be changed.
- The quota of nodes in different types varies. For details, see Table 1.
+
+Table 1 Number of nodes in different types
+- ess: ess: 1-32
+- ess, ess-master: ess: 1-200; ess-master: an odd number ranging from 3 to 9
+- ess, ess-client: ess: 1-32; ess-client: 1-32
+- ess, ess-cold: ess: 1-32; ess-cold: 1-32
+- ess, ess-master, ess-client: ess: 1-200; ess-master: an odd number ranging from 3 to 9; ess-client: 1-32
+- ess, ess-master, ess-cold: ess: 1-200; ess-master: an odd number ranging from 3 to 9; ess-cold: 1-32
+- ess, ess-client, ess-cold: ess: 1-32; ess-client: 1-32; ess-cold: 1-32
+- ess, ess-master, ess-client, ess-cold: ess: 1-200; ess-master: an odd number ranging from 3 to 9; ess-client: 1-32; ess-cold: 1-32
+Details about the four node types:
+- ess: the default node type, which is mandatory for cluster creation. The other three node types are optional.
+- ess-master: master node
+- ess-client: client node
+- ess-cold: cold data node
+
+
+
+
+
+
+ Procedure- Log in to the CSS management console.
- In the navigation pane, choose a cluster type. The cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- On the Modify Configuration page, choose the Scale Cluster tab and click Scale out to set parameters.
- Action: Select Scale out.
- Resource: The amount of resources after the change.
- Nodes: The number of nodes and the node storage capacity of the default data nodes.
- Nodes: For details, see Table 1.
- Node Storage Capacity: The value range depends on the node specifications. The value must be a multiple of 20.
+
+ - Click Next.
- Confirm the information and click Submit.
- Click Back to Cluster List to switch to the Clusters page. The Task Status is Scaling out. When Cluster Status changes to Available, the cluster has been successfully scaled out.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477899172.html b/docs/css/umn/en-us_topic_0000001477899172.html
new file mode 100644
index 00000000..e88dc2a7
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477899172.html
@@ -0,0 +1,263 @@
+
+
+Creating a Cluster in Non-Security Mode
+This section describes how to create an Elasticsearch cluster in non-security mode.
+Procedure
+- Log in to the CSS management console.
- On the Dashboard page, click Create Cluster in the upper right corner. The Create page is displayed.
Alternatively, choose Clusters > Elasticsearch in the navigation tree on the left. Click Create Cluster in the upper right corner. The Create page is displayed.
+ - Specify Region and AZ.
+
Table 1 Parameter description for Region and AZParameter
+ |
+Description
+ |
+
+
+Region
+ |
+Select a region for the cluster from the drop-down list on the right. Currently, only eu-de and eu-nl are supported.
+ |
+
+AZ
+ |
+Select AZs associated with the cluster region.
+You can select up to three general AZs. For details, see Deploying a Cross-AZ Cluster.
+ |
+
+
+
+
+ - Configure basic cluster information.
+
Table 2 Description of basic parametersParameter
+ |
+Description
+ |
+
+
+Version
+ |
+Select a cluster version from the drop-down list box.
+ |
+
+Name
+ |
+Cluster name, which contains 4 to 32 characters. Only letters, numbers, hyphens (-), and underscores (_) are allowed and the value must start with a letter.
+ NOTE: After a cluster is created, you can modify the cluster name as required. Click the name of the cluster to be modified. On the displayed Basic Information page, click the edit icon next to the cluster name. After the modification is complete, click the confirm icon to save the modification, or click the cancel icon to discard it.
+
+ |
+
+
+
+
+ - Configure cluster specifications.
+
Table 3 Parameter descriptionParameter
+ |
+Description
+ |
+
+
+Nodes
+ |
+Number of nodes in a cluster. Select a number from 1 to 32. You are advised to configure three or more nodes to ensure high availability of the cluster.
+- If neither a master node nor client node is enabled, the nodes specified by this parameter are used to serve as both the master node and client node. Nodes provide the cluster management, data storage, cluster access, and data analysis functions. To ensure data stability in the cluster, it is recommended that you set this parameter to a value no less than 3.
- If only the master node function is enabled, nodes specified by this parameter are used to store data and provide functions of client nodes.
- If both the master and client node functions are enabled, the nodes specified by this parameter are only used for storing data.
- If only the client node function is enabled, nodes specified by this parameter are used to store data and provide functions of the master node.
+ |
+
+CPU Architecture
+ |
+The supported type is determined by the actual regional environment. You can select x86.
+ |
+
+Node Specifications
+ |
+Specifications of nodes in a cluster. You can select a specified specification based on your needs. Each cluster supports only one specification.
+After you select a flavor, the CPU and memory corresponding to the current specification are displayed below the parameter. For example, if you select css.medium.8, then 1 vCPUs | 8 GB will be displayed, indicating that the node flavor you select contains one vCPU and 8 GB memory.
+ |
+
+Node Storage Type
+ |
+In the current version, the following options are available: Common I/O, High I/O, and Ultra-high I/O.
+ |
+
+Node Storage Capacity
+ |
+Storage space. Its value varies with node specifications.
+The node storage capacity must be a multiple of 20.
+ |
+
+Disk Encryption
+ |
+If you select this option, the nodes in the cluster you create will use encrypted EVS disks to protect data. By default, this option is not selected. Note that you cannot modify this setting after the cluster is created. Therefore, exercise caution when performing the setting.
+After you select this option, you need to select an available key from the Key Name drop-down list. If no key is available, click Create/View Key to go to the KMS management console and create or modify a key. For details, see Creating a CMK.
+Enabling disk encryption has no impact on your operations on a cluster (such as accessing the cluster and importing data to the cluster). However, after you enable disk encryption, operation performance deteriorates by about 10%.
+ NOTE: - If, after the cluster is created, the key used for disk encryption is disabled, scheduled for deletion (Pending deletion), or deleted while the cluster is in the Available state, cluster scale-out is not allowed. However, other operations on the cluster, such as restarting the cluster, creating snapshots, restoring the cluster, and importing data to the cluster, are not affected. In addition, this key can no longer be used to create new clusters.
- After a cluster is created, do not delete the key used by the cluster. Otherwise, the cluster will become unavailable.
- You cannot use default master keys whose aliases end with /default in KMS to encrypt snapshots.
+
+ |
+
+Master node
+ |
+The master node manages all nodes in the cluster. If more than 20 nodes are required to store and analyze a large amount of data, you are advised to enable the master node to ensure cluster stability. Otherwise, you are advised to set only the Nodes parameter and use the nodes as both master and client nodes.
+After enabling the master node, specify Node Specifications, Nodes, and Node Storage Type. The value of Nodes must be an odd number greater than or equal to 3. Up to nine nodes are supported. The value of Node Storage Capacity is fixed. You can select a storage type based on your needs.
+ |
+
+Client node
+ |
+The client node allows clients to access clusters and analyze data. If more than 20 nodes are required to store and analyze a large amount of data, you are advised to enable the client node to ensure cluster stability. Otherwise, you are advised to set only the Nodes parameter and use the nodes as both master and client nodes.
+After enabling the client node, specify Node Specifications, Nodes and Node Storage Type. The value of Nodes ranges from 1 to 32. The value of Node Storage Capacity is fixed. You can select a storage type based on your needs.
+ |
+
+Cold data node
+ |
+The cold data node is used to store historical data, for which query responses can be returned in minutes. If you do not require a quick query response, store historical data on cold data nodes to reduce costs.
+After enabling the cold data node, configure Node Specifications, Nodes, Node Storage Type, and Node Storage Capacity. The value of Nodes ranges from 1 to 32. Select Node Storage Type and Node Storage Capacity as required.
+After the cold data node is enabled, CSS automatically adds cold and hot tags to related nodes.
+ |
+
+
+
+
+Figure 1 Configuring host specifications
+ - Set the enterprise project.
When creating a CSS cluster, you can bind an enterprise project to the cluster if you have enabled the enterprise project function. You can select an enterprise project created by the current user from the drop-down list on the right or click View Project Management to go to the Enterprise Project Management console and create a new project or view existing projects.
+ - Set network specifications of the cluster.
+
Table 4 Parameter descriptionParameter
+ |
+Description
+ |
+
+
+VPC
+ |
+A VPC is a secure, isolated, and logical network environment.
+Select the target VPC. Click View VPC to enter the VPC management console and view the created VPC names and IDs. If no VPCs are available, create one.
+ NOTE: The VPC must contain CIDRs. Otherwise, cluster creation will fail. By default, a VPC will contain CIDRs.
+
+ |
+
+Subnet
+ |
+A subnet provides dedicated network resources that are isolated from other networks, improving network security.
+Select the target subnet. You can access the VPC management console to view the names and IDs of the existing subnets in the VPC.
+ |
+
+Security Group
+ |
+A security group is a collection of access control rules for ECSs that have the same security protection requirements and are mutually trusted in a VPC. To view more details about the security group, click View Security Group.
+ NOTE: - For cluster access purposes, ensure that the security group contains port 9200.
- If your cluster version is 7.6.2 or later, ensure that all the ports used for communication between nodes in the same security group are allowed. If such settings cannot be configured, ensure at least the access to port 9300 is allowed.
- After the port 9300 is enabled, if the cluster disk usage is high, delete expired data to release the disk storage space.
+
+ |
+
+Security Mode
+ |
+Security mode is disabled.
+ |
+
+
+
+
+Figure 2 Configuring network specifications
+ - Click Next: Configure Advanced Settings. Configure the automatic snapshot creation and other functions.
- Configure Cluster Snapshot. Set basic configuration and snapshot configuration.
The cluster snapshot function is enabled by default. You can also disable this function as required. To store automatic snapshots in OBS, an agency will be created to access OBS. Additional cost will be incurred if snapshots are stored in standard storage.
+
+Table 5 Cluster snapshot parameterParameter
+ |
+Description
+ |
+
+
+OBS bucket
+ |
+Select an OBS bucket for storing snapshots from the drop-down list box. You can also click Create Bucket on the right to create an OBS bucket. For details, see Creating a Bucket.
+The created or existing OBS bucket must meet the following requirements:
+- Storage Class is Standard or Warm.
- Region must be the same as that of the created cluster.
+ |
+
+Backup Path
+ |
+Storage path of the snapshot in the OBS bucket.
+The backup path configuration rules are as follows: - The backup path cannot contain the following characters: \:*?"<>|
- The backup path cannot start with a slash (/).
- The backup path cannot start or end with a period (.).
- The backup path cannot contain more than 1,023 characters.
+
+ |
+
+IAM Agency
+ |
+IAM agency authorized by the current account for CSS to access or maintain data stored in OBS. You can also click Create IAM Agency on the right to create an IAM agency. For details, see Creating an Agency.
+The created or existing IAM agency must meet the following requirements:
+- Agency Type must be Cloud service.
- Set Cloud Service to CSS.
- The agency must have the Tenant Administrator permission for the OBS(S3) project in OBS(S3).
+ |
+
+Snapshot Encryption
+ |
+Whether to enable the snapshot encryption function. Enabling the snapshot encryption function ensures the security of your snapshot data.
+After the snapshot encryption function is enabled, select a key from the Key Name drop-down list. If no key is available, click Create/View Key to switch to the KMS management console to create or modify a key. For details, see Creating a CMK.
+- You cannot use default master keys whose aliases end with /default in KMS to encrypt snapshots.
- If a snapshot has been stored in the OBS bucket, you cannot modify the parameters used for encrypting the snapshot.
- If the key used for encryption is in the Pending deletion or disable status, you cannot perform backup and restoration operations on the cluster. Specifically, you cannot create new snapshots for the cluster, or use existing snapshots to restore clusters. In this case, switch to the KMS management console and change the status of the target key to enable so that backup and restore operations are allowed on the cluster.
- If you delete the key used for encryption, you cannot perform backup and restore operations on the cluster. In addition, you cannot restore the deleted key. Therefore, exercise caution when deleting a key. If the key is deleted or is in the Pending deletion or disable state, automatic snapshot creation is allowed based on the configured snapshot policy. However, all automatic snapshot creation tasks will fail, and the failed tasks are displayed in the failed task list in the Failed Tasks dialog box. In such scenario, you are advised to disable the automatic snapshot creation function.
+ |
+
+
+
+
+
+Table 6 Automatic snapshot creation parameterParameter
+ |
+Description
+ |
+
+
+Snapshot Name Prefix
+ |
+The snapshot name prefix contains 1 to 32 characters and must start with a lowercase letter. Only lowercase letters, digits, hyphens (-), and underscores (_) are allowed. A snapshot name consists of a snapshot name prefix and a timestamp, for example, snapshot-1566921603720.
+ |
+
+Time Zone
+ |
+Time zone for the backup time, which cannot be changed. Specify Backup Started Time based on the time zone.
+ |
+
+Backup Start Time
+ |
+The time when the backup starts automatically every day. You can specify this parameter only in full hours, for example, 00:00 or 01:00. The value ranges from 00:00 to 23:00. Select a time from the drop-down list.
+ |
+
+Retention Period (days)
+ |
+The number of days that snapshots are retained in the OBS bucket. The value ranges from 1 to 90. You can specify this parameter as required. The system automatically deletes expired snapshots every hour at half past the hour.
+ |
+
+
+
+
+Figure 3 Setting parameters for automatic snapshot creation
+ - Configure advanced settings for the cluster.
- Default: The VPC Endpoint Service, Kibana Public Access, and Tag functions are disabled by default. You can manually enable these functions after the cluster is created.
- Custom: You can enable the VPC Endpoint Service and Tag functions as required.
+
+Table 7 Parameters for advanced settingsParameter
+ |
+Description
+ |
+
+
+VPC Endpoint Service
+ |
+After enabling this function, you can obtain a private domain name for accessing the cluster in the same VPC. For details, see Accessing a Cluster Using a VPC Endpoint.
+ NOTE: The VPC endpoint service cannot be enabled for a shared VPC.
+
+ |
+
+Kibana Public Access
+ |
+For clusters in non-security mode, Kibana cannot be accessed through the Internet.
+ |
+
+Tag
+ |
+Adding tags to clusters can help you identify and manage your cluster resources. You can customize tags or use tags predefined by Tag Management Service (TMS). For details, see Managing Tags.
+If your organization has enabled tag policies for CSS, you must comply with the tag policy rules when creating clusters; otherwise, cluster creation may fail. Contact the organization administrator to learn more about tag policies.
+ |
+
+
+
+
+
+ - Click Next: Confirm. Check the configuration and click Next to create a cluster.
- Click Back to Cluster List to switch to the Clusters page. The cluster you created is listed on the displayed page and its status is Creating. If the cluster is successfully created, its status will change to Available.
If the cluster creation fails, create the cluster again.
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477899176.html b/docs/css/umn/en-us_topic_0000001477899176.html
new file mode 100644
index 00000000..cf3dc42f
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477899176.html
@@ -0,0 +1,30 @@
+
+
+Flow Control 2.0
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477899180.html b/docs/css/umn/en-us_topic_0000001477899180.html
new file mode 100644
index 00000000..c50ecf7f
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477899180.html
@@ -0,0 +1,21 @@
+
+
+Creating a Cluster
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477899184.html b/docs/css/umn/en-us_topic_0000001477899184.html
new file mode 100644
index 00000000..8cef995f
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477899184.html
@@ -0,0 +1,18 @@
+
+
+Removing Specified Nodes
+If a cluster can process existing data without fully using its nodes, you can remove one or more specified nodes from the cluster to reduce costs. Services will not be interrupted during the removal of specified nodes.
+ PrerequisitesThe target cluster is available and has no tasks in progress.
+
+ Constraints- Ensure that the disk usage after scale-in is less than 80% and each AZ of each node type has at least one node.
- In a cross-AZ cluster, the difference between the numbers of nodes of the same type in different AZs cannot exceed 1.
- For a cluster without master nodes, the number of removed data nodes and cold data nodes in a scale-in must be fewer than half of the original number of data nodes and cold data nodes, and the number of remaining data nodes and cold data nodes after a scale-in must be greater than the maximum number of index replicas.
- For a cluster with master nodes, the number of removed master nodes in a scale-in must be fewer than half of the original master node number. After scale-in, there has to be an odd number of master nodes, and there has to be at least three of them.
+
+ Procedure- Log in to the CSS management console.
- In the navigation pane, choose a cluster type. The cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- On the Modify Configuration page, click the Scale In tab.
- On the Scale In tab page, set the following parameters:
- Whether to perform data migration: If this option is selected, data migration is performed. If the target node contains disabled indexes or indexes that have no replicas, this option must be selected.
- In the data node table, select the node to be scaled in.
+ - Click Next.
- Confirm the information and click Submit.
- Click Back to Cluster List to switch to the Clusters page. The Task Status is Scaling in. When Cluster Status changes to Available, the cluster has been successfully scaled in.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477899188.html b/docs/css/umn/en-us_topic_0000001477899188.html
new file mode 100644
index 00000000..4a77b12b
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477899188.html
@@ -0,0 +1,42 @@
+
+
+Adding Master/Client Nodes
+If workloads on the data plane of a cluster increase, you can add master or client nodes as needed. Services are not interrupted while they are added.
+ PrerequisitesThe target cluster is available and has no tasks in progress.
+
+ Constraints- If a cluster already has master and client nodes, the Add Master/Client Node tab is not displayed on the Modify Configuration page. In this case, you need to add the master or client nodes by referring to Scaling Out a Cluster.
- When you add master or client nodes, the number of nodes that can be configured varies depending on the node type. For details, see Table 1.
+
Table 1 Number of nodes in different typesNode Flavor
+ |
+Number
+ |
+
+
+Master node
+ |
+An odd number ranging from 3 to 9
+ |
+
+Client node
+ |
+1 to 32
+ |
+
+
+
+
+
+
+ Procedure- Log in to the CSS management console.
- In the navigation pane, choose a cluster type. The cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- On the Modify Configuration page, choose the Add Master/Client Node tab.
- Select the target node type and set the node specifications, quantity, and storage.
- Master and client nodes cannot be added at the same time.
- If a cluster already has a master or client node, you can only add nodes of the other type.
+ - Click Next.
- Confirm the information and click Submit.
Return to the cluster list page. The Task Status of the cluster is Scaling out.
+- If you added a master node and Cluster Status changed to Available, the master node has been successfully added.
If the cluster version is earlier than 7.x, when the Cluster Status changes to Available, you need to restart all data nodes and cold data nodes in the cluster to make the new node take effect. If the data nodes and cold data nodes are not restarted, the cluster may be reported as unavailable. (The cluster services still run properly.) For details, see Restarting a Cluster.
+
+ - If you added a client node and Cluster Status changed to Available, the client node has been added. You can restart data nodes and cold data nodes to shut down Cerebro and Kibana processes on the nodes.
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477899192.html b/docs/css/umn/en-us_topic_0000001477899192.html
new file mode 100644
index 00000000..029e9c10
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477899192.html
@@ -0,0 +1,297 @@
+
+
+Querying Vectors
+Standard QueryStandard vector query syntax is provided for vector fields with vector indexes. The following command returns the n (specified by size/topk) data records closest to the query vector.
+ POST my_index/_search
+{
+ "size":2,
+ "_source": false,
+ "query": {
+ "vector": {
+ "my_vector": {
+ "vector": [1, 1],
+ "topk":2
+ }
+ }
+ }
+}
+
+ Table 1 Parameters for standard queryParameter
+ |
+Description
+ |
+
+
+vector (the first one)
+ |
+Indicates that the query type is VectorQuery.
+ |
+
+my_vector
+ |
+Indicates the name of the vector field you want to query.
+ |
+
+vector (the second one)
+ |
+Indicates the vector value you want to query, which can be an array or a Base64 string.
+ |
+
+topk
+ |
+Number of most-similar results to return for the vector query. It is generally set to the same value as size.
+ |
+
+Optional parameters (Table 2)
 |
+Indicates optional query parameters. You can adjust the vector index parameters to achieve higher query performance or precision.
+ |
+
+
+
+
+
+ Table 2 Optional query parametersType
+ |
+Parameter
+ |
+Description
+ |
+
+
+Graph index configuration parameters
+ |
+ef
+ |
+Queue size of the neighboring node during the query. A larger value indicates a higher query precision and slower query speed. The default value is 200.
+Value range: (0, 100000]
+ |
+
+max_scan_num
+ |
+Maximum number of scanned nodes. A larger value indicates a higher query precision and slower query speed. The default value is 10000.
+Value range: (0, 1000000]
+ |
+
+IVF index configuration parameters
+ |
+nprobe
+ |
+Number of center points. A larger value indicates a higher query precision and slower query speed. The default value is 100.
+Value range: (0, 100000]
+ |
+
+
+
+
+
+ Compound QueryVector search can be used together with other Elasticsearch subqueries, such as Boolean query and post-filtering, for compound query.
+ In the following two examples, top 10 (topk) results closest to the query vector are queried first. filter retains only the results whose my_label field is red.
+ - Example of a Boolean query
POST my_index/_search
+{
+ "size": 10,
+ "query": {
+ "bool": {
+ "must": {
+ "vector": {
+ "my_vector": {
+ "vector": [1, 2],
+ "topk": 10
+ }
+ }
+ },
+ "filter": {
+ "term": { "my_label": "red" }
+ }
+ }
+ }
+}
+ - Example of post-filtering
GET my_index/_search
+{
+ "size": 10,
+ "query": {
+ "vector": {
+ "my_vector": {
+ "vector": [1, 2],
+ "topk": 10
+ }
+ }
+ },
+ "post_filter": {
+ "term": { "my_label": "red" }
+ }
+}
+
+
+ ScriptScore QueryYou can use script_score to perform nearest neighbor search (NNS) on vectors. The query syntax is provided below.
+ The pre-filtering condition can be any query. script_score traverses only the pre-filtered results, calculates the vector similarity, and sorts and returns the results. The performance of this query depends on the size of the intermediate result set after the pre-filtering. If the pre-filtering condition is set to match_all, brute-force search is performed on all data.
+ POST my_index/_search
+ {
+ "size":2,
+ "query": {
+ "script_score": {
+ "query": {
+ "match_all": {}
+ },
+ "script": {
+ "source": "vector_score",
+ "lang": "vector",
+ "params": {
+ "field": "my_vector",
+ "vector": [1.0, 2.0],
+ "metric": "euclidean"
+ }
+ }
+ }
+ }
+ }
+
+ Table 3 script_score parametersParameter
+ |
+Description
+ |
+
+
+source
+ |
+Script description. Its value is vector_score if the vector similarity is used for scoring.
+ |
+
+lang
+ |
+Script syntax description. Its value is vector.
+ |
+
+field
+ |
+Vector field name
+ |
+
+vector
+ |
+Vector data to be queried
+ |
+
+metric
+ |
+Measurement method, which can be euclidean, inner_product, cosine, or hamming.
+Default value: euclidean
+ |
+
+
+
+
+
+ Re-Score QueryIf the GRAPH_PQ or IVF_GRAPH_PQ index is used, the query results are sorted based on the asymmetric distance calculated by PQ. CSS supports re-scoring and ranking of query results to improve the recall rate.
+ Assuming that my_index is a PQ index, an example of re-scoring the query results is as follows:
+ GET my_index/_search
+ {
+ "size": 10,
+ "query": {
+ "vector": {
+ "my_vector": {
+ "vector": [1.0, 2.0],
+ "topk": 100
+ }
+ }
+ },
+ "rescore": {
+ "window_size": 100,
+ "vector_rescore": {
+ "field": "my_vector",
+ "vector": [1.0, 2.0],
+ "metric": "euclidean"
+ }
+ }
+ }
+
+ Table 4 Rescore parameter descriptionParameter
+ |
+Description
+ |
+
+
+window_size
+ |
+Vector retrieval returns topk search results and ranks the first window_size results.
+ |
+
+field
+ |
+Vector field name
+ |
+
+vector
+ |
+Vector data to be queried
+ |
+
+metric
+ |
+Measurement method, which can be euclidean, inner_product, cosine, or hamming.
+Default value: euclidean
+ |
+
+
+
+
+
+ Painless Syntax ExtensionCSS extension supports multiple vector distance calculation functions, which can be directly used in customized painless scripts to build flexible re-score formulas.
+ The following is an example:
+ POST my_index/_search
+{
+ "size": 10,
+ "query": {
+ "script_score": {
+ "query": {
+ "match_all": {}
+ },
+ "script": {
+ "source": "1 / (1 + euclidean(params.vector, doc[params.field]))",
+ "params": {
+ "field": "my_vector",
+ "vector": [1, 2]
+ }
+ }
+ }
+ }
+}
+ The following table lists the distance calculation functions supported by CSS.
+
+ Function Signature
+ |
+Description
+ |
+
+
+euclidean(Float[], DocValues)
+ |
+Euclidean distance function
+ |
+
+cosine(Float[], DocValues)
+ |
+Cosine similarity function
+ |
+
+innerproduct(Float[], DocValues)
+ |
+Inner product function
+ |
+
+hamming(String, DocValues)
+ |
+Hamming distance function. Only vectors whose dim_type is binary are supported. The input query vector must be a Base64-encoded character string.
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477899200.html b/docs/css/umn/en-us_topic_0000001477899200.html
new file mode 100644
index 00000000..4aa2cc87
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477899200.html
@@ -0,0 +1,148 @@
+
+
+Using Kibana or APIs to Import Data to Elasticsearch
+You can import data in various formats, such as JSON, to Elasticsearch in CSS by using Kibana or APIs.
+ Importing Data Using KibanaBefore importing data, ensure that you can use Kibana to access the cluster. The following procedure illustrates how to use the POST command to import data. - Log in to the CSS management console.
- In the navigation pane on the left, choose Clusters > Elasticsearch to switch to the Clusters page.
- On the Clusters page, locate the target cluster and click Access Kibana in the Operation column to log in to Kibana.
- Click Dev Tools in the navigation tree on the left.
- (Optional) On the Console page, run the related command to create an index for storing data and specify a custom mapping to define the data type.
If there is an available index in the cluster where you want to import data, skip this step. If there is no available index, create an index by referring to the following sample code.
+For example, on the Console page of Kibana, run the following command to create an index named my_store and specify a user-defined mapping to define the data type:
+Versions earlier than 7.x
+PUT /my_store
+{
+ "settings": {
+ "number_of_shards": 1
+ },
+ "mappings": {
+ "products": {
+ "properties": {
+ "productName": {
+ "type": "text"
+ },
+ "size": {
+ "type": "keyword"
+ }
+ }
+ }
+ }
+}
+
+Versions 7.x and later
+PUT /my_store
+{
+ "settings": {
+ "number_of_shards": 1
+ },
+ "mappings": {
+ "properties": {
+ "productName": {
+ "type": "text"
+ },
+ "size": {
+ "type": "keyword"
+ }
+ }
+ }
+}
+ - Run commands to import data. For example, run the following command to import a piece of data:
Versions earlier than 7.x
POST /my_store/products/_bulk
+{"index":{}}
+{"productName":"Latest art shirts for women in 2017 autumn","size":"L"}
+
+Versions 7.x and later
+POST /my_store/_bulk
+{"index":{}}
+{"productName":"Latest art shirts for women in 2017 autumn","size":"L"}
+The command output is similar to that shown in Figure 1. If the value of the errors field in the result is false, the data is successfully imported.
+Figure 1 Response message
+
+
+
+ Importing Data Using APIsYou can call the bulk API using the cURL command to import a JSON data file.
+ - You are advised to import a file smaller than 50 MB.
- This section uses a cluster in non-security mode as an example to describe how to run the cURL command to import data.
+
+ - Log in to the ECS that you use to access the cluster.
- Upload the JSON data file to the ECS.
- Run the following commands in the path where the JSON data file is stored in the ECS to import the JSON data to an Elasticsearch cluster.
In the command, replace {Private network address and port number of the node} with the private network address and port number of a node in the cluster. If that node fails to work, the command will fail to be executed. If the cluster contains multiple nodes, you can use the private network address and port number of any available node in the cluster. If the cluster contains only one node, restore the node and execute the command again. test.json indicates the JSON file whose data is to be imported. curl -X PUT "http://{Private network address and port number of the node}/_bulk" -H 'Content-Type: application/json' --data-binary @test.json
+
+If communication encryption has been enabled on the cluster where you will import data, you need to send HTTPS requests and add -k to the cURL command.
+curl -X PUT -k "https://{Private network address and port number of the node}/_bulk" -H 'Content-Type: application/json' --data-binary @test.json
 The -X parameter specifies the request method, and the -H parameter specifies a request header. In the preceding command, PUT is the value of -X, and 'Content-Type: application/json' is the value of -H. The --data-binary @test.json option specifies the JSON file to be imported. Do not add -k between a parameter and its value.
+
+Example 1: In this example, assume that you need to import data in the test.json file to an Elasticsearch cluster, where communication encryption is disabled and the private network address and port number of one node are 192.168.0.90 and 9200 respectively. The data in the test.json file is as follows:
+Versions earlier than 7.x
+{"index": {"_index":"my_store","_type":"products"}}
+{"productName":"Autumn new woman blouses 2019","size":"M"}
+{"index": {"_index":"my_store","_type":"products"}}
+{"productName":"Autumn new woman blouses 2019","size":"L"}
+Versions 7.x and later
+{"index": {"_index":"my_store"}}
+{"productName":"Autumn new woman blouse 2019","size":"M"}
+{"index": {"_index":"my_store"}}
+{"productName":"Autumn new woman blouse 2019","size":"L"}
+Perform the following steps to import the data:
+- Run the following command to create an index named my_store:
Versions earlier than 7.x
curl -X PUT http://192.168.0.90:9200/my_store -H 'Content-Type: application/json' -d '
+ {
+ "settings": {
+ "number_of_shards": 1
+ },
+ "mappings": {
+ "products": {
+ "properties": {
+ "productName": {
+ "type": "text"
+ },
+ "size": {
+ "type": "keyword"
+ }
+ }
+ }
+ }
+ }'
+
+Versions 7.x and later
+curl -X PUT http://192.168.0.90:9200/my_store -H 'Content-Type: application/json' -d '
+{
+ "settings": {
+ "number_of_shards": 1
+ },
+ "mappings": {
+ "properties": {
+ "productName": {
+ "type": "text"
+ },
+ "size": {
+ "type": "keyword"
+ }
+ }
+ }
+}'
+ - Run the following command to import the data in the test.json file:
curl -X PUT "http://192.168.0.90:9200/_bulk" -H 'Content-Type: application/json' --data-binary @test.json
+In this case, if the following information is displayed, the data is successfully imported:
+{"took":204,"errors":false,"items":[{"index":{"_index":"my_store","_type":"_doc","_id":"DJQkBIwBbJvUd2769Wi-","_version":1,"result":"created","_shards":{"total":2,"successful":1,"failed":0},"_seq_no":0,"_primary_term":1,"status":201}},{"index":{"_index":"my_store","_type":"_doc","_id":"DZQkBIwBbJvUd2769Wi_","_version":1,"result":"created","_shards":{"total":2,"successful":1,"failed":0},"_seq_no":1,"_primary_term":1,"status":201}}]}
+
+Example 2: In this example, assume that you need to import data in the test.json file to an Elasticsearch cluster, where communication encryption has been enabled. The node access address and the content of the test.json file are the same as those in example 1. Perform the following steps to import the data:
+- Run the following command to create an index named my_store:
curl -X PUT -k https://192.168.0.90:9200/my_store -H 'Content-Type: application/json' -d '
+ {
+ "settings": {
+ "number_of_shards": 1
+ },
+ "mappings": {
+ "products": {
+ "properties": {
+ "productName": {
+ "type": "text"
+ },
+ "size": {
+ "type": "keyword"
+ }
+ }
+ }
+ }
+ }'
+ - Run the following command to import the data in the test.json file:
curl -X PUT -k "https://192.168.0.90:9200/_bulk" -H 'Content-Type: application/json' --data-binary @test.json
+In this case, if the following information is displayed, the data is successfully imported:
+{"took":204,"errors":false,"items":[{"index":{"_index":"my_store","_type":"_doc","_id":"DJQkBIwBbJvUd2769Wi-","_version":1,"result":"created","_shards":{"total":2,"successful":1,"failed":0},"_seq_no":0,"_primary_term":1,"status":201}},{"index":{"_index":"my_store","_type":"_doc","_id":"DZQkBIwBbJvUd2769Wi_","_version":1,"result":"created","_shards":{"total":2,"successful":1,"failed":0},"_seq_no":1,"_primary_term":1,"status":201}}]}
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477899208.html b/docs/css/umn/en-us_topic_0000001477899208.html
new file mode 100644
index 00000000..e5ccfd94
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477899208.html
@@ -0,0 +1,19 @@
+
+
+Managing the Vector Index Cache
+The vector retrieval engine is developed in C++ and uses off-heap memory. You can use the following APIs to manage the index cache.
+ - View cache statistics.
GET /_vector/stats
+In the implementation of the vector plug-in, the vector index is the same as other types of Lucene indexes. Each segment constructs and stores an index file. During query, the index file is loaded to the non-heap memory. The plug-in uses the cache mechanism to manage the non-heap memory. You can use this API to query the non-heap memory usage, number of cache hits, and number of loading times.
+ - Preload the vector index.
PUT /_vector/warmup/{index_name}
+You can use this API to preload the vector index specified by index_name to the off-heap memory for query.
+ - Clear the cache.
PUT /_vector/clear/cache
+PUT /_vector/clear/cache/index_name
+The caching mechanism limits the non-heap memory usage when vector indexes are used. When the total index size exceeds the cache size limit, index entry swap-in and swap-out occur, which affects the query performance. You can use this API to clear unnecessary index cache to ensure the query performance of hot data indexes.
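+For example, assuming a vector index named my_index (a hypothetical name used here for illustration), the three APIs above could be combined as follows in Kibana Dev Tools:
+# Preload the vector index data of my_index into the off-heap cache before the first queries arrive
+PUT /_vector/warmup/my_index
+
+# Check the off-heap cache usage, cache hits, and load counts after the warmup
+GET /_vector/stats
+
+# Release the cache held by my_index once it is no longer queried frequently
+PUT /_vector/clear/cache/my_index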
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477899212.html b/docs/css/umn/en-us_topic_0000001477899212.html
new file mode 100644
index 00000000..d0142f7f
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477899212.html
@@ -0,0 +1,325 @@
+
+
+Using the Open Distro SQL Plug-in to Compile Queries
+For Elasticsearch 6.5.4 and later versions, Open Distro for Elasticsearch SQL lets you write queries in SQL rather than in the Elasticsearch query domain-specific language (DSL).
+ If you are already familiar with SQL and do not want to learn query DSL, this feature is a great option.
+ Basic Operations- Kibana (recommended)
 - Log in to Kibana and send requests to the _opendistro/_sql URI, using either request parameters or a request body, on the Dev Tools page.
POST _opendistro/_sql
+{
+ "query": "SELECT * FROM my-index LIMIT 50"
+}
+ - By default, the result is returned in the JSON structure. If you want the result to be returned in the CSV format, run the following command:
POST _opendistro/_sql?format=csv
+{
+ "query": "SELECT * FROM my-index LIMIT 50"
+}
+When data is returned in the CSV format, each row corresponds to a document and each column corresponds to a field.
+
+ - cURL commands
You can also run cURL commands in ECS to execute SQL statements.
+curl -XPOST https://localhost:9200/_opendistro/_sql -u username:password -k -d '{"query": "SELECT * FROM kibana_sample_data_flights LIMIT 10"}' -H 'Content-Type: application/json'
+
+
+
+ Supported OperationsOpen Distro for Elasticsearch supports the following SQL operations: statements, conditions, aggregations, include and exclude fields, common functions, joins, and show.
+ - Statements
+
Table 1 StatementsStatement
+ |
+Example
+ |
+
+
+Select
+ |
+SELECT * FROM my-index
+ |
+
+Delete
+ |
+DELETE FROM my-index WHERE _id=1
+ |
+
+Where
+ |
+SELECT * FROM my-index WHERE ['field']='value'
+ |
+
+Order by
+ |
+SELECT * FROM my-index ORDER BY _id asc
+ |
+
+Group by
+ |
+SELECT * FROM my-index GROUP BY range(age, 20,30,39)
+ |
+
+Limit
+ |
+SELECT * FROM my-index LIMIT 50 (default is 200)
+ |
+
+Union
+ |
+SELECT * FROM my-index1 UNION SELECT * FROM my-index2
+ |
+
+Minus
+ |
+SELECT * FROM my-index1 MINUS SELECT * FROM my-index2
+ |
+
+
+
+
+ As with any complex query, large UNION and MINUS statements can strain or even crash your cluster.
+
+
+ - Conditions
+
Table 2 ConditionsCondition
+ |
+Example
+ |
+
+
+Like
+ |
+SELECT * FROM my-index WHERE name LIKE 'j%'
+ |
+
+And
+ |
+SELECT * FROM my-index WHERE name LIKE 'j%' AND age > 21
+ |
+
+Or
+ |
+SELECT * FROM my-index WHERE name LIKE 'j%' OR age > 21
+ |
+
+Count distinct
+ |
+SELECT count(distinct age) FROM my-index
+ |
+
+In
+ |
+SELECT * FROM my-index WHERE name IN ('alejandro', 'carolina')
+ |
+
+Not
+ |
+SELECT * FROM my-index WHERE name NOT IN ('jane')
+ |
+
+Between
+ |
+SELECT * FROM my-index WHERE age BETWEEN 20 AND 30
+ |
+
+Aliases
+ |
+SELECT avg(age) AS Average_Age FROM my-index
+ |
+
+Date
+ |
+SELECT * FROM my-index WHERE birthday='1990-11-15'
+ |
+
+Null
+ |
+SELECT * FROM my-index WHERE name IS NULL
+ |
+
+
+
+
+ - Aggregations
+
Table 3 AggregationsAggregation
+ |
+Example
+ |
+
+
+avg()
+ |
+SELECT avg(age) FROM my-index
+ |
+
+count()
+ |
+SELECT count(age) FROM my-index
+ |
+
+max()
+ |
+SELECT max(age) AS Highest_Age FROM my-index
+ |
+
+min()
+ |
+SELECT min(age) AS Lowest_Age FROM my-index
+ |
+
+sum()
+ |
+SELECT sum(age) AS Age_Sum FROM my-index
+ |
+
+
+
+
+
+ - Include and exclude fields
+
Table 4 Include and exclude fieldsPattern
+ |
+Example
+ |
+
+
+include()
+ |
+SELECT include('a*'), exclude('age') FROM my-index
+ |
+
+exclude()
+ |
+SELECT exclude('*name') FROM my-index
+ |
+
+
+
+
+
+ - Functions
+
Table 5 FunctionsFunction
+ |
+Example
+ |
+
+
+floor
+ |
+SELECT floor(number) AS Rounded_Down FROM my-index
+ |
+
+trim
+ |
+SELECT trim(name) FROM my-index
+ |
+
+log
+ |
+SELECT log(number) FROM my-index
+ |
+
+log10
+ |
+SELECT log10(number) FROM my-index
+ |
+
+substring
+ |
+SELECT substring(name, 2,5) FROM my-index
+ |
+
+round
+ |
+SELECT round(number) FROM my-index
+ |
+
+sqrt
+ |
+SELECT sqrt(number) FROM my-index
+ |
+
+concat_ws
+ |
+SELECT concat_ws(' ', age, height) AS combined FROM my-index
+ |
+
+/
+ |
+SELECT number / 100 FROM my-index
+ |
+
+%
+ |
+SELECT number % 100 FROM my-index
+ |
+
+date_format
+ |
+SELECT date_format(date, 'Y') FROM my-index
+ |
+
+
+
+
+ You must enable fielddata in the document mapping for most string functions to work properly.
+
+
+
+
+
+ JoinsOpen Distro for Elasticsearch SQL supports inner joins, left outer joins, and cross joins (see the example after this list). Joins have the following constraints:
+ - You can only join two indexes.
+ - You must use an alias for an index (for example, people p).
 - In an ON clause, you can only use AND conditions.
+
+ - You cannot use GROUP BY or ORDER BY to obtain results.
+ - LIMIT with OFFSET (for example, LIMIT 25 OFFSET 25) is not supported.
+
+ JDBC DriverThe Java Database Connectivity (JDBC) driver allows you to integrate Open Distro for Elasticsearch with your business intelligence (BI) applications.
+ For details about how to download and use JAR files, see GitHub Repositories.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477899216.html b/docs/css/umn/en-us_topic_0000001477899216.html
new file mode 100644
index 00000000..89ea42af
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477899216.html
@@ -0,0 +1,152 @@
+
+
+Change History
+
+ Released On
+ |
+Description
+ |
+
+
+2024-05-17
+ |
+Added the following sections:
+
+ |
+
+2024-02-07
+ |
+Deleted:
+
+Added:
+New examples in section "Enhanced Import Performance" > Optimization of Other Parameters.
+ |
+
+2023-12-15
+ |
+Delete the following sections:
+- Using the OpenDistro Alarm Plugin
- (Optional) Service Authorization
- Logstash
- Intelligent O&M
+ |
+
+2023-09-25
+ |
+Added the following sections:
+
+ |
+
+2023-03-28
+ |
+- Optimized the content structure of the following sections:
- What Is Cloud Search Service?
- Related Services
- Clusters in Security Mode
+ - Updated the procedure description in the following sections:
+
- Added:
+
- Deleted the following sections:
- What Is Kibana?
- What is Cerebro?
- Suggestions on Using Elasticsearch
- Viewing Monitoring Metrics
- Creating Alarm Rules
+
+ |
+
+2022-09-14
+ |
+Updated Index Backup and Restoration.
+ |
+
+2022-07-28
+ |
+Added the cluster version 7.10.2.
+ |
+
+2022-06-30
+ |
+Added the description about cluster version 7.9.3.
+Supported the VPC endpoint service Accessing a Cluster Using a VPC Endpoint.
+Optimized following sections based on the use cases and operation flow:
+
+ |
+
+2020-08-30
+ |
+- Added: cluster version 7.6.2.
- Added:
+
+ |
+
+2019-03-15
+ |
+
+ |
+
+2019-01-26
+ |
+Accepted in OTC-3.2/Agile-01.2019.
+ |
+
+2019-01-17
+ |
+
+ |
+
+2018-12-17
+ |
+
+ |
+
+2018-12-03
+ |
+Updated the screenshots in section "Why Does My ECS Fail to Connect to a Cluster?".
+ |
+
+2018-11-02
+ |
+Updated descriptions in the following sections based on software function changes:
+Index Backup and Restoration
+ |
+
+2018-10-19
+ |
+Added parameter descriptions and modified the description of operations in the following section:
+Index Backup and Restoration
+ |
+
+2018-10-12
+ |
+Added the description about the automatic snapshot deletion time in the following section:
+Index Backup and Restoration
+ |
+
+2018-10-08
+ |
+Accepted in OTC-3.2.
+ |
+
+2018-09-28
+ |
+
+ |
+
+2018-09-14
+ |
+Added Cluster List Overview.
+ |
+
+2018-08-20
+ |
+
+ |
+
+2018-07-31
+ |
+This issue is the first official release.
+ |
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477899220.html b/docs/css/umn/en-us_topic_0000001477899220.html
new file mode 100644
index 00000000..a2fcdfc4
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477899220.html
@@ -0,0 +1,18 @@
+
+
+Large Query Isolation
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001477899224.html b/docs/css/umn/en-us_topic_0000001477899224.html
new file mode 100644
index 00000000..699ca50b
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001477899224.html
@@ -0,0 +1,13 @@
+
+
+Stopping Index Synchronization
+You can specify multiple indexes or use wildcards to match the target indexes and terminate their synchronization tasks. Subsequent modifications to the indexes in the primary cluster will not be synchronized to the secondary cluster. The read-only state of the indexes in the secondary cluster is canceled, and new data can be written to the secondary cluster.
+ An example request is as follows:
+ PUT log*/stop_remote_sync
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001504911882.html b/docs/css/umn/en-us_topic_0000001504911882.html
new file mode 100644
index 00000000..0f724fdd
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001504911882.html
@@ -0,0 +1,37 @@
+
+
+
+ Elasticsearch
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001527697777.html b/docs/css/umn/en-us_topic_0000001527697777.html
new file mode 100644
index 00000000..7e5cf7f2
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001527697777.html
@@ -0,0 +1,23 @@
+
+
+How Do I Query Snapshot Information?
+PrerequisitesThe snapshot function has been enabled for the cluster and snapshot information has been configured.
+
+ Querying a Snapshot- Log in to the CSS management console, and click Clusters in the navigation pane. On the displayed Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the left navigation pane of the Kibana page, click Dev Tools. Click Get to work to switch to the Console page.
Enter the code as required in the left pane, click the execute icon to run the command, and view the result in the right pane.
+ - Run the GET _snapshot/_all command to query information about all repositories.
Figure 1 Querying information about all repositories
+- bucket: OBS bucket name
- base_path: Path. It consists of a fixed prefix and a cluster name.
- endpoint: OBS domain name
- region: your region
+ - Query snapshot information.
- Run the GET _snapshot/repo_auto/_all command to query the list of all the snapshots in the current repository.
Figure 2 Snapshot information
+- snapshot: snapshot name
- state: snapshot status
- start_time, start_time_in_millis, end_time, and end_time_in_millis: snapshot time
- shards: the number of shards. total indicates the total number of shards. failed indicates the number of failures. successful indicates the number of successes.
+ - Run the GET _snapshot/repo_auto/$snapshot-xxx command to query information about a specified snapshot.
- Replace $snapshot-xxx with the actual snapshot name.
- repo_auto is followed by a snapshot name or wildcard characters.
+
+ - (Optional) Delete information about a specified snapshot.
To delete a specific snapshot, run the DELETE _snapshot/repo_auto/$snapshot-xxx command.
+Replace $snapshot-xxx with the actual snapshot name. A consolidated example of these commands is provided after this procedure.
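+The following consolidates the preceding commands as they would be entered in Kibana Dev Tools. The repository name repo_auto comes from the procedure above; snapshot-20240517 is a hypothetical snapshot name used only for illustration:
+# List all repositories and their OBS settings
+GET _snapshot/_all
+
+# List all snapshots in the repo_auto repository
+GET _snapshot/repo_auto/_all
+
+# Query a specific snapshot (replace the name with your own)
+GET _snapshot/repo_auto/snapshot-20240517
+
+# (Optional) Delete a specific snapshot
+DELETE _snapshot/repo_auto/snapshot-20240517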
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001527697781.html b/docs/css/umn/en-us_topic_0000001527697781.html
new file mode 100644
index 00000000..7f791c9d
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001527697781.html
@@ -0,0 +1,25 @@
+
+
+How Do I Set the Default Maximum Number of Records Displayed on a Page for an Elasticsearch Cluster?
+Solution- Method 1
Open Kibana and run the following commands on the Dev Tools page:
+PUT _all/_settings?preserve_existing=true
+{
+"index.max_result_window" : "10000000"
+}
+ - Method 2
Run the following commands in the background:
+curl -XPUT 'http://localhost:9200/_all/_settings?preserve_existing=true' -H 'Content-Type: application/json' -d '
+{
+"index.max_result_window":"1000000"
+}'
+
+
+ This configuration consumes memory and CPU resources. Exercise caution when setting this parameter.
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001527697785.html b/docs/css/umn/en-us_topic_0000001527697785.html
new file mode 100644
index 00000000..7eb600c3
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001527697785.html
@@ -0,0 +1,35 @@
+
+
+
+ Resource Usage and Change
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001527697789.html b/docs/css/umn/en-us_topic_0000001527697789.html
new file mode 100644
index 00000000..0786682a
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001527697789.html
@@ -0,0 +1,19 @@
+
+
+
+ Components
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001527697793.html b/docs/css/umn/en-us_topic_0000001527697793.html
new file mode 100644
index 00000000..d0751c51
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001527697793.html
@@ -0,0 +1,13 @@
+
+
+Are Ports 9200 and 9300 Both Open?
+Yes. Port 9200 is used by external systems to access CSS clusters, and port 9300 is used for communication between nodes.
+ The methods for accessing port 9300 are as follows:
+ - If your client is in the same VPC and subnet with the CSS cluster, you can access it directly.
- If your client is in the same VPC with but different subnet from the CSS cluster, apply for a route separately.
 - If your client is in a different VPC and subnet from the CSS cluster, create a VPC peering connection to enable communication between the two VPCs, and then apply for routes to connect the two subnets.
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001527697797.html b/docs/css/umn/en-us_topic_0000001527697797.html
new file mode 100644
index 00000000..be08f5ce
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001527697797.html
@@ -0,0 +1,13 @@
+
+
+How Do I Check the Numbers of Shards and Replicas in a Cluster on the CSS Console?
+- Log in to the console.
- On the Clusters page, click Access Kibana in the Operation column of a cluster.
- Log in to Kibana and choose Dev Tools.

 - On the Console page, run the GET _cat/indices?v command to query the number of shards and replicas in a cluster. In the following figure, the pri column indicates the number of index shards, and the rep column indicates the number of replicas. After an index is created, its pri value cannot be modified, but its rep value can be modified (see the example after this procedure).

+
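+To change the rep value of an existing index, the standard Elasticsearch index settings API can be used. A minimal sketch, assuming a hypothetical index named my_index and a target of two replicas per shard:
+PUT /my_index/_settings
+{
+  "number_of_replicas": 2
+}
+The pri value, in contrast, is fixed at index creation and can generally only be changed by reindexing the data into a new index.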
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001527777425.html b/docs/css/umn/en-us_topic_0000001527777425.html
new file mode 100644
index 00000000..0eee1f04
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001527777425.html
@@ -0,0 +1,14 @@
+
+
+Why Does the Disk Usage Increase After the delete_by_query Command Was Executed to Delete Data?
+Running the delete_by_query command only marks the target documents as deleted instead of physically removing them. When you search for data, all data is scanned and the documents marked as deleted are filtered out of the results.
+ The space occupied by documents marked as deleted is not released immediately after you call the deletion API. It is released only when the next segment merge is performed.
+ Because the deletion marks themselves occupy disk space, the disk usage may increase after you run the delete_by_query command. The example after this section shows how to trigger a segment merge manually to reclaim the space sooner.
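+If the space needs to be reclaimed sooner, a segment merge can be triggered manually with the standard Elasticsearch force merge API. A minimal sketch, assuming a hypothetical index named my_index; note that force merging is I/O-intensive and is best run during off-peak hours:
+# Merge segments and expunge documents marked as deleted
+POST /my_index/_forcemerge?only_expunge_deletes=true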
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001527777429.html b/docs/css/umn/en-us_topic_0000001527777429.html
new file mode 100644
index 00000000..3156aff2
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001527777429.html
@@ -0,0 +1,16 @@
+
+
+The Average Memory Usage of an Elasticsearch Cluster Reaches 98%
+SymptomThe cluster monitoring result shows that the average memory usage of a cluster is 98%. Does it affect cluster performance?
+
+ Possible CauseIn an ES cluster, about 50% of a node's memory is used by the Elasticsearch JVM heap, and the rest is used by Lucene and the operating system to cache files. It is normal for the average memory usage to reach 98%.
+
+ SolutionYou can monitor the cluster memory usage by checking the maximum JVM heap usage and average JVM heap usage.
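+For example, the JVM heap usage of each node can be checked in Kibana Dev Tools using standard Elasticsearch APIs:
+# Per-node heap usage as a compact table
+GET _cat/nodes?v&h=name,heap.percent,heap.current,heap.max
+
+# Detailed JVM statistics for all nodes
+GET _nodes/stats/jvm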
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001527777433.html b/docs/css/umn/en-us_topic_0000001527777433.html
new file mode 100644
index 00000000..e97f09cb
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001527777433.html
@@ -0,0 +1,23 @@
+
+
+
+ Clusters in Security Mode
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001527777437.html b/docs/css/umn/en-us_topic_0000001527777437.html
new file mode 100644
index 00000000..267d1856
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001527777437.html
@@ -0,0 +1,16 @@
+
+
+Can I Modify the TLS Algorithm of an Elasticsearch Cluster?
+You can modify TLS algorithms in CSS 7.6.2 and later versions.
+ - Log in to the CSS management console.
- In the navigation pane, choose Clusters. The cluster list is displayed.
- Click the name of the target cluster to go to the cluster details page.
- Select Parameter Configurations, click Edit, expand the Customize parameter, and click Add.
Add the opendistro_security.ssl.http.enabled_ciphers parameter and set it to ['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384'].
 If the parameter value contains multiple cipher suites, enclose the value in a pair of square brackets ([]). If the parameter value is a single cipher suite, enclose the value in a pair of single quotation marks ('').
+
 - After the modification is complete, click Submit. In the displayed Submit Configuration dialog box, select the box indicating "I understand that the modification will take effect after the cluster is restarted." and click Yes.
If the Status is Succeeded in the parameter modification list, the modification has been saved.
+ - Return to the cluster list and choose More > Restart in the Operation column to restart the cluster and make the modification take effect.
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001527777441.html b/docs/css/umn/en-us_topic_0000001527777441.html
new file mode 100644
index 00000000..7180ca77
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001527777441.html
@@ -0,0 +1,17 @@
+
+
+How Do I Query Index Data on Kibana in an ES Cluster?
+
+ Run the following command to query index data through an API on Kibana:
+ GET indexname/_search
+ The returned data is shown in the following figure.
+ Figure 1 Returned data
 - took: the number of milliseconds the query took.
- timed_out: whether the query timed out.
- _shards: the shard statistics. In this example, data is split into five shards, all five shards were searched successfully, no shard failed to return results, and no shard was skipped.
- hits.total: the number of matching documents. Three documents are returned in this example.
- max_score: the highest score among the returned documents. A document that is more relevant to your search criteria has a higher score.
- hits.hits: the detailed information of the returned documents.
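+The preceding command returns all documents in the index. To filter by a field, a query body can be added. A minimal sketch, assuming a hypothetical field named productName:
+GET indexname/_search
+{
+  "query": {
+    "match": { "productName": "shirts" }
+  },
+  "size": 10
+}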
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001527777445.html b/docs/css/umn/en-us_topic_0000001527777445.html
new file mode 100644
index 00000000..9c2a2a9d
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001527777445.html
@@ -0,0 +1,11 @@
+
+
+How Do I Delete Index Data?
+- Manually: Run the DELETE /my_index command in Kibana.
 - Automatically: Create scheduled tasks to call the index deletion request and periodically execute the tasks. CSS supports Open Distro Index State Management (ISM). For details, see https://opendistro.github.io/for-elasticsearch-docs/docs/im/ism/. A sketch of an ISM policy is provided after this list.
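+The following is a minimal sketch of an ISM policy that deletes indexes a fixed time after creation, based on the Open Distro ISM documentation linked above. The policy name delete_after_7d and the 7-day threshold are illustrative; verify the exact API against the ISM version of your cluster:
+PUT _opendistro/_ism/policies/delete_after_7d
+{
+  "policy": {
+    "description": "Delete indexes 7 days after creation",
+    "default_state": "hot",
+    "states": [
+      {
+        "name": "hot",
+        "actions": [],
+        "transitions": [
+          { "state_name": "delete", "conditions": { "min_index_age": "7d" } }
+        ]
+      },
+      {
+        "name": "delete",
+        "actions": [ { "delete": {} } ],
+        "transitions": []
+      }
+    ]
+  }
+}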
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001527777449.html b/docs/css/umn/en-us_topic_0000001527777449.html
new file mode 100644
index 00000000..c8da36e2
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001527777449.html
@@ -0,0 +1,23 @@
+
+
+Why Are All New Index Shards Allocated to the Same Node?
+Possible CauseThe possible causes are as follows:
 - Shards were unevenly distributed in previous index allocations, and the predominant factor in the latest shard allocation was balance.shard. To balance the shard distribution across nodes, the new shards were allocated to the node that had only a small number of shards.
- A new node was added to the cluster and the automatic cluster rebalancing had not yet completed, while the predominant factor was balance.shard. The shards of the new index were therefore allocated to the new node, which did not have any shards yet.
+ The following two parameters are used to balance the shard allocation in a cluster:
+ cluster.routing.allocation.balance.index (default value: 0.45f)
+ cluster.routing.allocation.balance.shard (default value: 0.55f)
+ - balance.index: A larger value indicates that all the shards of an index are more evenly distributed across nodes. For example, if an index has six shards and there are three data nodes, two shards will be distributed on each node.
- balance.shard: A larger value indicates that all the shards of all the indexes are more evenly distributed across nodes. For example, if index a has two shards, index b has four, and there are three data nodes, two shards will be distributed on each node.
- You can specify both balance.index and balance.shard to balance the shard allocation.
+
+
+ SolutionTo prevent all the shards of an index from being allocated to a single node, use either of the following methods (a sketch of both is provided after this list):
+ - To create an index during cluster scale-out, configure the following parameter:
"index.routing.allocation.total_shards_per_node": 2
+That is, allow no more than two shards of an index to be allocated on each node. Determine the maximum number of shards allowed on each node based on the number of data nodes in your cluster and the number of index shards (both primary and replica).
+
- If too many shards are distributed on only a few nodes, you can move some of the shards to other nodes to balance the distribution. Run the move command of POST _cluster/reroute. The rebalance module will automatically exchange the moved shard with a shard on the destination node. Determine the values of balance.index and balance.shard as needed.
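+A minimal sketch of both methods, using hypothetical index and node names (my_new_index, node-1, node-2); these are standard Elasticsearch APIs:
+# Method 1: limit each node to at most two shards of the new index at creation time
+PUT /my_new_index
+{
+  "settings": {
+    "index.routing.allocation.total_shards_per_node": 2
+  }
+}
+
+# Method 2: manually move shard 0 of the index from node-1 to node-2
+POST _cluster/reroute
+{
+  "commands": [
+    { "move": { "index": "my_new_index", "shard": 0, "from_node": "node-1", "to_node": "node-2" } }
+  ]
+}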
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001527937329.html b/docs/css/umn/en-us_topic_0000001527937329.html
new file mode 100644
index 00000000..3003b84e
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001527937329.html
@@ -0,0 +1,13 @@
+
+
+Can I Export Data from Kibana?
+Exporting data from Kibana requires the SQL Workbench plugin. Currently, you can only export data from Kibana 7.6.2 or later.
+ In SQL Workbench of Kibana, you can enter Elasticsearch SQL statements to query data or click Download to export data. You can export 1 to 200 data records. By default, 200 data records are exported.
+ Figure 1 SQL Workbench
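+For example, a statement like the following can be entered in SQL Workbench before clicking Download (the index name my_index is a placeholder):
+SELECT * FROM my_index LIMIT 200;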
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001527937333.html b/docs/css/umn/en-us_topic_0000001527937333.html
new file mode 100644
index 00000000..5ecc93be
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001527937333.html
@@ -0,0 +1,18 @@
+
+
+How Do I Configure a Two-Replica CSS Cluster?
+- Run GET _cat/indices?v in Kibana to check the number of index replicas. If the value of rep is 1, each primary shard has one replica, that is, the cluster stores two copies of the data.

+ - If the value of rep is not 1, run the following command to set the number of replicas:
PUT /index/_settings
+{
+"number_of_replicas" : 1 //Number of replicas
+}
+ index specifies the index name. Set this parameter based on site requirements.
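+To apply the same replica setting to all existing indexes at once, a request like the following can be used (a sketch; you can verify the result with GET _cat/indices?v afterwards):
+PUT /_all/_settings
+{
+"number_of_replicas" : 1
+}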
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001527937337.html b/docs/css/umn/en-us_topic_0000001527937337.html
new file mode 100644
index 00000000..692ef457
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001527937337.html
@@ -0,0 +1,11 @@
+
+
+What Is the Maximum Storage Capacity of CSS?
+You can configure up to 200 nodes for a cluster (each node corresponds to an ECS). The maximum storage capacity of an ECS is the total capacity of EVS disks attached to the ECS. You can calculate the total storage capacity of CSS based on the sizes of EVS disks attached to different ECSs. The EVS disk size is determined by the node specifications selected when you create the cluster.
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001527937341.html b/docs/css/umn/en-us_topic_0000001527937341.html
new file mode 100644
index 00000000..1be3f294
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001527937341.html
@@ -0,0 +1,15 @@
+
+
+Can Elasticsearch Data Be Migrated Between VPCs?
+Elasticsearch does not support direct data migration between different VPCs. You can use either of the following methods to migrate data.
+ Method 1Use the backup and restoration function to migrate cluster data.
+
+ Method 2- Establish a VPC peering connection to connect the two VPCs.
- After the network is connected, use Logstash to migrate data.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001527937345.html b/docs/css/umn/en-us_topic_0000001527937345.html
new file mode 100644
index 00000000..e666b403
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001527937345.html
@@ -0,0 +1,18 @@
+
+
+Kibana
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528097289.html b/docs/css/umn/en-us_topic_0000001528097289.html
new file mode 100644
index 00000000..b2578f09
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528097289.html
@@ -0,0 +1,18 @@
+
+
+How Do I Clear Expired Data to Release Storage Space?
+- Run the following command to delete the data of a single index:
curl -XDELETE http://IP:9200/Index_name
+ IP: the IP address of any node in the cluster
+
+ - Run the following command to delete all Logstash data of a day. For example, delete all data on June 19, 2017:
For a cluster in non-security mode: curl -XDELETE 'http://IP:9200/logstash-2017.06.19*'
+For a cluster in security mode: curl -XDELETE -u username:password 'https://IP:9200/logstash-2017.06.19*' -k
+ - username: username of the administrator. The default value is admin.
- password: the password set during cluster creation
- IP: the IP address of any node in the cluster
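+Before deleting data, it may help to check which indexes occupy the most storage (a sketch; in security mode, add -u username:password and -k as shown above):
+curl -XGET 'http://IP:9200/_cat/indices?v&s=store.size:desc'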
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528097293.html b/docs/css/umn/en-us_topic_0000001528097293.html
new file mode 100644
index 00000000..b7b1e22f
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528097293.html
@@ -0,0 +1,12 @@
+
+
+What Storage Options Does CSS Provide?
+CSS uses EVS and local disks to store your indices. During cluster creation, you can specify the EVS disk type and specifications (the EVS disk size).
+ - Supported EVS disk types include common I/O, high I/O, and ultra-high I/O.
- The EVS disk size varies depending on the node specifications selected when you create a cluster.
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528097297.html b/docs/css/umn/en-us_topic_0000001528097297.html
new file mode 100644
index 00000000..a03b8844
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528097297.html
@@ -0,0 +1,15 @@
+
+
+Which CSS Metrics Should I Focus On?
+Disk usage and cluster health status are two key metrics that you can focus on. You can log in to Cloud Eye and configure alarm rules for these metrics. If alarms are reported, handle them by taking appropriate measures.
+ Configuration examples:
+ - Alarms are reported if the disk usage is higher than or equal to a specified value (for example, 85%) and has reached this value multiple times (for example, 5 times) within a specified time period (for example, 5 minutes).
- Alarms are reported if the value of the cluster health status metric exceeds 0 multiple times (for example, 5 times) within a specified time period (for example, 5 minutes).
+ Measures:
+ - If disk usage alarms are reported, check the available disk space, check whether data can be deleted from cluster nodes or archived to other systems to free up space, or consider expanding the disk capacity.
- If cluster health status alarms are reported, check whether shard allocation is normal and whether any shards have been lost, and check on Cerebro whether the Elasticsearch process has been restarted.
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528097305.html b/docs/css/umn/en-us_topic_0000001528097305.html
new file mode 100644
index 00000000..2b1667d1
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528097305.html
@@ -0,0 +1,22 @@
+
+
+What Are Regions and AZs?
+Regions and AZsA region and availability zone (AZ) identify the location of a data center. You can create resources in a specific region and AZ.
+ - A region is a physical data center. Each region is completely independent, which improves fault tolerance and stability. After a resource is created, its region cannot be changed.
- An AZ is a physical location using independent power supplies and networks. Faults in an AZ do not affect other AZs. A region can contain multiple AZs that are physically isolated but networked together. This enables low-cost and low-latency network connections.
+ Figure 1 shows the relationship between regions and AZs. Figure 1 Regions and AZs
+
+
+ Region SelectionYou are advised to select a region close to you or your target users. This reduces network latency and improves the access success rate.
+
+ AZ SelectionWhen determining whether to deploy resources in the same AZ, consider your application's requirements for disaster recovery (DR) and network latency.
+ - To prioritize DR capabilities, deploy resources in different AZs in the same region.
- To prioritize network latency, deploy resources in the same AZ.
+
+ Regions and EndpointsBefore using an API to call resources, you will need to specify the resource region and endpoint. For details, see "Endpoints" in Cloud Search Service API Reference.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528097309.html b/docs/css/umn/en-us_topic_0000001528097309.html
new file mode 100644
index 00000000..a7e4c4c7
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528097309.html
@@ -0,0 +1,14 @@
+
+
+How Do I Migrate a CSS Cluster Across Regions?
+CSS clusters cannot be directly migrated. You can back up a cluster to an OBS bucket and restore it to a new region.
+ - If the OBS bucket is in the same region as your CSS cluster, migrate the cluster by following the instructions in Index Backup and Restoration.
- If the OBS bucket is not in the same region as your CSS cluster, configure cross-region replication to back up the cluster to the bucket, and migrate the cluster by following the instructions in Index Backup and Restoration.
+ - Before cross-region replication, ensure the snapshot folder of the destination cluster is empty. Otherwise, the snapshot information cannot be updated to the snapshot list of the destination cluster.
- Before every migration, ensure the folder is empty.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528097313.html b/docs/css/umn/en-us_topic_0000001528097313.html
new file mode 100644
index 00000000..5bbcbc3a
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528097313.html
@@ -0,0 +1,15 @@
+
+
+How Do I Create a Type Under an Index in an Elasticsearch 7.x Cluster?
+In Elasticsearch 7.x and later versions, types cannot be created for indexes.
+ If you need to use types, add include_type_name=true to the command. For example:
+ PUT _template/urldialinfo_template?include_type_name=true
+ After the command is executed, the following information is displayed:
+ "#! Deprecation: [types removal] Specifying include_type_name in put index template requests is deprecated. The parameter will be removed in the next major version. "
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528097317.html b/docs/css/umn/en-us_topic_0000001528097317.html
new file mode 100644
index 00000000..17320950
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528097317.html
@@ -0,0 +1,25 @@
+
+
+How Do I Clear the Cache of a CSS Cluster?
+- Clear the fielddata
During aggregation and sorting, data are converted to the fielddata structure, which occupies a large amount of memory.
+- Run the following command on Kibana to check the memory occupied by the fielddata of each index:
GET /_cat/indices?v&h=index,fielddata.memory_size&s=fielddata.memory_size:desc
+ - If the memory usage of fielddata is too high, you can run the following command to clear fielddata:
POST /test/_cache/clear?fielddata=true
+
+In the preceding command, test indicates the name of the index whose fielddata occupies a large amount of memory.
+ - Clear segments
The FST structure of each segment is loaded to the memory and will not be cleared. If the number of index segments is too large, the memory usage will be high. You are advised to periodically clear the segments.
+- Run the following command on Kibana to check the number of segments and their memory usage on each node:
GET /_cat/nodes?v&h=segments.count,segments.memory&s=segments.memory:desc
+ - If the memory usage of segments is too high, you can delete or disable unnecessary indexes, or periodically merge the segments of indexes that are no longer updated (see the force merge example at the end of this FAQ).
+ - Clear the cache
Run the following command on Kibana to clear the cache:
+POST _cache/clear
+
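+A minimal sketch of force merging an index that is no longer updated (the index name is a placeholder; force merging is I/O-intensive, so run it during off-peak hours):
+POST my_index/_forcemerge?max_num_segments=1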
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528299557.html b/docs/css/umn/en-us_topic_0000001528299557.html
new file mode 100644
index 00000000..786f9603
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528299557.html
@@ -0,0 +1,232 @@
+
+
+Creating a Vector Index
+
+ Creating a Vector Index- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- Click Dev Tools in the navigation tree on the left and run the following command to create a vector index.
Create an index named my_index that contains a vector field my_vector and a text field my_label. A graph index is created for the vector field, and Euclidean distance is used to measure similarity.
+PUT my_index
+{
+ "settings": {
+ "index": {
+ "vector": true
+ }
+ },
+ "mappings": {
+ "properties": {
+ "my_vector": {
+ "type": "vector",
+ "dimension": 2,
+ "indexing": true,
+ "algorithm": "GRAPH",
+ "metric": "euclidean"
+ },
+ "my_label": {
+ "type": "text"
+ }
+ }
+ }
+}
+
+Table 1 Parameters for creating an indexType
+ |
+Parameter
+ |
+Description
+ |
+
+
+Index settings parameters
+ |
+vector
+ |
+To use a vector index, set this parameter to true.
+ |
+
+Field mappings parameters
+ |
+type
+ |
+Field type, for example, vector.
+ |
+
+dimension
+ |
+Vector dimension. Value range: [1, 4096]
+ |
+
+indexing
+ |
+Whether to enable vector index acceleration.
+The value can be: - false: disables vector index acceleration. If this parameter is set to false, vector data is written only to docvalues, and only ScriptScore and Rescore can be used for vector query.
- true: enables vector index acceleration. If this parameter is set to true, an extra vector index is created. The index algorithm is specified by the algorithm field and VectorQuery can be used for data query.
+
+Default value: false
+ |
+
+algorithm
+ |
+Index algorithm. This parameter is valid only when indexing is set to true.
+The value can be: - FLAT: brute-force algorithm that calculates the distance between the target vector and all vectors in sequence. The algorithm relies on sheer computing power and its recall rate reaches 100%. You can use this algorithm if you require high recall accuracy.
- GRAPH: Hierarchical Navigable Small Worlds (HNSW) algorithm for graph indexes. This algorithm is mainly used in scenarios where high performance and precision are required and a single shard contains fewer than 10 million data records.
- GRAPH_PQ: combination of the HNSW algorithm and the PQ algorithm. The PQ algorithm reduces the storage overhead of original vectors, so that HNSW can easily search for data among hundreds of millions of records.
- IVF_GRAPH: combination of IVF and HNSW. The entire space is divided into multiple cluster centroids, which makes search much faster but slightly inaccurate. You can use this algorithm if you require high performance when searching for data among hundreds of millions of records.
- IVF_GRAPH_PQ: combination of the PQ algorithm with the IVF or HNSW algorithm to further improve the system capacity and reduce the system overhead. This algorithm is applicable to scenarios where there are more than 1 billion files in shards and high retrieval performance is required.
- PV_GRAPH: improved Hierarchical Navigable Small Worlds (HNSW) algorithm for graph indexes. This algorithm is applicable to scenarios where a single shard contains fewer than 10 million records, available memory resources are sufficient, and high performance and precision are required. This algorithm supports vector and scalar joint filtering. Currently, the sub_fields parameter supports only the keyword type. Compared with post-filtering and Boolean query, it greatly improves the filling rate of returned results and the search performance. Only Elasticsearch 7.10.2 clusters support the PV_GRAPH index.
+
+
+ |
+
+ NOTE: If indexing is set to true, CSS provides optional parameters for vector search to achieve higher query performance or precision. For details, see Table 2.
+ |
+
+metric
+ |
+Method of calculating the distance between vectors.
+The value can be:
+- euclidean: Euclidean distance
- inner_product: inner product distance
- cosine: cosine distance
- hamming: Hamming distance, which can be used only when dim_type is set to binary.
+Default value: euclidean
+ |
+
+dim_type
+ |
+Type of the vector dimension value.
+The value can be binary and float (default).
+ |
+
+sub_fields
+ |
+Defines the auxiliary scalar fields of vectors. Only the keyword type is supported. This parameter must be specified if you need to use the vector and scalar joint filtering feature. It takes effect only when algorithm is set to PV_GRAPH.
+ |
+
+
+
+
+
+Table 2 Optional parametersType
+ |
+Parameter
+ |
+Description
+ |
+
+
+Graph index configuration parameters
+ |
+neighbors
+ |
+Number of neighbors of each vector in a graph index. The default value is 64. A larger value indicates higher query precision, but also a larger index and slower build and query speed.
+Value range: [10, 255]
+ |
+
+shrink
+ |
+Cropping coefficient during HNSW build. The default value is 1.0f.
+Value range: (0.1, 10)
+ |
+
+scaling
+ |
+Scaling ratio of the upper-layer graph nodes during HNSW build. The default value is 50.
+Value range: (0, 128]
+ |
+
+efc
+ |
+Queue size of the neighboring node during HNSW build. The default value is 200. A larger value indicates a higher precision and slower build speed.
+Value range: (0, 100000]
+ |
+
+max_scan_num
+ |
+Maximum number of nodes that can be scanned. The default value is 10000. A larger value indicates a higher precision and slower indexing speed.
+Value range: (0, 1000000]
+ |
+
+PQ index configuration parameters
+ |
+centroid_num
+ |
+Number of cluster centroids of each fragment. The default value is 255.
+Value range: (0, 65535]
+ |
+
+fragment_num
+ |
+Number of fragments. The default value is 0. The plug-in automatically sets the number of fragments based on the vector length.
+Value range: [0, 4096]
+ |
+
+
+
+
+
+
+ Importing Vector DataRun the following command to import vector data. When writing vector data to the my_index index, you need to specify the vector field name and vector data.
+
+ - If the input vector data is a Base64 string encoded using little endian:
When writing binary vectors or high dimensional vectors that have a large number of valid bits, the Base64 encoding format is efficient for data transmission and parsing. POST my_index/_doc
+{
+ "my_vector": "AACAPwAAAEA="
+}
+
+ - To write a large amount of data, bulk operations are recommended.
POST my_index/_bulk
+{"index": {}}
+{"my_vector": [1.0, 2.0], "my_label": "red"}
+{"index": {}}
+{"my_vector": [2.0, 2.0], "my_label": "green"}
+{"index": {}}
+{"my_vector": [2.0, 3.0], "my_label": "red"}
+
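+ - A single document can also be written with a plain float array, consistent with the bulk example above (my_index, my_vector, and my_label are the names used in this example):
+POST my_index/_doc
+{
+  "my_vector": [1.0, 2.0],
+  "my_label": "red"
+}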
+
+ Advanced Cluster Configurations- When importing data offline, you are advised to set refresh_interval of indexes to -1 to disable automatic index refreshing and improve batch write performance.
- You are advised to set number_of_replicas to 0. After the offline data import is complete, you can modify the parameter value as needed. (See the example settings request after Table 3.)
- The parameters of other advanced functions are as follows:
+
Table 3 Cluster parametersParameter
+ |
+Description
+ |
+
+
+native.cache.circuit_breaker.enabled
+ |
+Whether to enable the circuit breaker for off-heap memory.
+Default value: true
+ |
+
+native.cache.circuit_breaker.cpu.limit
+ |
+Upper limit of off-heap memory usage of the vector index.
+For example, if the overall memory of a host is 128 GB and the heap memory occupies 31 GB, the default upper limit of the off-heap memory usage is 43.65 GB, that is, (128 - 31) x 45%. If the off-heap memory usage exceeds its upper limit, the circuit breaker will be triggered.
+Default value: 45%
+ |
+
+native.cache.expire.enabled
+ |
+Whether to enable the cache expiration policy. If this parameter is set to true, some cache items that have not been accessed for a long time will be cleared.
+Value: true or false
+Default value: false
+ |
+
+native.cache.expire.time
+ |
+Expiration time.
+Default value: 24h
+ |
+
+native.vector.index_threads
+ |
+Number of threads used for creating underlying indexes. Each shard uses multiple threads. Set a relatively small value to avoid resource contention between index building and queries when too many threads are used.
+Default value: 4
+ |
+
+
+
+
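+The offline-import settings mentioned above can be applied and later reverted with requests like the following (a sketch; my_index and the restored values are illustrative):
+PUT my_index/_settings
+{
+  "index": {
+    "refresh_interval": "-1",
+    "number_of_replicas": 0
+  }
+}
+After the import is complete, restore the values as needed, for example:
+PUT my_index/_settings
+{
+  "index": {
+    "refresh_interval": "1s",
+    "number_of_replicas": 1
+  }
+}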
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528299569.html b/docs/css/umn/en-us_topic_0000001528299569.html
new file mode 100644
index 00000000..3300d97d
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528299569.html
@@ -0,0 +1,537 @@
+
+
+Freezing an Index
+Precautions- Before freezing an index, ensure no data is being written to it. The index will be set to read only before being frozen, and data write will fail.
- After an index is frozen:
- It becomes read-only.
- The index data will be dumped to OBS. This process occupies network bandwidth.
- The query latency of a dumped index will increase. During aggregation, the latency of processing complex queries and reading a large volume of data is long.
- It cannot be unfrozen. That is, a read-only index cannot be changed to writable.
- After the freezing is complete, the index data in your local disks will be deleted.
+
+
+ Procedure- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- Click Dev Tools in the navigation tree on the left.
- Run the following command to freeze a specified index and dump it to OBS:
POST ${index_name}/_freeze_low_cost
+
+Table 1 Parameter descriptionParameter
+ |
+Description
+ |
+
+
+index_name
+ |
+Name of the index to be frozen.
+ |
+
+
+
+
+Information similar to the following is displayed:
+{
+ "freeze_uuid": "pdsRgUtSTymVDWR_HoTGFw"
+}
+
+Table 2 Response parameterParameter
+ |
+Description
+ |
+
+
+freeze_uuid
+ |
+After an index freezing request is submitted, an asynchronous job will be started. The request returns the asynchronous job ID, which can be used to query the progress of the asynchronous job.
+ |
+
+
+
+
+ After an index freezing request is submitted, data cannot be written to the index. During the index freezing, query requests are not affected. After the freezing is complete, the index is closed and then opened. During this period, the index cannot be queried, and the cluster may be in the red status for a short time. The index is restored after being opened.
+
+ - Run the following command to check the freezing task progress:
GET _freeze_low_cost_progress/${freeze_uuid}
+
+Table 3 Parameter descriptionParameter
+ |
+Description
+ |
+
+
+freeze_uuid
+ |
+Asynchronous task ID, which is obtained in 4.
+ |
+
+
+
+
+Information similar to the following is displayed:
+{
+
+ "stage" : "STARTED",
+ "shards_stats" : {
+ "INIT" : 0,
+ "FAILURE" : 0,
+ "DONE" : 0,
+ "STARTED" : 3,
+ "ABORTED" : 0
+ },
+ "indices" : {
+ "data1" : [
+ {
+ "uuid" : "7OS-G1-tRke2jHZPlckexg",
+ "index" : {
+ "name" : "data1",
+ "index_id" : "4b5PHXJITLaS6AurImfQ9A",
+ "shard" : 2
+ },
+ "start_ms" : 1611972010852,
+ "end_ms" : -1,
+ "total_time" : "10.5s",
+ "total_time_in_millis" : 10505,
+ "stage" : "STARTED",
+ "failure" : null,
+ "size" : {
+ "total_bytes" : 3211446689,
+ "finished_bytes" : 222491269,
+ "percent" : "6.0%"
+ },
+ "file" : {
+ "total_files" : 271,
+ "finished_files" : 12,
+ "percent" : "4.0%"
+ },
+ "rate_limit" : {
+ "paused_times" : 1,
+ "paused_nanos" : 946460970
+ }
+ },
+ {
+ "uuid" : "7OS-G1-tRke2jHZPlckexg",
+ "index" : {
+ "name" : "data1",
+ "index_id" : "4b5PHXJITLaS6AurImfQ9A",
+ "shard" : 0
+ },
+ "start_ms" : 1611972010998,
+ "end_ms" : -1,
+ "total_time" : "10.3s",
+ "total_time_in_millis" : 10359,
+ "stage" : "STARTED",
+ "failure" : null,
+ "size" : {
+ "total_bytes" : 3221418186,
+ "finished_bytes" : 272347118,
+ "percent" : "8.0%"
+ },
+ "file" : {
+ "total_files" : 372,
+ "finished_files" : 16,
+ "percent" : "4.0%"
+ },
+ "rate_limit" : {
+ "paused_times" : 5,
+ "paused_nanos" : 8269016764
+ }
+ },
+ {
+ "uuid" : "7OS-G1-tRke2jHZPlckexg",
+ "index" : {
+ "name" : "data1",
+ "index_id" : "4b5PHXJITLaS6AurImfQ9A",
+ "shard" : 1
+ },
+ "start_ms" : 1611972011021,
+ "end_ms" : -1,
+ "total_time" : "10.3s",
+ "total_time_in_millis" : 10336,
+ "stage" : "STARTED",
+ "failure" : null,
+ "size" : {
+ "total_bytes" : 3220787498,
+ "finished_bytes" : 305789614,
+ "percent" : "9.0%"
+ },
+ "file" : {
+ "total_files" : 323,
+ "finished_files" : 14,
+ "percent" : "4.0%"
+ },
+ "rate_limit" : {
+ "paused_times" : 3,
+ "paused_nanos" : 6057933087
+ }
+ }
+ ]
+ }
+}
+
+Table 4 Response parametersParameter
+ |
+Description
+ |
+
+
+stage
+ |
+Status. Its value can be:
+- INIT: The instance has just started or is being initialized.
- FAILURE: failed
- DONE: complete
- STARTED: started
- ABORTED: Canceled. This field is reserved.
+ |
+
+shards_stats
+ |
+Numbers of shards in each state.
+ |
+
+indices
+ |
+Index status details.
+ |
+
+
+
+
+
+Table 5 Return values of indicesParameter
+ |
+Description
+ |
+
+
+uuid
+ |
+UUID of the freezing operation
+ |
+
+index
+ |
+Index and shard information
+ |
+
+start_ms
+ |
+Start time
+ |
+
+end_ms
+ |
+End time. If no end time is specified, the value -1 is displayed.
+ |
+
+total_time
+ |
+Time spent
+ |
+
+total_time_in_millis
+ |
+Time spent, in milliseconds
+ |
+
+stage
+ |
+Status of the current shard.
+ |
+
+failure
+ |
+Failure cause. If no failure occurs, null is displayed.
+ |
+
+size.total_bytes
+ |
+Size of files to be frozen, in bytes
+ |
+
+size.finished_bytes
+ |
+Frozen bytes
+ |
+
+size.percent
+ |
+Percentage of frozen bytes
+ |
+
+file.total_files
+ |
+Number of files to be frozen
+ |
+
+file.finished_files
+ |
+Number of frozen files
+ |
+
+file.percent
+ |
+Percentage of frozen files
+ |
+
+rate_limit.paused_times
+ |
+Number of times that freezing is suspended due to rate limit
+ |
+
+rate_limit.paused_nanos
+ |
+Duration of freezing task suspension due to rate limit, in nanoseconds
+ |
+
+
+
+
+The following parameters are added to a frozen index. For details, see Table 6.
+
+Table 6 Frozen index parametersParameter
+ |
+Description
+ |
+
+
+index.frozen_low_cost
+ |
+Indicates whether an index is frozen. The value is true.
+ |
+
+index.blocks.write
+ |
+Indicates whether data writing is denied in a frozen index. The value is true.
+ |
+
+index.store.type
+ |
+Storage type of an index. The value is obs.
+ |
+
+
+
+
+ - After an index is frozen, its data will be cached. For details about the cache, see Configuring Cache. Run the following command to check the current cache status:
GET _frozen_stats
+GET _frozen_stats/${node_id}
+
+Table 7 Parameter descriptionParameter
+ |
+Description
+ |
+
+
+node_id
+ |
+Node ID, which can be used to obtain the cache status of a node.
+ |
+
+
+
+
+Information similar to the following is displayed:
+{
+ "_nodes" : {
+ "total" : 3,
+ "successful" : 3,
+ "failed" : 0
+ },
+ "cluster_name" : "css-zzz1",
+ "nodes" : {
+ "7uwKO38RRoaON37YsXhCYw" : {
+ "name" : "css-zzz1-ess-esn-2-1",
+ "transport_address" : "10.0.0.247:9300",
+ "host" : "10.0.0.247",
+ "ip" : "10.0.0.247",
+ "block_cache" : {
+ "default" : {
+ "type" : "memory",
+ "block_cache_capacity" : 8192,
+ "block_cache_blocksize" : 8192,
+ "block_cache_size" : 12,
+ "block_cache_hit" : 14,
+ "block_cache_miss" : 0,
+ "block_cache_eviction" : 0,
+ "block_cache_store_fail" : 0
+ }
+ },
+ "obs_stats" : {
+ "list" : {
+ "obs_list_count" : 17,
+ "obs_list_ms" : 265,
+ "obs_list_avg_ms" : 15
+ },
+ "get_meta" : {
+ "obs_get_meta_count" : 79,
+ "obs_get_meta_ms" : 183,
+ "obs_get_meta_avg_ms" : 2
+ },
+ "get_obj" : {
+ "obs_get_obj_count" : 12,
+ "obs_get_obj_ms" : 123,
+ "obs_get_obj_avg_ms" : 10
+ },
+ "put_obj" : {
+ "obs_put_obj_count" : 12,
+ "obs_put_obj_ms" : 2451,
+ "obs_put_obj_avg_ms" : 204
+ },
+ "obs_op_total" : {
+ "obs_op_total_ms" : 3022,
+ "obs_op_total_count" : 120,
+ "obs_op_avg_ms" : 25
+ }
+ },
+ "reader_cache" : {
+ "hit_count" : 0,
+ "miss_count" : 1,
+ "load_success_count" : 1,
+ "load_exception_count" : 0,
+ "total_load_time" : 291194714,
+ "eviction_count" : 0
+ }
+ },
+ "73EDpEqoQES749umJqxOzQ" : {
+ "name" : "css-zzz1-ess-esn-3-1",
+ "transport_address" : "10.0.0.201:9300",
+ "host" : "10.0.0.201",
+ "ip" : "10.0.0.201",
+ "block_cache" : {
+ "default" : {
+ "type" : "memory",
+ "block_cache_capacity" : 8192,
+ "block_cache_blocksize" : 8192,
+ "block_cache_size" : 12,
+ "block_cache_hit" : 14,
+ "block_cache_miss" : 0,
+ "block_cache_eviction" : 0,
+ "block_cache_store_fail" : 0
+ }
+ },
+ "obs_stats" : {
+ "list" : {
+ "obs_list_count" : 17,
+ "obs_list_ms" : 309,
+ "obs_list_avg_ms" : 18
+ },
+ "get_meta" : {
+ "obs_get_meta_count" : 79,
+ "obs_get_meta_ms" : 216,
+ "obs_get_meta_avg_ms" : 2
+ },
+ "get_obj" : {
+ "obs_get_obj_count" : 12,
+ "obs_get_obj_ms" : 140,
+ "obs_get_obj_avg_ms" : 11
+ },
+ "put_obj" : {
+ "obs_put_obj_count" : 12,
+ "obs_put_obj_ms" : 1081,
+ "obs_put_obj_avg_ms" : 90
+ },
+ "obs_op_total" : {
+ "obs_op_total_ms" : 1746,
+ "obs_op_total_count" : 120,
+ "obs_op_avg_ms" : 14
+ }
+ },
+ "reader_cache" : {
+ "hit_count" : 0,
+ "miss_count" : 1,
+ "load_success_count" : 1,
+ "load_exception_count" : 0,
+ "total_load_time" : 367179751,
+ "eviction_count" : 0
+ }
+ },
+ "EF8WoLCUQbqJl1Pkqo9-OA" : {
+ "name" : "css-zzz1-ess-esn-1-1",
+ "transport_address" : "10.0.0.18:9300",
+ "host" : "10.0.0.18",
+ "ip" : "10.0.0.18",
+ "block_cache" : {
+ "default" : {
+ "type" : "memory",
+ "block_cache_capacity" : 8192,
+ "block_cache_blocksize" : 8192,
+ "block_cache_size" : 12,
+ "block_cache_hit" : 14,
+ "block_cache_miss" : 0,
+ "block_cache_eviction" : 0,
+ "block_cache_store_fail" : 0
+ }
+ },
+ "obs_stats" : {
+ "list" : {
+ "obs_list_count" : 17,
+ "obs_list_ms" : 220,
+ "obs_list_avg_ms" : 12
+ },
+ "get_meta" : {
+ "obs_get_meta_count" : 79,
+ "obs_get_meta_ms" : 139,
+ "obs_get_meta_avg_ms" : 1
+ },
+ "get_obj" : {
+ "obs_get_obj_count" : 12,
+ "obs_get_obj_ms" : 82,
+ "obs_get_obj_avg_ms" : 6
+ },
+ "put_obj" : {
+ "obs_put_obj_count" : 12,
+ "obs_put_obj_ms" : 879,
+ "obs_put_obj_avg_ms" : 73
+ },
+ "obs_op_total" : {
+ "obs_op_total_ms" : 1320,
+ "obs_op_total_count" : 120,
+ "obs_op_avg_ms" : 11
+ }
+ },
+ "reader_cache" : {
+ "hit_count" : 0,
+ "miss_count" : 1,
+ "load_success_count" : 1,
+ "load_exception_count" : 0,
+ "total_load_time" : 235706838,
+ "eviction_count" : 0
+ }
+ }
+ }
+}
+
+ - Run the following command to reset the cache status:
POST _frozen_stats/reset
+Information similar to the following is displayed:
+{
+ "_nodes" : {
+ "total" : 1,
+ "successful" : 1,
+ "failed" : 0
+ },
+ "cluster_name" : "Es-0325-007_01",
+ "nodes" : {
+ "mqTdk2YRSPyOSXfesREFSg" : {
+ "result" : "ok"
+ }
+ }
+}
+ This command is used for debugging performance issues. After resetting the statistics, you can run the cache query command again to check the current cache status. You do not need to run this command during normal service running.
+
+ - Run the following command to check all the frozen indexes:
GET _cat/freeze_indices?stage=${STAGE}
+
+Table 8 Parameter descriptionParameter
+ |
+Description
+ |
+
+
+STAGE
+ |
+Its value can be:
+- start: List of indexes that are being frozen
- done: List of indexes that have been frozen
- unfreeze: List of indexes that are not frozen
- Empty or other values: List of all indexes that are being frozen or have been frozen
+ |
+
+
+
+
+Information similar to the following is displayed:
+green open data2 0bNtxWDtRbOSkS4JYaUgMQ 3 0 5 0 7.9kb 7.9kb
+green open data3 oYMLvw31QnyasqUNuyP6RA 3 0 51 0 23.5kb 23.5kb
+ The parameters and return values of this command are the same as those of _cat/indices of Elasticsearch.
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528299573.html b/docs/css/umn/en-us_topic_0000001528299573.html
new file mode 100644
index 00000000..83ef7d2a
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528299573.html
@@ -0,0 +1,103 @@
+
+
+Key Operations Recorded by CTS
+Cloud Trace Service (CTS) is available on the public cloud platform. With CTS, you can record operations associated with CSS for later query, audit, and backtracking.
+ PrerequisitesCTS has been enabled. For details, see Enabling CTS.
+
+ Key Operations Recorded by CTS
+ Table 1 Key operations recorded by CTSOperation
+ |
+Resource Type
+ |
+Event Name
+ |
+
+
+Creating a cluster
+ |
+cluster
+ |
+createCluster
+ |
+
+Deleting a cluster
+ |
+cluster
+ |
+deleteCluster
+ |
+
+Expanding the cluster capacity
+ |
+cluster
+ |
+roleExtendCluster
+ |
+
+Restarting a cluster
+ |
+cluster
+ |
+rebootCluster
+ |
+
+Performing basic configurations for a cluster snapshot
+ |
+cluster
+ |
+updateSnapshotPolicy
+ |
+
+Setting the automatic snapshot creation policy
+ |
+cluster
+ |
+updateAutoSnapshotPolicy
+ |
+
+Upgrading a cluster
+ |
+cluster
+ |
+upgradeCluster
+ |
+
+Retrying the upgrade
+ |
+cluster
+ |
+retryAction
+ |
+
+Manually creating a snapshot
+ |
+snapshot
+ |
+createSnapshot
+ |
+
+Restoring a snapshot
+ |
+snapshot
+ |
+restoreSnapshot
+ |
+
+Deleting a snapshot
+ |
+snapshot
+ |
+deleteSnapshot
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528299577.html b/docs/css/umn/en-us_topic_0000001528299577.html
new file mode 100644
index 00000000..6506b923
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528299577.html
@@ -0,0 +1,176 @@
+
+
+Flow Control
+Flow control can be implemented via an independent API.
+ - Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run the commands to query traffic control information.
+
Example response:
+{
+ "_nodes" : {
+ "total" : 1,
+ "successful" : 1,
+ "failed" : 0
+ },
+ "cluster_name" : "css-flowcontroller",
+ "nodes" : {
+ "ElBRNCMbTj6L1C-Wke-Dnw" : {
+ "name" : "css-flowcontroller-ess-esn-1-1",
+ "host" : "10.0.0.133",
+ "timestamp" : 1613979513747,
+ "flow_control" : {
+ "transport" : {
+ "concurrent_req" : 0,
+ "rejected_concurrent" : 0,
+ "rejected_new" : 0,
+ "rejected_deny" : 0
+ },
+ "http" : {
+ "concurrent_req" : 0,
+ "rejected_concurrent" : 0,
+ "rejected_new" : 0,
+ "rejected_deny" : 0
+ },
+ "memory" : {
+ "memory_allow" : 41,
+ "memory_rejected" : 0
+ },
+ "cpu": {
+ "rejected_cpu" : 0
+ },
+ "ip_address" : [
+ {
+ "ip" : "/10.0.0.198",
+ "count" : 453
+ },
+ {
+ "ip" : "/198.19.49.1",
+ "count" : 42
+ }
+ ],
+ "url_sample" : [
+ {
+ "url" : "/*/_search?pretty=true",
+ "method" : "GET",
+ "remote_address" : "/10.0.0.198:16763",
+ "count" : 1
+ }
+ ]
+ }
+ }
+ }
+}
+In the response, the information of each node is listed separately. The http field records the numbers of concurrent connections and new connections. The memory field records memory flow control statistics. The ip_address field records the client IP addresses that have accessed the node most recently. The url_sample field records the URLs that have been requested most frequently recently. The cpu field records CPU flow control statistics.
+
+Table 1 Response parametersParameter
+ |
+Description
+ |
+
+
+concurrent_req
+ |
+Number of TCP connections of a node, which is recorded no matter whether flow control is enabled. This value is similar to the value of current_open of the GET /_nodes/stats/http API but is smaller, because whitelisted IP addresses and internal node IP addresses are not counted.
+ |
+
+rejected_concurrent
+ |
+Number of concurrent connections rejected during HTTP flow control. This value is not cleared when HTTP flow control is disabled.
+ |
+
+rejected_new
+ |
+Number of new connections rejected during HTTP flow control. This value is not cleared when HTTP flow control is disabled.
+ |
+
+rejected_deny
+ |
+Number of requests rejected based on the blacklist during HTTP flow control. This value is not cleared when HTTP flow control is disabled.
+ |
+
+memory_allow
+ |
+Number of allowed requests during memory flow control. This parameter takes effect when memory flow control is enabled, and its value is not cleared after memory flow control is disabled. The requests from the paths in the allow_path whitelist are not recorded. If allow_path is set to **, no requests are recorded.
+ |
+
+memory_rejected
+ |
+Number of rejected requests during memory flow control. This parameter takes effect when memory flow control is enabled, and its value is not cleared after memory flow control is disabled. The requests from the paths in the allow_path whitelist are not recorded. If allow_path is set to **, no requests are recorded.
+ |
+
+rejected_cpu
+ |
+Number of requests rejected when the CPU flow control threshold is exceeded. This parameter takes effect when CPU flow control is enabled, and its value is not cleared after CPU flow control is disabled.
+ |
+
+ip_address
+ |
+IP addresses and the number of requests. For details, see Table 2.
+ |
+
+url_sample
+ |
+Request path sampling. Request URLs are sampled and counted based on the configured sampling duration and interval. For details, see Table 3.
+ |
+
+
+
+
+
+Table 2 ip_addressParameter
+ |
+Description
+ |
+
+
+ip
+ |
+Source IP address for accessing the node.
+ |
+
+count
+ |
+Number of access requests from an IP address.
+ |
+
+
+
+
+
+Table 3 url_sampleParameter
+ |
+Description
+ |
+
+
+url
+ |
+Request URL
+ |
+
+method
+ |
+Method corresponding to the request path
+ |
+
+remote_address
+ |
+Source IP address and port number of the request
+ |
+
+count
+ |
+Number of times the path was sampled
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528299581.html b/docs/css/umn/en-us_topic_0000001528299581.html
new file mode 100644
index 00000000..ee46d31b
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528299581.html
@@ -0,0 +1,88 @@
+
+
+Managing Failed Tasks
+In the Failed Tasks dialog box, you can view the failed tasks related to a cluster, such as failing to create, restart, scale out, back up, or restore a cluster. In addition, you can view the failure cause of each task and choose to delete one or all failed tasks.
+ Viewing Failed Tasks- Log in to the CSS management console.
- Click Clusters to switch to the Clusters page. Click the digit next to Failed Tasks to switch to the Failed Tasks dialog box.
Figure 1 Clicking the digit next to Failed Tasks
+ - In the Failed Tasks dialog box, view all failed tasks of the current account. The following information about the failed tasks is displayed: Name/ID, Task Status, and Failure Time.
- Click the question mark in the Task Status column to view the failure cause of a task. You are advised to troubleshoot faults based on failure causes. For details about failure causes, see Error Code.
Figure 2 Viewing the failure cause of a task
+
+
+ Deleting a Failed TaskYou can delete one or all failed tasks at a time.
+ - To delete a failed task, perform the following operations: Locate the row that contains the target task and click Delete in the Operation column. In the displayed dialog box, confirm the task you want to delete and click Yes.
- To delete all failed tasks, perform the following operations: In the Failed Tasks dialog box, click Delete All. In the displayed dialog box, confirm the information about all failed tasks and click Yes.
+ Figure 3 Deleting a failed task
+
+ Error Code
+ Table 1 Failure causesError Code
+ |
+Failure Cause
+ |
+Solution
+ |
+
+
+CSS.6000
+ |
+Failed to create the cluster because of an internal error. Please try again later. If the problem persists, contact customer service.
+ |
+Please try again later or contact customer service.
+ |
+
+CSS.6001
+ |
+Failed to scale out the cluster because of an internal error. Please try again later. If the problem persists, contact customer service.
+ |
+
+CSS.6002
+ |
+Failed to restart the cluster because of an internal error. Please try again later. If the problem persists, contact customer service.
+ |
+
+CSS.6003
+ |
+Failed to restore the cluster because of an internal error. Please try again later. If the problem persists, contact customer service.
+ |
+
+CSS.6004
+ |
+Failed to create the node because of ECS exceptions (<ECS error code>). Please try again later. If the problem persists, contact customer service.
+ NOTE: <ECS error code> indicates the error information reported by ECS. For details about the cause and solution, see ECS Error Code Description.
+
+ |
+
+CSS.6005
+ |
+Failed to initialize the service because of an internal error. Please try again later. If the problem persists, contact customer service.
+ |
+
+CSS.6007
+ |
+Failed to create the snapshot because of an internal error. Please try again later. If the problem persists, contact customer service.
+ |
+
+CSS.6008
+ |
+Failed to create the snapshot because the OBS bucket you select does not exist or has been deleted.
+ |
+Modify the OBS bucket.
+ |
+
+CSS.6009
+ |
+Failed to restore the snapshot because the OBS bucket you select does not exist or has been deleted.
+ |
+
+CSS.6010
+ |
+Failed to restore the snapshot because the OBS object does not exist or has been deleted.
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528299585.html b/docs/css/umn/en-us_topic_0000001528299585.html
new file mode 100644
index 00000000..300eae71
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528299585.html
@@ -0,0 +1,68 @@
+
+
+Changing AZs
+CSS supports cross-AZ deployment. You can add an AZ to obtain more resources or improve cluster availability, and can migrate your current AZ to one with higher specifications. This section describes how to add or migrate your AZs.
+ DescriptionYou can Add AZ or Migrate AZ. - Add AZ: Add one or two AZs to a single-AZ cluster, or add an AZ to a dual-AZ cluster to improve cluster availability.
- Migrate AZ: Completely migrate data from the current AZ to another AZ that has sufficient resources.
+
+
+ Prerequisites- Ensure that an AZ with sufficient resources exists.
- The target cluster is available and has no tasks in progress.
- Make sure that no non-standard operations have been performed in the cluster. If you have made non-standard modifications, such as modifying return routes, system parameters, and Kibana configurations, these modifications will be lost after the AZ change and your services may be affected.
+
+ Constraints- To ensure service continuity, the total number of data nodes and cold data nodes in a cluster must be greater than or equal to 3.
- During the change, nodes are brought offline one by one and then new nodes are created. Ensure that the remaining nodes have enough disk capacity to store all the data of each node that is brought offline.
- To prevent replica allocation failures after a node is brought offline during the change, ensure that the primary and replica shards of each index can still be allocated to the remaining data nodes and cold data nodes. That is, the maximum number of primary and replica shard copies of an index plus 1 must be less than or equal to the total number of data nodes and cold data nodes in the current cluster.
- You are advised to back up data before the change to prevent data loss caused by upgrade faults.
- Before a change completes, some nodes may have been moved to a new AZ. In this case, the AZs before and after the change are both displayed. After the change succeeds, the new AZs and their nodes will be displayed properly.
- When adding AZs, the current AZ must be retained in the change. When adding one or two AZs to a single-AZ cluster, you must change AZs for all nodes at the same time. When adding an AZ to a dual-AZ cluster, you can change AZs for a single type of nodes or all nodes in a cluster at a time. For example, in a cluster using the dual-AZ architecture, you can use the three-AZ architecture for master nodes alone. During HA modification, the nodes with the smallest configurations are modified to rebuild the cluster. After the HA modification is complete, the YML configuration of the nodes that are not modified is also updated. You need to restart the cluster to make the modification take effect.
- When migrating an AZ, you can select only one target AZ. You can migrate AZs for a single type of nodes or all nodes in a cluster at a time. For example, in a cluster with two AZs, you can migrate the AZ of the master nodes to the other AZ. After the AZ migration, you need to restart the cluster to make the modification take effect.
+
+ Procedure- Log in to the CSS management console.
- In the navigation pane, choose a cluster type. The cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- Click the Change AZ tab.
- On the Change AZ page, set parameters.
+
Table 1 Parameters for changing AZsParameter
+ |
+Description
+ |
+
+
+Operation Type
+ |
+- Add AZ: Add one or two AZs to a single-AZ cluster, or add an AZ to a dual-AZ cluster.
During HA modification, the nodes with the smallest configurations are modified to rebuild the cluster. After the HA modification is complete, the YML configuration of the nodes that are not modified is also updated. You need to restart the cluster to make the modification take effect.
+ - Migrate AZ: Migrate data from one AZ to another.
After the AZ migration, you need to restart the cluster to make the modification take effect.
+
+ |
+
+Node Type
+ |
+Select a type of node or All nodes to change their AZ.
+ NOTE: When adding one or two AZs to a single-AZ cluster, you can only select All nodes to change AZs for all nodes at a time.
+
+ |
+
+Current AZ
+ |
+Current AZ of a cluster
+ |
+
+Target AZ
+ |
+Target AZ.
+- Add AZ: Select up to three AZs, which must include all your current AZs.
- Migrate AZ: Select only one target AZ, which cannot be your current AZ.
+ |
+
+Agency
+ |
+Select an IAM agency to grant the current account the permission to change AZs.
+If no agencies are available, click Create IAM Agency to go to the IAM console and create an agency.
+ NOTE: The selected agency must be authorized with the Tenant Administrator or VPC Administrator policy.
+
+ |
+
+
+
+
+ - Click Submit. Determine whether to check for the backup of all indexes and click OK to start the change.
- The current AZ change task is displayed in the task list. If the task status is Running, expand the task list and click View Progress to view the progress details.
If the task status is Failed, you can retry or terminate the task.
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528299597.html b/docs/css/umn/en-us_topic_0000001528299597.html
new file mode 100644
index 00000000..9d610592
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528299597.html
@@ -0,0 +1,84 @@
+
+
+Scaling in a Cluster
+If a cluster can process existing data without fully using its resources, you can scale in the cluster to reduce costs. You are advised to scale in clusters during off-peak hours.
+ PrerequisitesThe target cluster is available and has no tasks in progress.
+
+ Constraints- Only the number of nodes can be modified during cluster scale-in. The node specifications and node storage capacity cannot be modified. You can modify node specifications by referring to Changing Specifications. You can modify node storage capacity by referring to Scaling Out a Cluster.
- If you change the number and storage capacity of a specified type of node, nodes in other types will not be changed.
- Ensure that the disk usage after scale-in is less than 80% and each AZ of each node type has at least one node.
- When scaling in a cluster, the data on the nodes to be deleted is migrated to other nodes. The timeout threshold for data migration is five hours. If data migration is not complete within 5 hours, the cluster scale-in fails. You are advised to perform the scale-in in multiple batches when the cluster holds a huge amount of data.
- For a cluster without master nodes, the number of remaining data nodes (including cold data nodes and other types of nodes) after scale-in must be greater than half of the original node number, and greater than the maximum number of index replicas.
- For a cluster with master nodes, the number of removed master nodes in a scale-in must be fewer than half of the original master node number. After scale-in, there has to be an odd number of master nodes, and there has to be at least three of them.
- A cluster with two nodes cannot be scaled in. You can create a cluster using a single node.
- The quota of nodes in different types varies. For details, see Table 1.
+
Table 1 Number of nodes in different typesNode Type
+ |
+Number
+ |
+
+
+ess
+ |
+ess: 1-32
+ |
+
+ess, ess-master
+ |
+ess: 1-200
+ess-master: an odd number ranging from 3 to 9
+ |
+
+ess, ess-client
+ |
+ess: 1-32
+ess-client: 1-32
+ |
+
+ess, ess-cold
+ |
+ess: 1-32
+ess-cold: 1-32
+ |
+
+ess, ess-master, ess-client
+ |
+ess: 1-200
+ess-master: an odd number ranging from 3 to 9
+ess-client: 1-32
+ |
+
+ess, ess-master, ess-cold
+ |
+ess: 1-200
+ess-master: an odd number ranging from 3 to 9
+ess-cold: 1-32
+ |
+
+ess, ess-client, ess-cold
+ |
+ess: 1-32
+ess-client: 1-32
+ess-cold: 1-32
+ |
+
+ess, ess-master, ess-client, ess-cold
+ |
+ess: 1-200
+ess-master: an odd number ranging from 3 to 9
+ess-client: 1-32
+ess-cold: 1-32
+ |
+
+Details about the four node types: - ess: the default node type that is mandatory for cluster creation. The other three node types are optional.
- ess-master: master node
- ess-client: client node
- ess-cold: cold data node
+
+ |
+
+
+
+
+
+
+ Procedure- Log in to the CSS management console.
- In the navigation pane, choose a cluster type. The cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- On the Modify Configuration page, choose the Scale Cluster tab and click Scale in to set parameters.
- Action: Select Scale in.
- Resources: The changed amount of resources.
- Nodes: The number of the default data nodes. For details about the value range that can be changed, see Table 1.
+ - Click Next.
- Confirm the information and click Submit.
- Click Back to Cluster List to switch to the Clusters page. The Task Status is Scaling in. When Cluster Status changes to Available, the cluster has been successfully scaled in.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528299601.html b/docs/css/umn/en-us_topic_0000001528299601.html
new file mode 100644
index 00000000..e55df4ef
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528299601.html
@@ -0,0 +1,111 @@
+
+
+Configuring YML Parameters
+You can modify the elasticsearch.yml file.
+ Modifying Parameter Configurations- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, click the name of the target cluster. The cluster information page is displayed.
- Click Parameter Configurations and click Edit to modify module parameters as required.
+
Table 1 Module parametersModule Name
+ |
+Parameter
+ |
+Description
+ |
+
+
+Cross-domain Access
+ |
+http.cors.allow-credentials
+ |
+Whether to return the Access-Control-Allow-Credentials header during cross-domain access
+Value: true or false
+Default value: false
+ |
+
+http.cors.allow-origin
+ |
+Origin IP address allowed for cross-domain access, for example, 122.122.122.122:9200
+ |
+
+http.cors.max-age
+ |
+Cache duration of the browser. The cache is automatically cleared after the time range you specify.
+Unit: s
+Default value: 1,728,000
+ |
+
+http.cors.allow-headers
+ |
+Headers allowed for cross-domain access, including X-Requested-With, Content-Type, and Content-Length. Use commas (,) and spaces to separate headers.
+ |
+
+http.cors.enabled
+ |
+Whether to allow cross-domain access
+Value: true or false
+Default value: false
+ |
+
+http.cors.allow-methods
+ |
+Methods allowed for cross-domain access, including OPTIONS, HEAD, GET, POST, PUT, and DELETE. Use commas (,) and spaces to separate methods.
+ |
+
+Reindexing
+ |
+reindex.remote.whitelist
+ |
+Configured for migrating data from the current cluster to the target cluster through the reindex API. The example value is 122.122.122.122:9200.
+ |
+
+Custom Cache
+ |
+indices.queries.cache.size
+ |
+Cache size in the query phase
+Value range: 1 to 100
+Unit: %
+Default value: 10%
+ |
+
+Queue Size in a Thread Pool
+ |
+thread_pool.bulk.queue_size
+ |
+Queue size in the bulk thread pool. The value is an integer. You need to customize this parameter.
+Default value: 200
+ |
+
+thread_pool.write.queue_size
+ |
+Queue size in the write thread pool. The value is an integer. You need to customize this parameter.
+Default value: 200
+ |
+
+thread_pool.force_merge.size
+ |
+Queue size in the force merge thread pool. The value is an integer.
+Default value: 1
+ |
+
+Customize
+ |
+You can add parameters based on your needs.
+ |
+Customized parameters
+ NOTE: - Enter multiple values in the format as [value1, value2, value3...].
- Separate values by commas (,) and spaces.
- Colons (:) are not allowed.
+
+ |
+
+
+
+
+ - After the modification is complete, click Submit. In the displayed Submit Configuration dialog box, select the box indicating "I understand that the modification will take effect after the cluster is restarted." and click Yes.
If the Status is Succeeded in the parameter modification list, the modification has been saved. Up to 20 modification records can be displayed.
+ - Return to the cluster list and choose More > Restart in the Operation column to restart the cluster and make the modification take effect.
- You need to restart the cluster after modification, or Configuration unupdated will be displayed in the Task Status column on the Clusters page.
- If you restart the cluster after the modification, and Task Status displays Configuration error, the parameter configuration file fails to be modified.
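+For reference, the cross-domain parameters in Table 1 correspond to the following elasticsearch.yml entries (the values shown are illustrative, not recommendations):
+http.cors.enabled: true
+http.cors.allow-origin: "122.122.122.122:9200"
+http.cors.allow-credentials: false
+http.cors.max-age: 1728000
+http.cors.allow-methods: OPTIONS, HEAD, GET, POST, PUT, DELETE
+http.cors.allow-headers: X-Requested-With, Content-Type, Content-Length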
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528299609.html b/docs/css/umn/en-us_topic_0000001528299609.html
new file mode 100644
index 00000000..e02a8d79
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528299609.html
@@ -0,0 +1,86 @@
+
+
+(Optional) Pre-Building and Registering a Center Point Vector
+When you perform operations in Creating a Vector Index, if the IVF_GRAPH or IVF_GRAPH_PQ index algorithm is selected, you need to pre-build and register the center point vectors.
+ ContextThe vector index acceleration algorithms IVF_GRAPH and IVF_GRAPH_PQ are suitable for ultra-large-scale computing. These two algorithms allow you to narrow down the query range by dividing a vector space into subspaces through clustering or random sampling. Before pre-build, you need to obtain all center point vectors by clustering or random sampling.
+ Then, pre-build a GRAPH or GRAPH_PQ index from these center point vectors and register it with the Elasticsearch cluster. All nodes in the cluster can share the index file. Reusing the center point index among shards effectively reduces the training overhead and the number of center point index queries, improving write and query performance.
+
+ Procedure- On the Clusters page, locate the target cluster, and click Access Kibana in the Operation column.
- Click Dev Tools in the navigation tree on the left.
- Create a center point index table.
- For example, if the created index is named my_dict, number_of_shards of the index must be set to 1. Otherwise, the index cannot be registered.
- If you want to use the IVF_GRAPH index, set algorithm of the center point index to GRAPH.
- If you want to use the IVF_GRAPH_PQ index, set algorithm of the center point index to GRAPH_PQ.
+PUT my_dict
+ {
+ "settings": {
+ "index": {
+ "vector": true
+ },
+ "number_of_shards": 1,
+ "number_of_replicas": 0
+ },
+ "mappings": {
+ "properties": {
+ "my_vector": {
+ "type": "vector",
+ "dimension": 2,
+ "indexing": true,
+ "algorithm": "GRAPH",
+ "metric": "euclidean"
+ }
+ }
+ }
+ }
+ - Write the center point vector to the created index.
Write the center point vector obtained through sampling or clustering into the created my_dict index by referring to Importing Vector Data.
+ - Call the registration API.
Register the created my_dict index with a Dict object with a globally unique identifier name (dict_name).
+PUT _vector/register/my_dict
+ {
+ "dict_name": "my_dict"
+ }
+ - Create an IVF_GRAPH or IVF_GRAPH_PQ index.
You do not need to specify the dimension and metric information. Simply specify the registered dictionary name.
+PUT my_index
+ {
+ "settings": {
+ "index": {
+ "vector": true
+ }
+ },
+ "mappings": {
+ "properties": {
+ "my_vector": {
+ "type": "vector",
+ "indexing": true,
+ "algorithm": "IVF_GRAPH",
+ "dict_name": "my_dict",
+ "offload_ivf": false
+ }
+ }
+ }
+ }
+
+Table 1 Field mappings parametersParameter
+ |
+Description
+ |
+
+
+dict_name
+ |
+Specifies the name of the center point index that the vector index depends on. The vector dimension and distance metric of the new index are the same as those of the Dict index.
+ |
+
+offload_ivf
+ |
+Offloads the IVF inverted index implemented by the underlying vector engine to Elasticsearch. In this way, the use of non-heap memory and the overhead of write and merge operations are reduced, but query performance also deteriorates. You can use the default value.
+Value: true or false
+Default value: false
+ |
+
+
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528299613.html b/docs/css/umn/en-us_topic_0000001528299613.html
new file mode 100644
index 00000000..2bbfd029
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528299613.html
@@ -0,0 +1,68 @@
+
+
+Cluster List Overview
+The cluster list displays all CSS clusters. If there are a large number of clusters, these clusters will be displayed on multiple pages. You can view clusters of all statuses from the cluster list.
+ Clusters are listed in chronological order by default in the cluster list, with the most recent cluster displayed at the top. Table 1 shows the cluster parameters.
+ In the upper right corner of the cluster list, you can enter a cluster name or cluster ID and click the search icon to search for a cluster. You can also click the refresh icon in the upper right corner to refresh the cluster list, or click the export icon to download the cluster list.
+
+ Table 1 Cluster list parameter descriptionParameter
+ |
+Description
+ |
+
+
+Name/ID
+ |
+Name and ID of a cluster. You can click a cluster name to switch to the Basic Information page. The cluster ID is automatically generated by the system and uniquely identifies a cluster.
+ |
+
+Cluster Status
+ |
+Status of a cluster. For details about the cluster status, see Viewing the Cluster Runtime Status and Storage Capacity Status.
+ |
+
+Task Status
+ |
+Status of a task, such as cluster restart, cluster capacity expansion, cluster backup, and cluster restoration.
+ |
+
+Version
+ |
+Elasticsearch version of the cluster.
+ |
+
+Created
+ |
+Time when the cluster is created.
+ |
+
+Enterprise Project
+ |
+Enterprise project that a cluster belongs to.
+ |
+
+Private Network Address
+ |
+Private network address and port number of the cluster. You can use these parameters to access the cluster. If the cluster has multiple nodes, the private network addresses and port numbers of all nodes are displayed.
+ |
+
+Billing Mode
+ |
+Billing mode of a cluster.
+ |
+
+Operation
+ |
+Operations that can be performed on a cluster, including accessing Kibana, checking metrics, restarting a cluster, and deleting a cluster. If an operation is not allowed, the button is gray.
+ |
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528299617.html b/docs/css/umn/en-us_topic_0000001528299617.html
new file mode 100644
index 00000000..61e85d40
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528299617.html
@@ -0,0 +1,22 @@
+
+
+Description
+Image recognition and retrieval, video search, and personalized recommendation impose high requirements on the latency and accuracy of high-dimensional vector retrieval. To facilitate large-scale vector search, CSS integrates a vector search feature powered by a vector search engine and the Elasticsearch plug-in mechanism.
+ PrinciplesVector search works in a way similar to traditional search. To improve vector search performance, we need to:
+ - Narrow down the matched scope
Similar to traditional text search, vector search uses indexes to accelerate the search instead of going through all data. Traditional text search uses inverted indexes to filter out irrelevant documents, whereas vector search builds indexes on vectors to skip irrelevant vectors, narrowing down the search scope.
+ - Reduce the complexity of calculating a single vector
The vector search method can first quantize and approximate high-dimensional vectors to obtain a smaller, more relevant candidate set. More precise algorithms are then applied to this smaller set for computation and sorting. In this way, complex computation is performed on only part of the vectors, improving efficiency.
+
+ Vector search means to retrieve the k-nearest neighbors (KNN) to the query vector in a given vector data set by using a specific measurement method. Generally, CSS only focuses on Approximate Nearest Neighbor (ANN), because a KNN search requires excessive computational resources.
+
+ FunctionsThe engine integrates a variety of vector indexes, such as brute-force search, Hierarchical Navigable Small World (HNSW) graphs, product quantization, and IVF-HNSW. It also supports multiple similarity calculation methods, such as Euclidean, inner product, cosine, and Hamming. The recall rate and retrieval performance of the engine are better than those of open-source engines. It can meet the requirements for high performance, high precision, low costs, and multi-modal computation.
+ The search engine also supports all the capabilities of the native Elasticsearch, including distribution, multi-replica, error recovery, snapshot, and permission control. The engine is compatible with the native Elasticsearch ecosystem, including the cluster monitoring tool Cerebro, the visualization tool Kibana, and the real-time data ingestion tool Logstash. Several client languages, such as Python, Java, Go, and C++, are supported.
+
+ Constraints- Only Elasticsearch clusters of versions 7.6.2 and 7.10.2 and OpenSearch clusters of version 1.3.6 support vector search.
- The vector search plug-in performs in-memory computing and requires more memory than common indexes do. It is recommended that the memory of the cluster node be greater than or equal to 8 GB and the cluster computing specifications be memory-optimized.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528299621.html b/docs/css/umn/en-us_topic_0000001528299621.html
new file mode 100644
index 00000000..d1ab20e6
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528299621.html
@@ -0,0 +1,18 @@
+
+
+Binding an Enterprise Project
+You can create enterprise projects based on your organizational structure. Then you can manage resources across different regions by enterprise project, add users and user groups to enterprise projects, and grant different permissions to the users and user groups. This section describes how to bind a CSS cluster to an enterprise project and how to modify an enterprise project.
+ PrerequisitesBefore binding an enterprise project, you have created an enterprise project.
+
+ Binding an Enterprise ProjectWhen creating a cluster, you can bind an existing enterprise project to the cluster, or click View Enterprise Project to go to the enterprise project management console and create a new project or view existing projects.
+
+ Modifying an Enterprise ProjectFor a cluster that has been created, you can modify its enterprise project based on the site requirements.
+ - Log in to the CSS management console.
- In the navigation pane on the left, select a cluster type. The cluster management page is displayed.
- In the cluster list on the displayed page, click the target cluster name to switch to the Cluster Information page.
- On the Cluster Information page, click the enterprise project name on the right of Enterprise Project. The project management page is displayed.
- On the Resources tab page, select the region of the current cluster, and select CSS for Service. In this case, the corresponding CSS cluster is displayed in the resource list.
- Select the cluster whose enterprise project you want to modify and click Remove.
- On the Remove Resource page, specify Mode and select Destination Enterprise Project, and click OK.
- After the resource is removed, you can view the modified enterprise project information on the Clusters page.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528299625.html b/docs/css/umn/en-us_topic_0000001528299625.html
new file mode 100644
index 00000000..2733127d
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528299625.html
@@ -0,0 +1,22 @@
+
+
+Kibana Platform
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528299629.html b/docs/css/umn/en-us_topic_0000001528299629.html
new file mode 100644
index 00000000..96cc772b
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528299629.html
@@ -0,0 +1,57 @@
+
+
+Accessing a Cluster from a Public Network
+You can access a security cluster that has HTTPS access enabled through the public IP address provided by the system. (Elasticsearch clusters of version 6.5.4 or later support the security mode.)
+ By default, CSS uses a shared load balancer for public network access. You can use a dedicated load balancer to improve performance. For details about its configuration, see Connecting to a Dedicated Load Balancer.
+ If public network access is enabled for CSS, then EIP and bandwidth resources will be used and billed.
+
+ Configuring Public Network Access- Log in to the CSS management console.
- On the Create Cluster page, enable Security Mode. Set the administrator password and enable HTTPS access.
- Select Automatically assign for Public IP Address and set related parameters.
+
Table 1 Public network access parameters
+Parameter
+ |
+Description
+ |
+
+
+Bandwidth
+ |
+Bandwidth for accessing Kibana with the public IP address
+ |
+
+Access Control
+ |
+If you disable this function, all IP addresses can access the cluster through the public IP address. If you enable access control, only IP addresses in the whitelist can access the cluster through the public IP address.
+ |
+
+Whitelist
+ |
+IP address or IP address range allowed to access a cluster. Use commas (,) to separate multiple addresses. This parameter can be configured only when Access Control is enabled.
+ |
+
+
+
+
+
+
+ Managing Public Network AccessYou can configure, modify, view the public network access of, or disassociate the public IP address from a cluster.
+ - Log in to the CSS management console.
- On the Clusters page, click the name of the target cluster. On the Basic Information page that is displayed, manage the public network access configurations.
- Configuring public network access
If you enabled HTTPS but did not configure public network access during security cluster creation, you can configure it on the Basic Information page after the cluster is created.
+Click Associate next to Public IP Address, set the access bandwidth, and click OK.
+If the association fails, wait for several minutes and try again.
+ - Modifying public network access
For a cluster for which you have configured public network access, you can click Edit next to Bandwidth to modify the bandwidth, or you can click Set next to Access Control to set the access control function and the whitelist for access.
+ - Viewing public network access
On the Basic Information page, you can view the public IP address associated with the current cluster.
+ - Disassociating a public IP address from a cluster
To disassociate the public IP address, click Disassociate next to Public IP Address.
+
+
+
+ Accessing a Cluster Through the Public IP AddressAfter configuring the public IP address, you can use it to access the cluster.
+ For example, run the following cURL commands to view the index information in the cluster. In this example, the public access IP address of one node in the cluster is 10.62.179.32 and the port number is 9200.
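+The exact command depends on your cluster configuration. The following is a minimal sketch for a security cluster with HTTPS access enabled, assuming the default admin account; replace <password> with the administrator password set at cluster creation:
+curl -ik -u admin:<password> 'https://10.62.179.32:9200/_cat/indices'
+The -i option prints the response headers, and -k skips server certificate verification.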
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528379249.html b/docs/css/umn/en-us_topic_0000001528379249.html
new file mode 100644
index 00000000..54ff21eb
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528379249.html
@@ -0,0 +1,19 @@
+
+
+Read/Write Splitting
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528379253.html b/docs/css/umn/en-us_topic_0000001528379253.html
new file mode 100644
index 00000000..44c4dce8
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528379253.html
@@ -0,0 +1,29 @@
+
+
+Overview
+You can scale in or out a cluster and change cluster specifications. In this way, you can improve cluster efficiency and reduce O&M costs.
+ Scaling Out a Cluster- If a data node (ess) processes many data write and query requests and responds slowly, you can expand its storage capacity to improve its efficiency. If some nodes become unavailable due to excessive data volume or misoperations, you can add new nodes to ensure cluster availability.
- Cold data nodes (ess-cold) are used to share the workload of data nodes. To prevent cold data loss, you can expand the storage capacity of the cold data node or add new ones.
+
+ Changing Specifications- If the allocation of new indexes or shards takes too long or the node coordination and scheduling are inefficient, you can change the master node (ess-master) specifications.
- If too many tasks need to be distributed or too many results have been aggregated, you can change the client node (ess-client) specifications.
- If the speed of data writing and query decreases suddenly, you can change the data node (ess) specifications.
- If cold data query becomes slow, you can change the cold node (ess-cold) specifications.
+
+ Scaling in a Cluster- If a cluster can process existing data without fully using its resources, you can scale in the cluster to reduce costs.
+
+ Removing Specified Nodes- If a cluster can process existing data without fully using its nodes, you can remove one or more specified nodes from the cluster to reduce costs.
+
+ Replacing a Specified Node- If a node in the cluster is faulty, you can create a new node with the same specifications to replace it.
+
+ Adding Master/Client Nodes- If the workloads on the data plane of a cluster increase, you can dynamically scale the cluster by adding master/client nodes.
+
+ Changing the Security Mode
+ After a cluster is created, its security mode can be changed using the following methods: - Change a non-security cluster to a security cluster that uses HTTP or HTTPS protocol.
- Change a security cluster that uses HTTP or HTTPS protocol to a non-security cluster.
- Change the protocol of a security cluster.
+
+ Changing AZs
+ You can Add AZ or Migrate AZ. - Add AZ: Add one or two AZs to a single-AZ cluster, or add an AZ to a dual-AZ cluster to improve cluster availability.
- Migrate AZ: Completely migrate data from the current AZ to another AZ that has sufficient resources.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528379257.html b/docs/css/umn/en-us_topic_0000001528379257.html
new file mode 100644
index 00000000..f9bf6d3e
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528379257.html
@@ -0,0 +1,22 @@
+
+
+Context
+ Feature DescriptionCSS can control traffic at the node level. You can configure a blacklist and whitelist, the maximum number of concurrent HTTP connections, and the maximum number of new HTTP connections for a node. You can also configure backpressure for client write traffic based on node heap memory usage, and block all access with one click. CSS can also collect statistics on node access IP addresses and URIs. Each function has an independent switch, which is disabled by default. To restore a parameter to its default value, set it to null.
+ After the client write traffic backpressure and control is enabled, large requests will be rejected when too much node heap memory has been occupied. This function prevents nodes from being suspended and reduces the risk of node unavailability.
+ - HTTP/HTTPS flow control:
- You can control client IP address access by setting IP addresses and subnets in the HTTP/HTTPS blacklist or whitelist. If an IP address is in the blacklist, the client is disconnected and all its requests are rejected. Whitelist rules take precedence over blacklist rules. If a client IP address exists in both the blacklist and the whitelist, its requests will not be rejected.
- HTTP/HTTPS concurrent connection flow control limits the total number of HTTP connections to a node per second.
- HTTP/HTTPS new connection flow control limits the number of new connections to a node.
+ - Memory flow control
Memory flow control limits write traffic based on node heap memory usage. Backpressure is applied to the client to trigger resource reclamation as much as possible, and requests are then accepted based on the available heap memory.
+ - Request sampling
Request sampling can record the access of client IP addresses and the type of requests from the client. Based on the statistics, you can identify the access traffic of client IP addresses and analyze the client write and query requests.
+ - One-click traffic blocking
One-click access blocking can block all the access traffic of a node, excluding the traffic from Kibana and CSS O&M and monitoring APIs.
+ - Flow control
Flow control provides an independent API for viewing traffic statistics and records the number of current client connections and client backpressure connections. You can evaluate the flow control threshold and analyze the cluster loads based on the statistics.
+ - Access logs
Access logs record the URLs and bodies of HTTP/HTTPS requests received by nodes within a period of time. You can analyze the current traffic pressure based on the access logs.
+
+
+ Constraints- Currently, only Elasticsearch clusters of versions 7.6.2 and 7.10.2 support the traffic control feature.
- Elasticsearch clusters of versions 7.6.2 and 7.10.2 created after February 2023 support only traffic control 2.0. Clusters created before February 2023 support only traffic control 1.0. For details, see Flow Control 1.0.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528379265.html b/docs/css/umn/en-us_topic_0000001528379265.html
new file mode 100644
index 00000000..94a156e6
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528379265.html
@@ -0,0 +1,70 @@
+
+
+Matching Index Synchronization
+The request URL and request body parameters are as follows:
+ PUT auto_sync/pattern/{pattern_name}
+
+ Table 1 Request body parameters
+Parameter
+ |
+Description
+ |
+
+
+remote_cluster
+ |
+Name of the primary cluster. The default name is leader1. You can change the name by configuring the primary cluster information.
+ |
+
+remote_index_patterns
+ |
+Pattern of the indexes to be synchronized from the primary cluster. Wildcards (*) are supported.
+ |
+
+local_index_pattern
+ |
+Pattern of the synchronized index in the secondary cluster. Template replacement is supported. For example, if this parameter is set to {{remote_index}}-sync, the index log1 is renamed log1-sync after synchronization.
+ |
+
+apply_exist_index
+ |
+Whether to synchronize existing indexes in the primary cluster. The default value is true.
+ |
+
+settings
+ |
+Index settings of the index being synchronized
+ |
+
+
+
+
+ The following are two examples:
+ 1. Synchronize a single index from the primary cluster to the secondary cluster.
+ PUT auto_sync/pattern/pattern1
+{
+ "remote_cluster": "leader1",
+ "remote_index_patterns": "log*",
+ "local_index_pattern": "{{remote_index}}-sync",
+ "apply_exist_index": true
+}
+ 2. Synchronize a single index from the primary cluster to the secondary cluster and modify the index configurations.
+ PUT auto_sync/pattern/pattern1
+{
+ "remote_cluster": "leader1",
+ "remote_index_patterns": "log*",
+ "local_index_pattern": "{{remote_index}}-sync",
+ "apply_exist_index": true,
+ "settings": {
+ "number_of_replicas": 4
+ }
+}
+ The following index configurations cannot be modified:
+ - number_of_shards
- version.created
- uuid
- creation_date
- soft_deletes.enabled
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528379273.html b/docs/css/umn/en-us_topic_0000001528379273.html
new file mode 100644
index 00000000..5395dd8f
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528379273.html
@@ -0,0 +1,83 @@
+
+
+Creating a User and Granting Permissions by Using Kibana
+CSS uses the opendistro_security plug-in to provide security cluster capabilities. The opendistro_security plug-in is built on the RBAC model. RBAC involves three core concepts: user, action, and role. RBAC decouples users from actions, which simplifies permission management and facilitates permission expansion and maintenance. The following figure shows the relationship among the three.
+ Figure 1 User, action, and role
+
+ Table 1 Parameters
+Parameter
+ |
+Description
+ |
+
+
+User
+ |
+A user can send operation requests to Elasticsearch clusters. The user has credentials such as username and password, and zero or multiple backend roles and custom attributes.
+ |
+
+Role
+ |
+A role is a combination of permissions and action groups, including operation permissions on clusters, indexes, documents, or fields.
+ |
+
+Permission
+ |
+A single permission, for example, creating an index (indices:admin/create).
+ |
+
+Role mapping
+ |
+A user will be assigned a role after successful authentication. Role mapping is to map a role to a user (or a backend role). For example, the mapping from kibana_user (role) to jdoe (user) means that John Doe obtains all permissions of kibana_user after being authenticated by kibana_user. Similarly, the mapping from all_access (role) to admin (backend role) means that any user with the backend role admin (from the LDAP/Active Directory server) has all the permissions of role all_access after being authenticated. You can map a role to multiple users or backend roles.
+ |
+
+Action group
+ |
+A group of permissions. For example, the predefined SEARCH action group grants roles the permission to use the _search and _msearch APIs.
+ |
+
+
+
+
+ In addition to the RBAC model, Elasticsearch has an important concept called tenant. RBAC is used to manage user authorization, and tenants are used to share information among users. In a tenant space, IAM users can share information such as dashboard data and index patterns.
+ This section describes how to use Kibana to create a user and grant permissions to the user. Kibana can be used to create users and grant permissions only when the security mode is enabled for the cluster.
+ - The Kibana UI varies depending on the Kibana version, but the operations are similar. This section takes Kibana 7.6.2 as an example to describe the procedure.
- You can customize the username, role name, and tenant name in Kibana.
+
+
+ Logging in to Kibana- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
Enter the administrator username and password to log in to Kibana. - Username: admin (default administrator account name)
- Password: Enter the administrator password you set when creating the cluster in security mode.
+ Figure 2 Login page
+
+
+
+ Creating a UserLog in to Kibana and create a user on the Security page.
+ - After a successful login, choose Security in the navigation tree on the left of the Kibana operation page. The Security page is displayed.
Figure 3 Accessing the Security page
+ - Choose Authentication Backends > Internal Users Database.
Figure 4 Adding a user (1)
+ - On the Internal Users Database page, choose
. The page for adding user information is displayed.Figure 5 Adding a user (2)
- On the user creation page, specify Username, Password, and Repeat password, and click Submit.
+ The user will be displayed in the user list.
+
+ Creating a Role and Granting PermissionsCreate a role and grant permissions to the role.
+ - Click Roles.
Figure 6 Adding a role
+ - On the Open Distro Security Roles page, click
.- On the Overview tab page, set the role name.
Figure 7 Entering a role name
- On the Cluster Permissions tab page, set CSS cluster permissions based on service requirements. If no cluster permissions are specified for a role, the role has no cluster-level permissions.
- Permissions: Action Groups: You can click Add Action Group to set cluster permissions. For example, if you select the read permission for a cluster, you can only view information such as the cluster status and cluster nodes.
- Permissions: Single Permissions: Select Show Advanced and click Add Single Permission to set more refined permissions for the cluster. For example, if this parameter is set to indices:data/read, you can only read specified indexes.
+Figure 8 Cluster Permissions tab page
+ - Configure index permissions on the Index Permissions page.
- Index patterns: Set this parameter to the name of the index whose permission needs to be configured. For example, my_store.
Use different names for the index and the user.
+
+ - Permissions: Action Groups: Click Add Action Group and set the permission as required. For example, select the read-only permission Search.
+ - On the Tenant Permissions page, set role permissions based on service requirements.
- Global permissions: Click Add Field to set the kibana read and write permissions of a role, for example, kibana_all_read or kibana_all_write.
- Tenant permissions: Click Add tenant pattern to add a tenant mode and set the kibana_all_read or kibana_all_write permission for a new tenant mode.
Figure 9 Tenant Permissions tab
+
+
+ - Click Save Role Definition and you can view the configured role.
+
+ Configuring a Role for a UserAfter creating a role and granting permissions to the role, you need to map the role to a user so that the user can obtain the permissions of the mapped role.
+ - Click Role Mappings. On the displayed Role Mappings page, map the roles.
Figure 10 Role mapping
+ - On the Role Mappings page, click
to select a role and add users.- Role: Select the name of the role to be mapped.
- Users: Click Add User and enter the name of the user whose role is mapped.
+Figure 11 Users and roles
+ - Click Submit.
- Verify that the configuration takes effect in Kibana.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528379277.html b/docs/css/umn/en-us_topic_0000001528379277.html
new file mode 100644
index 00000000..f338aada
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528379277.html
@@ -0,0 +1,17 @@
+
+
+Importing Data
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528379285.html b/docs/css/umn/en-us_topic_0000001528379285.html
new file mode 100644
index 00000000..c31b7b04
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528379285.html
@@ -0,0 +1,82 @@
+
+
+Changing the Security Mode
+After a cluster is created, its security mode can be changed using the following methods:
+
+ ContextYou can create clusters in multiple security modes. For details about the differences between security modes, see Table 1.
+ Table 1 Cluster security modes
+Security Mode
+ |
+Scenario
+ |
+Advantage
+ |
+Disadvantage
+ |
+
+
+Non-Security Mode
+ |
+Intranet services and test scenarios
+ |
+Simple. Easy to access.
+ |
+Poor security. Anyone can access such clusters.
+ |
+
+Security Mode + HTTP Protocol
+ |
+Scenarios that require user permission isolation and are sensitive to cluster performance.
+ |
+Security authentication is required for accessing such clusters, which improves cluster security. Accessing a cluster through HTTP protocol can retain the high performance of the cluster.
+ |
+Cannot be accessed from the public network.
+ |
+
+Security Mode + HTTPS Protocol
+ |
+Scenarios that require high security and public network access.
+ |
+Security authentication is required for accessing such clusters, which improves cluster security. The HTTPS protocol allows such clusters to be accessed from the public network.
+ |
+The performance of clusters using HTTPS is 20% lower than that of clusters using HTTP.
+ |
+
+
+
+
+
+
+ Prerequisites- You are advised to back up data before changing the cluster security mode.
- The target cluster is available and has no tasks in progress.
+
+ Constraints- Only clusters (whose version is 6.5.4 or later) created after November 2022 support security mode switching.
- A cluster automatically restarts when its security mode is being changed. Services are interrupted during the restart. The authentication mode for invoking the cluster will change after the restart, and client configurations need to be adjusted accordingly.
- If a cluster has already opened the Kibana session box, a session error message will be displayed after you change the cluster security mode. In this case, clear the cache and open Kibana again.
+
+ Switching from the Non-Security Mode to Security ModeYou can change a non-security cluster to a security cluster that uses HTTP or HTTPS. After a cluster's security mode is enabled, security authentication is required for accessing the cluster.
+ - Log in to the CSS management console.
- In the navigation pane on the left, choose Clusters > Elasticsearch. The Elasticsearch cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- Choose the Configure Security Mode tab.
- Enable the security mode. Enter and confirm the administrator password of the cluster.
Figure 1 Enabling the security mode
+ - Enable or disable HTTPS Access.
- If you enable HTTPS Access: The HTTPS protocol is used to encrypt cluster communication, and public network access to the cluster can be configured.
- If you disable HTTPS Access: The HTTP protocol is used, and public network access to the cluster cannot be configured.
+ - Click Submit. Confirm the information and the cluster list page is displayed.
The Task Status of the cluster is The security mode is changing. When the cluster status changes to Available, the security mode has been successfully changed.
+
+
+ Switching from the Security to Non-Security ModeYou can change a security cluster that uses HTTP or HTTPS to a non-security cluster. After a cluster's security mode is disabled, security authentication is no longer required for accessing the cluster.
+ - Clusters in non-security mode can be accessed without security authentication, and HTTP protocol is used to transmit data. Ensure the security of the cluster access environment and do not expose the access interface to the public network.
- During the switchover from the security mode to the non-security mode, the indexes of the original security cluster will be deleted. Back up data before disabling the security mode.
- If a security cluster has been bound to a public IP address, unbind it before changing the security mode.
- If a security cluster has enabled Kibana public network access, disable it before changing the security mode.
+
+ - Log in to the CSS management console.
- In the navigation pane on the left, choose Clusters. On the displayed Clusters page, locate the target cluster and choose More > Modify Configuration in the Operation column.
- Choose the Configure Security Mode tab.
- Disable the security mode.
Figure 2 Disabling the security mode
+ - Click Submit. Confirm the information and the cluster list page is displayed.
The Task Status of the cluster is The security mode is changing. When the cluster status changes to Available, the security mode has been successfully changed.
+
+
+ Switching the Protocol of Security ClustersYou can change the protocol of a security cluster.
+ If a security cluster has been bound to a public IP address, you need to unbind it before changing HTTPS protocol to HTTP.
+
+ - Log in to the CSS management console.
- In the navigation pane on the left, choose Clusters. On the displayed Clusters page, locate the target cluster and choose More > Modify Configuration in the Operation column.
- Choose the Configure Security Mode tab.
- Enable or disable HTTPS Access.
Figure 3 Configuring the protocol
+- If you enable HTTPS Access:
HTTPS protocol is used to encrypt cluster communication and you can configure public network access.
+ - If you disable HTTPS Access: An alarm message is displayed. Click OK to disable the function.
Cluster communication is no longer encrypted and the public network access function cannot be enabled.
+
+ - Click Submit. Confirm the information and the cluster list page is displayed.
The Task Status of the cluster is The security mode is changing. When the cluster status changes to Available, the security mode has been successfully changed.
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528379297.html b/docs/css/umn/en-us_topic_0000001528379297.html
new file mode 100644
index 00000000..aec288c9
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528379297.html
@@ -0,0 +1,20 @@
+
+
+(Optional) Interconnecting with a Dedicated Load Balancer
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528379301.html b/docs/css/umn/en-us_topic_0000001528379301.html
new file mode 100644
index 00000000..82e21af9
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528379301.html
@@ -0,0 +1,85 @@
+
+
+Monitoring OBS Operations
+To clearly show the OBS operations performed by the storage and compute decoupling plug-in, CSS adds a real-time OBS rate metric and records it in a system index.
+ PrerequisiteThis feature is available in Elasticsearch clusters of versions 7.6.2 and 7.10.2 and OpenSearch clusters created after March 2023.
+
+
+ GET _frozen_stats/obs_rate API- Calculation method: The average OBS operation rate in the last 5 seconds is calculated every 5 seconds.
- Example request:
GET _frozen_stats/obs_rate
+GET _frozen_stats/obs_rate/{nodeId}
+{nodeId} indicates the ID of the node whose OBS operation rate you want to query.
+ - Example response:
{
+ "_nodes" : {
+ "total" : 1,
+ "successful" : 1,
+ "failed" : 0
+ },
+ "cluster_name" : "elasticsearch",
+ "nodes" : {
+ "dflDvcSwTJ-fkiIlT2zE3A" : {
+ "name" : "node-1",
+ "transport_address" : "127.0.0.1:9300",
+ "host" : "127.0.0.1",
+ "ip" : "127.0.0.1",
+ "update_time" : 1671777600482, // Time when the current statistics are updated.
+ "obs_rate" : {
+ "list_op_rate" : 0.0, // Rate of OBS list operations. Unit: times/s.
+ "get_meta_op_rate" : 0.0, // Rate of OBS get meta operations. Unit: times/s.
+ "get_obj_op_rate" : 0.0, // Rate of OBS get operations. Unit: times/s.
+ "put_op_rate" : 0.0, // Rate of OBS put operations. Unit: times/s.
+ "obs_total_op_rate" : 0.0, // Rate of all OBS operations. The unit is times/s.
+ "obs_upload_rate" : "0.0 MB/s", // Data upload rate of OBS, in MB/s.
+ "obs_download_rate" : "0.0 MB/s" // Data download rate of OBS, in MB/s.
+ }
+ }
+ }
+ }
+
+
+ System Index- System index name: .freeze_obs_rate-YYYY.mm.dd.
- Example: .freeze_obs_rate-2023.01.23
The default retention period of indexes is 30 days.
+
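+For example, to check which daily OBS rate indexes currently exist and how much space they occupy, you can list them with the standard _cat API (an illustrative request; the index name follows the pattern above):
+GET _cat/indices/.freeze_obs_rate-*?v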
+
+
+ Configuration Item
+ Configuration Item
+ |
+Type
+ |
+Scope
+ |
+Can Be Dynamically Modified
+ |
+Description
+ |
+
+low_cost.obs_rate_index.evict_time
+ |
+String
+ |
+node
+ |
+Yes
+ |
+The retention period of the .freeze_obs_rate-YYYY.mm.dd index.
+- Value range: 1d to 365d
- Default value: 30d
- Unit: day
+ |
+
+
+
+
+
+ For example, run the following command to modify the retention period of the .freeze_obs_rate-YYYY.mm.dd index:
+ PUT _cluster/settings
+ {
+ "persistent": {
+ "low_cost.obs_rate_index.evict_time": "7d"
+ }
+ }
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528379305.html b/docs/css/umn/en-us_topic_0000001528379305.html
new file mode 100644
index 00000000..7f64ea06
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528379305.html
@@ -0,0 +1,17 @@
+
+
+Accessing an Elasticsearch Cluster
+Elasticsearch clusters have built-in Kibana and Cerebro components. You can quickly access an Elasticsearch cluster through Kibana and Cerebro.
+ Access a Cluster Through Kibana- Log in to the CSS management console.
- On the Clusters page, locate the target cluster and click Access Kibana in the Operation column to go to the Kibana login page.
- Non-security cluster: The Kibana console is displayed.
- Security cluster: Enter the username and password on the login page and click Log In to go to the Kibana console. The default username is admin and the password is the one specified during cluster creation.
+ - After the login is successful, you can access clusters through Kibana.
+
+ Accessing a Cluster Through Cerebro- Log in to the CSS management console.
- On the Clusters page, locate the target cluster and click More > Cerebro in the Operation column to go to the Cerebro login page.
- Non-security cluster: Click the cluster name on the Cerebro login page to go to the Cerebro console.
- Security cluster: Click the cluster name on the Cerebro login page, enter the username and password, and click Authenticate to go to the Cerebro console. The default username is admin and the password is the one specified during cluster creation.
+ - After the login is successful, you can access clusters through Cerebro.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528379309.html b/docs/css/umn/en-us_topic_0000001528379309.html
new file mode 100644
index 00000000..cf3f18dc
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528379309.html
@@ -0,0 +1,139 @@
+
+
+Configuring Cache
+After data is dumped to OBS, some data is cached to reduce access to OBS and improve Elasticsearch query performance. Data that is requested for the first time is obtained from OBS. The obtained data is cached in the memory. In subsequent queries, the system searches for data in the cache first. Data can be cached in memory or files.
+ Elasticsearch accesses different files in different modes. The cache system supports multi-level cache and uses blocks of different sizes to cache different files. For example, a large number of small blocks are used to cache .fdx and .tip files, and a small number of large blocks are used to cache .fdt files.
+
+ Table 1 Cache configurations
+Parameter
+ |
+Type
+ |
+Description
+ |
+
+
+low_cost.obs.blockcache.names
+ |
+Array
+ |
+The cache system supports multi-level cache for data of different access granularities. This configuration lists the names of all caches. If this parameter is not set, the system has a cache named default. To customize the configuration, ensure there is a cache named default.
+Default value: default
+ |
+
+low_cost.obs.blockcache.<NAME>.type
+ |
+ENUM
+ |
+Cache type, which can be memory or file.
+If it is set to memory, certain memory will be occupied. If it is set to file, cache will be stored in disks. You are advised to use ultra-high I/O disks to improve cache performance.
+Default value: memory
+ |
+
+low_cost.obs.blockcache.<NAME>.blockshift
+ |
+Integer
+ |
+Size of each block in the cache, expressed as a power of two (a left shift). For example, if this parameter is set to 16, the block size is 2^16 bytes, that is, 65536 bytes (64 KB); a value of 21 corresponds to 2 MB blocks.
+Default value: 13 (8 KB)
+ |
+
+low_cost.obs.blockcache.<NAME>.bank.count
+ |
+Integer
+ |
+Number of cache partitions.
+Default value: 1
+ |
+
+low_cost.obs.blockcache.<NAME>.number.blocks.perbank
+ |
+Integer
+ |
+Number of blocks included in each cache partition.
+Default value: 8192
+ |
+
+low_cost.obs.blockcache. <NAME>.exclude.file.types
+ |
+Array
+ |
+Extensions of files that are not cached. If the extensions of certain files are neither in the exclude list nor in the include list, they are stored in the default cache.
+ |
+
+low_cost.obs.blockcache. <NAME>.file.types
+ |
+Array
+ |
+Extensions of cached files. If the extensions of certain files are neither in the exclude list nor in the include list, they are stored in the default cache.
+ |
+
+
+
+
+ The following is a common cache configuration. It uses two levels of caches, default and large. The default cache uses 64 KB blocks and has a total of 30 x 4096 blocks. It is used to cache files except .fdt files. The large cache uses 2 MB blocks and contains 5 x 1000 blocks. It is used to cache .fdx, .dvd, and .tip files.
+ low_cost.obs.blockcache.names: ["default", "large"]
+low_cost.obs.blockcache.default.type: file
+low_cost.obs.blockcache.default.blockshift: 16
+low_cost.obs.blockcache.default.number.blocks.perbank: 4096
+low_cost.obs.blockcache.default.bank.count: 30
+low_cost.obs.blockcache.default.exclude.file.types: ["fdt"]
+
+low_cost.obs.blockcache.large.type: file
+low_cost.obs.blockcache.large.blockshift: 21
+low_cost.obs.blockcache.large.number.blocks.perbank: 1000
+low_cost.obs.blockcache.large.bank.count: 5
+low_cost.obs.blockcache.large.file.types: ["fdx", "dvd", "tip"]
+
+
+ Table 2 Other parameters
+Parameter
+ |
+Type
+ |
+Description
+ |
+
+
+index.frozen.obs.max_bytes_per_sec
+ |
+String
+ |
+Maximum rate of uploading files to OBS during freezing. It takes effect immediately after you complete configuration.
+Default value: 150MB
+ |
+
+low_cost.obs.index.upload.threshold.use.multipart
+ |
+String
+ |
+If the file size exceeds the value of this parameter during freezing, the multipart upload function of OBS is used.
+Default value: 1GB
+ |
+
+index.frozen.reader.cache.expire.duration.seconds
+ |
+Integer
+ |
+Timeout duration.
+To reduce the heap memory occupied by frozen indexes, the reader caches data for a period of time after the index shard is started, and stops caching after it times out.
+Default value: 300s
+ |
+
+index.frozen.reader.cache.max.size
+ |
+Integer
+ |
+Maximum cache size.
+Default value: 100
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528379313.html b/docs/css/umn/en-us_topic_0000001528379313.html
new file mode 100644
index 00000000..138bbbae
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528379313.html
@@ -0,0 +1,32 @@
+
+
+Basic Settings
+- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster, and click Access Kibana in the Operation column.
- Click Dev Tools in the navigation tree on the left and perform the following operations:
Configure the primary cluster information. PUT /_cluster/settings
+{
+ "persistent" : {
+ "cluster" : {
+ "remote.rest" : {
+ "leader1" : {
+ "seeds" : [
+ "http://10.0.0.1:9200",
+ "http://10.0.0.2:9200",
+ "http://10.0.0.3:9200"
+ ] ,
+ "username": "elastic",
+ "password": "*****"
+ }
+ }
+ }
+ }
+}
+ - Secondary clusters must be able to access the REST API (default port: 9200) of the primary cluster.
- The primary cluster name is leader1 and can be changed.
- The value of seeds is the REST address of the primary cluster. Multiple values are supported. If HTTPS access is enabled, the URI scheme must be changed to https.
- username and password are required only when the security mode is enabled for the primary cluster.
- After the configuration is complete, you can use the GET _remote/rest/info API to obtain the connection status with the primary cluster.
+
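+For example, run the following in Dev Tools on the secondary cluster to check the connection status with leader1 (the exact response content depends on the cluster version):
+GET _remote/rest/info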
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528379317.html b/docs/css/umn/en-us_topic_0000001528379317.html
new file mode 100644
index 00000000..bfe48f82
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528379317.html
@@ -0,0 +1,303 @@
+
+
+Getting Started with Elasticsearch
+This section describes how to use Elasticsearch for product search. You can use the Elasticsearch search engine of CSS to search for data based on the scenario example. The basic operation process is as follows:
+
+ Scenario DescriptionA women's clothing brand builds an e-commerce website. It uses traditional databases to provide a product search function for users. However, as the number of users and the business volume grow, the traditional databases respond slowly and return inaccurate results. To improve user experience and user retention, the e-commerce website plans to use Elasticsearch to provide the product search function for users.
+ This section describes how to use Elasticsearch to provide the search function for users.
+ Assume that the e-commerce website provides the following data:
+ {
+"products":[
+{"productName":"Latest art shirts for women in 2017 autumn","size":"L"}
+{"productName":"Latest art shirts for women in 2017 autumn","size":"M"}
+{"productName":"Latest art shirts for women in 2017 autumn","size":"S"}
+{"productName":"Latest jeans for women in spring 2018","size":"M"}
+{"productName":"Latest jeans for women in spring 2018","size":"S"}
+{"productName":"Latest casual pants for women in spring 2017","size":"L"}
+{"productName":"Latest casual pants for women in spring 2017","size":"S"}
+]
+}
+
+ Step 1: Create a ClusterCreate a cluster using Elasticsearch as the search engine. In this example, suppose that you create a cluster named Es-xfx. This cluster is used only for getting started with Elasticsearch. For this cluster, you are advised to select css.medium.8 for Node Specifications, Common I/O for Node Storage Type, and 40 GB for Node Storage Capacity. For details, see Creating a Cluster in Non-Security Mode.
+ After you create the cluster, switch to the cluster list to view the created cluster. If the Status of the cluster is Available, the cluster is created successfully.
+ Figure 1 Creating a cluster
+
+ Step 2: Import DataCSS supports importing data to Elasticsearch using Logstash, Kibana, or APIs. Kibana lets you visualize your Elasticsearch data. The following procedure illustrates how to import data to Elasticsearch using Kibana.
+ - On the Clusters page, locate the target cluster and click Access Kibana in the Operation column to go to the Kibana login page.
- Non-security cluster: The Kibana console is displayed.
- Security cluster: Enter the username and password on the login page and click Log In to go to the Kibana console. The default username is admin and the password is the one specified during cluster creation.
+ - In the navigation pane of Kibana on the left, choose Dev Tools.
The text box on the left is the input box. The triangle icon in the upper right corner of the input box is the command execution button. The text box on the right displays the results.
+Figure 2 Console page
+ The Kibana UI varies depending on the Kibana version.
+
- On the Console page, run the following command to create an index named my_store:
Versions earlier than 7.x
+PUT /my_store
+{
+ "settings": {
+ "number_of_shards": 1
+ },
+ "mappings": {
+ "products": {
+ "properties": {
+ "productName": {
+ "type": "text",
+ "analyzer": "ik_smart"
+ },
+ "size": {
+ "type": "keyword"
+ }
+ }
+ }
+ }
+}
+
+Versions 7.x and later
+PUT /my_store
+{
+ "settings": {
+ "number_of_shards": 1
+ },
+ "mappings": {
+ "properties": {
+ "productName": {
+ "type": "text",
+ "analyzer": "ik_smart"
+ },
+ "size": {
+ "type": "keyword"
+ }
+ }
+ }
+ }
+The command output is similar to the following:
+{
+ "acknowledged" : true,
+ "shards_acknowledged" : true,
+ "index" : "my_store"
+}
- On the Console page, run the following command to import data to the index named my_store:
Versions earlier than 7.x
+POST /my_store/products/_bulk
+{"index":{}}
+{"productName":"Latest art shirts for women in 2017 autumn","size":"L"}
+{"index":{}}
+{"productName":"Latest art shirts for women in 2017 autumn","size":"M"}
+{"index":{}}
+{"productName":"Latest art shirts for women in 2017 autumn","size":"S"}
+{"index":{}}
+{"productName":"Latest jeans for women in spring 2018","size":"M"}
+{"index":{}}
+{"productName":"Latest jeans for women in spring 2018","size":"S"}
+{"index":{}}
+{"productName":"Latest casual pants for women in spring 2017","size":"L"}
+{"index":{}}
+{"productName":"Latest casual pants for women in spring 2017","size":"S"}
+
+Versions 7.x and later
+POST /my_store/_doc/_bulk
+{"index":{}}
+{"productName":"Latest art shirts for women in 2017 autumn","size":"L"}
+{"index":{}}
+{"productName":"Latest art shirts for women in 2017 autumn","size":"M"}
+{"index":{}}
+{"productName":"Latest art shirts for women in 2017 autumn","size":"S"}
+{"index":{}}
+{"productName":"Latest jeans for women in spring 2018","size":"M"}
+{"index":{}}
+{"productName":"Latest jeans for women in spring 2018","size":"S"}
+{"index":{}}
+{"productName":"Latest casual pants for women in spring 2017","size":"L"}
+{"index":{}}{"productName":"Latest casual pants for women in spring 2017","size":"S"}
+If the value of the errors field in the command output is false, the data is imported successfully.
+
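+As an optional check (not part of the original procedure), you can confirm the number of imported documents with the standard _cat API; for this example the count should be 7:
+GET _cat/count/my_store?v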
+
+ Step 3: Search for Data- Full-text search
If you access the e-commerce website and want to search for commodities whose names include "spring jeans", enter "spring jeans" to begin your search. The following example shows the command to be executed on Kibana and the command output.
+Command to be executed on Kibana:
+Versions earlier than 7.x
+GET /my_store/products/_search
+{
+ "query": {"match": {
+ "productName": "spring jeans"
+ }}
+}
+Versions 7.x and later
+GET /my_store/_search
+{
+ "query": {"match": {
+ "productName": "spring jeans"
+ }}
+}
+The command output is similar to the following:
+{
+ "took": 80,
+ "timed_out": false,
+ "_shards": {
+ "total": 1,
+ "successful": 1,
+ "skipped": 0,
+ "failed": 0
+ },
+ "hits": {
+ "total": 4,
+ "max_score": 1.8069603,
+ "hits": [
+ {
+ "_index": "my_store",
+ "_type": "products",
+ "_id": "yTG1QWUBRuneTTG2KJSq",
+ "_score": 1.8069603,
+ "_source": {
+ "productName": "Latest jeans for women in spring 2018",
+ "size": "M"
+ }
+ },
+ {
+ "_index": "my_store",
+ "_type": "products",
+ "_id": "yjG1QWUBRuneTTG2KJSq",
+ "_score": 1.8069603,
+ "_source": {
+ "productName": "Latest jeans for women in spring 2018",
+ "size": "S"
+ }
+ },
+ {
+ "_index": "my_store",
+ "_type": "products",
+ "_id": "yzG1QWUBRuneTTG2KJSq",
+ "_score": 0.56677663,
+ "_source": {
+ "productName": "Latest casual pants for women in spring 2017",
+ "size": "L"
+ }
+ },
+ {
+ "_index": "my_store",
+ "_type": "products",
+ "_id": "zDG1QWUBRuneTTG2KJSq",
+ "_score": 0.56677663,
+ "_source": {
+ "productName": "Latest casual pants for women in spring 2017",
+ "size": "S"
+ }
+ }
+ ]
+ }
+}
+- Elasticsearch supports full-text search. The preceding command searches for the information about all commodities whose names include "spring" or "jeans".
- Unlike traditional databases, Elasticsearch can return results in milliseconds by using inverted indexes.
- Elasticsearch supports sorting by relevance score. In the command output, the names of the first two commodities contain both "spring" and "jeans", while those of the last two contain only "spring". Therefore, the first two commodities rank higher due to better keyword matching.
+
+ - Aggregation result display
The e-commerce website provides the function of displaying aggregation results. For example, it classifies commodities corresponding to "spring" based on the size so that you can collect the number of products of different sizes. The following example shows the command to be executed on Kibana and the command output.
+Command to be executed on Kibana:
+Versions earlier than 7.x
+GET /my_store/products/_search
+{
+"query": {
+"match": { "productName": "spring" }
+},
+"size": 0,
+"aggs": {
+"sizes": {
+"terms": { "field": "size" }
+}
+}
+}
+Versions 7.x and later
+GET /my_store/_search
+{
+"query": {
+"match": { "productName": "spring" }
+},
+"size": 0,
+"aggs": {
+"sizes": {
+"terms": { "field": "size" }
+}
+}
+}
+The command output is similar to the following:
+Versions earlier than 7.x
+{
+ "took": 66,
+ "timed_out": false,
+ "_shards": {
+ "total": 1,
+ "successful": 1,
+ "skipped": 0,
+ "failed": 0
+ },
+ "hits": {
+ "total": 4,
+ "max_score": 0,
+ "hits": []
+ },
+ "aggregations": {
+ "sizes": {
+ "doc_count_error_upper_bound": 0,
+ "sum_other_doc_count": 0,
+ "buckets": [
+ {
+ "key": "S",
+ "doc_count": 2
+ },
+ {
+ "key": "L",
+ "doc_count": 1
+ },
+ {
+ "key": "M",
+ "doc_count": 1
+ }
+ ]
+ }
+ }
+}
+Versions 7.x and later
+{
+ "took" : 27,
+ "timed_out" : false,
+ "_shards" : {
+ "total" : 1,
+ "successful" : 1,
+ "skipped" : 0,
+ "failed" : 0
+ },
+ "hits" : {
+ "total" : {
+ "value" : 3,
+ "relation" : "eq"
+ },
+ "max_score" : null,
+ "hits" : [ ]
+ },
+ "aggregations" : {
+ "sizes" : {
+ "doc_count_error_upper_bound" : 0,
+ "sum_other_doc_count" : 0,
+ "buckets" : [
+ {
+ "key" : "L",
+ "doc_count" : 1
+ },
+ {
+ "key" : "M",
+ "doc_count" : 1
+ },
+ {
+ "key" : "S",
+ "doc_count" : 1
+ }
+ ]
+ }
+ }
+}
+
+
+ Step 4: Delete the ClusterOnce you understand the process and method of using Elasticsearch, you can perform the following steps to delete the cluster you created for the example and its data to avoid resource wastage.
+ After you delete a cluster, its data cannot be restored. Exercise caution when deleting a cluster.
+
+ - Log in to the CSS management console. In the navigation pane on the left, choose Clusters > Elasticsearch.
- Locate the row that contains cluster Es-xfx and click More > Delete in the Operation column.
- In the displayed dialog box, enter the name of the cluster to be deleted and click OK.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528379321.html b/docs/css/umn/en-us_topic_0000001528379321.html
new file mode 100644
index 00000000..71f38b18
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528379321.html
@@ -0,0 +1,13 @@
+
+
+Features
+CSS supports read/write splitting. Data written to the primary cluster (Leader) can be automatically synchronized to the secondary cluster (Follower). In this way, data is written to the primary cluster and queried in the secondary cluster. The read and write can be separated to improve the query performance (as shown in the left part of Figure 1). When the primary cluster is unavailable, the secondary cluster can provide data write and query services (as shown in the right part of Figure 1).
+ Figure 1 Two application scenarios of read/write splitting
+ Currently, only clusters of versions 7.6.2 and 7.10.2 support read/write splitting. The versions of the primary and secondary clusters must be the same.
+
+
+
diff --git a/docs/css/umn/css_01_0129.html b/docs/css/umn/en-us_topic_0000001528499121.html
similarity index 93%
rename from docs/css/umn/css_01_0129.html
rename to docs/css/umn/en-us_topic_0000001528499121.html
index 276f8558..1a84fe95 100644
--- a/docs/css/umn/css_01_0129.html
+++ b/docs/css/umn/en-us_topic_0000001528499121.html
@@ -1,13 +1,13 @@
-
+
Sample Code for Vector Search on a Client
-Elasticsearch provides standard REST APIs and clients developed using Java, Python, and Go.
- Based on the open-source dataset SIFT1M (http://corpus-texmex.irisa.fr/) and Python Elasticsearch client, this section provides a code snippet for creating a vector index, importing vector data, and querying vector data on the client.
- PrerequisitesThe Python dependency package has been installed on the client. If it is not installed, run the following commands to install it:
- pip install numpy
+Elasticsearch provides standard REST APIs and clients developed using Java, Python, and Go.
+ Based on the open-source dataset SIFT1M (http://corpus-texmex.irisa.fr/) and Python Elasticsearch client, this section provides a code snippet for creating a vector index, importing vector data, and querying vector data on the client.
+ PrerequisitesThe Python dependency package has been installed on the client. If it is not installed, run the following commands to install it:
+ pip install numpy
pip install elasticsearch==7.6.0
- Sample Code 1
+Sample Code 1
2
3
4
@@ -323,7 +323,7 @@ pip install elasticsearch==7.6.0
diff --git a/docs/css/umn/en-us_topic_0000001528499125.html b/docs/css/umn/en-us_topic_0000001528499125.html
new file mode 100644
index 00000000..922cc7bb
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528499125.html
@@ -0,0 +1,13 @@
+
+
+Context
+CSS monitors various metrics of the running status and change trend of cluster indexes to measure service usage and handle potential risks in a timely manner, ensuring that clusters can run stably.
+ During index monitoring, the stats information about indexes is collected and saved to the monitoring index (monitoring-eye-css-[yyyy-mm-dd]) of the cluster, and retained for one week by default.
+ Currently, only the Elasticsearch clusters of the versions 7.6.2 and 7.10.2 support the index monitoring.
+
+
+
diff --git a/docs/css/umn/css_01_0197.html b/docs/css/umn/en-us_topic_0000001528499129.html
similarity index 90%
rename from docs/css/umn/css_01_0197.html
rename to docs/css/umn/en-us_topic_0000001528499129.html
index a5c1df6e..7aeceb27 100644
--- a/docs/css/umn/css_01_0197.html
+++ b/docs/css/umn/en-us_topic_0000001528499129.html
@@ -1,8 +1,8 @@
-
+
kibana-monitor
-The configuration file content of kibana-monitor is as follows. You are advised to save the file as monitoring-kibana.ndjson.
- {"attributes":{"description":"","kibanaSavedObjectMeta":{"searchSourceJSON":"{}"},"title":"[monitoring] segment memory in bytes of index for top10","uiStateJSON":"{}","version":1,"visState":"{\"title\":\"[monitoring] segment memory in bytes of index for top10\",\"type\":\"metrics\",\"aggs\":[],\"params\":{\"id\":\"61ca57f0-469d-11e7-af02-69e470af7417\",\"type\":\"timeseries\",\"series\":[{\"id\":\"61ca57f1-469d-11e7-af02-69e470af7417\",\"color\":\"#68BC00\",\"split_mode\":\"terms\",\"split_color_mode\":\"kibana\",\"metrics\":[{\"id\":\"61ca57f2-469d-11e7-af02-69e470af7417\",\"type\":\"max\",\"field\":\"index_stats.total.segments.memory_in_bytes\"}],\"separate_axis\":0,\"axis_position\":\"right\",\"formatter\":\"bytes\",\"chart_type\":\"line\",\"line_width\":1,\"point_size\":1,\"fill\":0.5,\"stacked\":\"none\",\"label\":\"segments memory in bytes \",\"type\":\"timeseries\",\"terms_field\":\"index_stats.index\",\"terms_order_by\":\"61ca57f2-469d-11e7-af02-69e470af7417\"}],\"time_field\":\"timestamp\",\"index_pattern\":\"monitoring-eye-css-*\",\"interval\":\"\",\"axis_position\":\"left\",\"axis_formatter\":\"number\",\"axis_scale\":\"normal\",\"show_legend\":1,\"show_grid\":1,\"tooltip_mode\":\"show_all\",\"default_index_pattern\":\"monitoring-eye-css-*\",\"default_timefield\":\"timestamp\",\"isModelInvalid\":false}}"},"id":"3ae5d820-6628-11ed-8cd7-973626cf6f70","references":[],"type":"visualization","updated_at":"2022-12-01T12:41:01.165Z","version":"WzIwNiwyXQ=="}
+The configuration file content of kibana-monitor is as follows. You are advised to save the file as monitoring-kibana.ndjson.
+ {"attributes":{"description":"","kibanaSavedObjectMeta":{"searchSourceJSON":"{}"},"title":"[monitoring] segment memory in bytes of index for top10","uiStateJSON":"{}","version":1,"visState":"{\"title\":\"[monitoring] segment memory in bytes of index for top10\",\"type\":\"metrics\",\"aggs\":[],\"params\":{\"id\":\"61ca57f0-469d-11e7-af02-69e470af7417\",\"type\":\"timeseries\",\"series\":[{\"id\":\"61ca57f1-469d-11e7-af02-69e470af7417\",\"color\":\"#68BC00\",\"split_mode\":\"terms\",\"split_color_mode\":\"kibana\",\"metrics\":[{\"id\":\"61ca57f2-469d-11e7-af02-69e470af7417\",\"type\":\"max\",\"field\":\"index_stats.total.segments.memory_in_bytes\"}],\"separate_axis\":0,\"axis_position\":\"right\",\"formatter\":\"bytes\",\"chart_type\":\"line\",\"line_width\":1,\"point_size\":1,\"fill\":0.5,\"stacked\":\"none\",\"label\":\"segments memory in bytes \",\"type\":\"timeseries\",\"terms_field\":\"index_stats.index\",\"terms_order_by\":\"61ca57f2-469d-11e7-af02-69e470af7417\"}],\"time_field\":\"timestamp\",\"index_pattern\":\"monitoring-eye-css-*\",\"interval\":\"\",\"axis_position\":\"left\",\"axis_formatter\":\"number\",\"axis_scale\":\"normal\",\"show_legend\":1,\"show_grid\":1,\"tooltip_mode\":\"show_all\",\"default_index_pattern\":\"monitoring-eye-css-*\",\"default_timefield\":\"timestamp\",\"isModelInvalid\":false}}"},"id":"3ae5d820-6628-11ed-8cd7-973626cf6f70","references":[],"type":"visualization","updated_at":"2022-12-01T12:41:01.165Z","version":"WzIwNiwyXQ=="}
{"attributes":{"description":"","kibanaSavedObjectMeta":{"searchSourceJSON":"{}"},"title":"[monitoring] segment count of index for top10","uiStateJSON":"{}","version":1,"visState":"{\"aggs\":[],\"params\":{\"axis_formatter\":\"number\",\"axis_position\":\"left\",\"axis_scale\":\"normal\",\"default_index_pattern\":\"monitoring-eye-css-*\",\"default_timefield\":\"timestamp\",\"filter\":{\"language\":\"kuery\",\"query\":\"\"},\"id\":\"61ca57f0-469d-11e7-af02-69e470af7417\",\"index_pattern\":\"monitoring-eye-css-*\",\"interval\":\"\",\"isModelInvalid\":false,\"series\":[{\"axis_position\":\"right\",\"chart_type\":\"line\",\"color\":\"rgba(231,102,76,1)\",\"fill\":0.5,\"formatter\":\"number\",\"id\":\"61ca57f1-469d-11e7-af02-69e470af7417\",\"label\":\"segment count of index for top10\",\"line_width\":1,\"metrics\":[{\"field\":\"index_stats.total.segments.count\",\"id\":\"61ca57f2-469d-11e7-af02-69e470af7417\",\"type\":\"max\"}],\"point_size\":1,\"separate_axis\":0,\"split_color_mode\":\"kibana\",\"split_mode\":\"terms\",\"stacked\":\"none\",\"terms_field\":\"index_stats.index\",\"terms_order_by\":\"61ca57f2-469d-11e7-af02-69e470af7417\",\"type\":\"timeseries\"}],\"show_grid\":1,\"show_legend\":1,\"time_field\":\"timestamp\",\"tooltip_mode\":\"show_all\",\"type\":\"timeseries\"},\"title\":\"[monitoring] segment count of index for top10\",\"type\":\"metrics\"}"},"id":"45d571c0-6626-11ed-8cd7-973626cf6f70","references":[],"type":"visualization","updated_at":"2022-12-01T12:41:01.165Z","version":"WzIwNywyXQ=="}
{"attributes":{"description":"","kibanaSavedObjectMeta":{"searchSourceJSON":"{}"},"title":"[monitoring] markdown","uiStateJSON":"{}","version":1,"visState":"{\"title\":\"[monitoring] markdown\",\"type\":\"markdown\",\"params\":{\"fontSize\":12,\"openLinksInNewTab\":false,\"markdown\":\"### Index Monitoring \\nThis dashboard contains default table for you to play with. You can view it, search it, and interact with the visualizations.\"},\"aggs\":[]}"},"id":"b2811c70-a5f1-11ec-9a68-ada9d754c566","references":[],"type":"visualization","updated_at":"2022-12-01T12:41:01.165Z","version":"WzIwOCwyXQ=="}
{"attributes":{"description":"number of document being indexing for primary and replica shards","kibanaSavedObjectMeta":{"searchSourceJSON":"{}"},"title":"[monitoring] Indexing Rate (/s)","uiStateJSON":"{}","version":1,"visState":"{\"title\":\"[monitoring] Indexing Rate (/s)\",\"type\":\"metrics\",\"params\":{\"id\":\"61ca57f0-469d-11e7-af02-69e470af7417\",\"type\":\"timeseries\",\"series\":[{\"id\":\"61ca57f1-469d-11e7-af02-69e470af7417\",\"color\":\"rgba(0,32,188,1)\",\"split_mode\":\"everything\",\"metrics\":[{\"id\":\"61ca57f2-469d-11e7-af02-69e470af7417\",\"type\":\"max\",\"field\":\"indices_stats._all.total.indexing.index_total\"},{\"unit\":\"1s\",\"id\":\"fed72db0-a5f8-11ec-aa10-992297d21a2e\",\"type\":\"derivative\",\"field\":\"61ca57f2-469d-11e7-af02-69e470af7417\"},{\"unit\":\"\",\"id\":\"14b66420-a5f9-11ec-aa10-992297d21a2e\",\"type\":\"positive_only\",\"field\":\"fed72db0-a5f8-11ec-aa10-992297d21a2e\"}],\"separate_axis\":0,\"axis_position\":\"right\",\"formatter\":\"number\",\"chart_type\":\"line\",\"line_width\":1,\"point_size\":1,\"fill\":0.5,\"stacked\":\"none\",\"label\":\"Indexing Rate (/s)\",\"type\":\"timeseries\",\"split_color_mode\":\"rainbow\",\"hidden\":false}],\"time_field\":\"timestamp\",\"index_pattern\":\"monitoring-eye-css-*\",\"interval\":\"\",\"axis_position\":\"left\",\"axis_formatter\":\"number\",\"axis_scale\":\"normal\",\"show_legend\":1,\"show_grid\":1,\"default_index_pattern\":\"monitoring-eye-css-*\",\"default_timefield\":\"timestamp\",\"isModelInvalid\":false,\"legend_position\":\"bottom\"},\"aggs\":[]}"},"id":"de4f8ab0-a5f8-11ec-9a68-ada9d754c566","references":[],"type":"visualization","updated_at":"2022-12-01T12:41:01.165Z","version":"WzIwOSwyXQ=="}
@@ -19,7 +19,7 @@
diff --git a/docs/css/umn/en-us_topic_0000001528499133.html b/docs/css/umn/en-us_topic_0000001528499133.html
new file mode 100644
index 00000000..6762054f
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528499133.html
@@ -0,0 +1,24 @@
+
+
+Index Monitoring
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528499137.html b/docs/css/umn/en-us_topic_0000001528499137.html
new file mode 100644
index 00000000..7dc1e3f9
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528499137.html
@@ -0,0 +1,197 @@
+
+
+Using Logstash to Import Data to Elasticsearch
+You can use Logstash to collect data and migrate collected data to Elasticsearch in CSS. This method helps you effectively obtain and manage data through Elasticsearch. Data files can be in the JSON or CSV format.
+ Logstash is an open-source, server-side data processing pipeline that ingests data from multiple sources simultaneously, transforms data, and then sends data to Elasticsearch. For details about Logstash, visit the following website: https://www.elastic.co/guide/en/logstash/current/getting-started-with-logstash.html
+ The following two scenarios are involved depending on the Logstash deployment:
+
+
+ Importing Data When Logstash Is Deployed on the External NetworkFigure 1 illustrates how data is imported when Logstash is deployed on an external network.
+ Figure 1 Importing data when Logstash is deployed on an external network
+
+ - Create a jump host and configure it as follows:
- The jump host is an ECS running the Linux OS and has been bound with an EIP.
- The jump host resides in the same VPC as the CSS cluster.
- SSH local port forwarding is configured for the jump host to forward requests from a chosen local port to port 9200 on one node of the CSS cluster.
- Refer to SSH documentation for the local port forwarding configuration.
+ - Use PuTTY to log in to the created jump host with the EIP.
- Run the following command to perform port mapping and transfer the request sent to the port on the jump host to the target cluster:
ssh -g -L <Local port of the jump host:Private network address and port number of a node> -N -f root@<Private IP address of the jump host>
+ - In the preceding command, <Local port of the jump host> refers to the port obtained in 1.
- In the preceding command, <Private network address and port number of a node> refers to the private network address and port number of a node in the cluster. If the node is faulty, the command execution will fail. If the cluster contains multiple nodes, you can replace the value of <private network address and port number of a node> with the private network address and port number of any available node in the cluster. If the cluster contains only one node, restore the node and execute the command again.
- Replace <Private IP address of the jump host> in the preceding command with the IP address (with Private IP) of the created jump host in the IP Address column in the ECS list on the ECS management console.
+
+For example, port 9200 on the jump host is assigned external network access permissions, the private network address and port number of the node are 192.168.0.81 and 9200, respectively, and the private IP address of the jump host is 192.168.0.227. You need to run the following command to perform port mapping:
+ssh -g -L 9200:192.168.0.81:9200 -N -f root@192.168.0.227
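+To verify that the port forwarding works, you can send a test request from the Logstash server to the jump host (a simple sketch assuming a non-security cluster accessed over HTTP; adjust the protocol and credentials for security clusters):
+curl http://<EIP of the jump host>:9200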
+ - Log in to the server where Logstash is deployed and store the data files to be imported on the server.
For example, data file access_20181029_log needs to be imported, the file storage path is /tmp/access_log/, and the data file includes the following data:
+ Create the access_log folder if it does not exist.
+
+| All | Heap used for segments | | 18.6403 | MB |
+| All | Heap used for doc values | | 0.119289 | MB |
+| All | Heap used for terms | | 17.4095 | MB |
+| All | Heap used for norms | | 0.0767822 | MB |
+| All | Heap used for points | | 0.225246 | MB |
+| All | Heap used for stored fields | | 0.809448 | MB |
+| All | Segment count | | 101 | |
+| All | Min Throughput | index-append | 66232.6 | docs/s |
+| All | Median Throughput | index-append | 66735.3 | docs/s |
+| All | Max Throughput | index-append | 67745.6 | docs/s |
+| All | 50th percentile latency | index-append | 510.261 | ms |
+
+ - In the server where Logstash is deployed, run the following command to create configuration file logstash-simple.conf in the Logstash installation directory:
cd /<Logstash installation directory>/
+vi logstash-simple.conf
+ - Input the following content in logstash-simple.conf:
input {
+Location of data
+}
+filter {
+Related data processing
+}
+output {
+ elasticsearch {
+ hosts => "<EIP of the jump host>:<Number of the port assigned external network access permissions on the jump host>"
+ (Optional) If communication encryption has been enabled on the cluster, you need to add the following configuration:
+ ssl => true
+ ssl_certificate_verification => false
+ }
+}
+- The input parameter indicates the data source. Set this parameter based on the actual conditions. For details about the input parameter and parameter usage, visit the following website: https://www.elastic.co/guide/en/logstash/current/input-plugins.html
- The filter parameter specifies the mode in which data is processed. For example, extract and process logs to convert unstructured information into structured information. For details about the filter parameter and parameter usage, visit the following website: https://www.elastic.co/guide/en/logstash/current/filter-plugins.html
- The output parameter indicates the destination address of the data. For details about the output parameter and parameter usage, visit https://www.elastic.co/guide/en/logstash/current/output-plugins.html. Replace <EIP address of the jump host> with the IP address (with EIP) of the created jump host in the IP Address column in the ECS list on the ECS management console. <Number of the port assigned external network access permissions on the jump host> is the number of the port obtained in 1, for example, 9200.
+Consider the data files in the /tmp/access_log/ path mentioned in 4 as an example. Assume that data import starts from data in the first row of the data file, the filtering condition is left unspecified (indicating no data processing operations are performed), the public IP address and port number of the jump host are 192.168.0.227 and 9200, respectively, and the name of the target index is myindex. Edit the configuration file as follows, and enter :wq to save the configuration file and exit.
+input {
+ file{
+ path => "/tmp/access_log/*"
+ start_position => "beginning"
+ }
+}
+filter {
+}
+output {
+ elasticsearch {
+ hosts => "192.168.0.227:9200"
+ index => "myindex"
+
+ }
+}
+ If a license error is reported, set ilm_enabled to false.
+
+If the cluster has the security mode enabled, you need to download a certificate first.
+- Download a certificate on the Basic Information page of the cluster.
Figure 2 Downloading a certificate
+ - Store the certificate to the server where Logstash is deployed.
- Modify the logstash-simple.conf configuration file.
Consider the data files in the /tmp/access_log/ path mentioned in 4 as an example. Assume that data import starts from data in the first row of the data file, the filtering condition is left unspecified (indicating no data processing operations are performed), and the public IP address and port number of the jump host are 192.168.0.227 and 9200, respectively. The name of the index for importing data is myindex, and the certificate is stored in /logstash/logstash6.8/config/CloudSearchService.cer. Edit the configuration file as follows, and enter :wq to save the configuration file and exit.
+input {
+ file {
+ path => "/tmp/access_log/*"
+ start_position => "beginning"
+ }
+}
+filter {
+ }
+output{
+ elasticsearch{
+ hosts => ["https://192.168.0.227:9200"]
+ index => "myindex"
+ user => "admin"
+ password => "******"
+ cacert => "/logstash/logstash6.8/config/CloudSearchService.cer"
+ manage_template => false
+ ilm_enabled => false
+ ssl => true
+ ssl_certificate_verification => false
+ }
+}
+ password: password for logging in to the cluster
+
+
+
+ - Run the following command to import the data collected by Logstash to the cluster:
./bin/logstash -f logstash-simple.conf
+ This command must be executed in the directory where the logstash-simple.conf file is stored. For example, if the logstash-simple.conf file is stored in /root/logstash-7.1.1/, go to the directory before running the command.
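+For example, a minimal sketch of the full sequence, using the example path above (replace it with your actual Logstash installation directory):
+cd /root/logstash-7.1.1/
+./bin/logstash -f logstash-simple.conf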
+
+ - Log in to the CSS management console.
- In the navigation pane on the left, choose Clusters > Elasticsearch to switch to the Clusters page.
- From the cluster list, locate the row that contains the cluster to which you want to import data and click Access Kibana in the Operation column.
- In the Kibana navigation pane on the left, choose Dev Tools.
- On the Console page of Kibana, search for the imported data.
On the Console page of Kibana, run the following command to search for data. View the search results. If the searched data is consistent with the imported data, the data has been imported successfully.
+GET myindex/_search
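+If you only need to confirm how many documents were imported, you can also run a count query (a standard Elasticsearch API) against the target index:
+GET myindex/_count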
+
+
+ Importing Data When Logstash Is Deployed on an ECSFigure 3 illustrates how data is imported when Logstash is deployed on an ECS that resides in the same VPC as the cluster to which data is to be imported.
+ Figure 3 Importing data when Logstash is deployed on an ECS
+ - Ensure that the ECS where Logstash is deployed and the cluster to which data is to be imported reside in the same VPC, port 9200 of the ECS security group has been assigned external network access permissions, and an EIP has been bound to the ECS.
- If there are multiple servers in a VPC, you do not need to bind EIPs to the other servers as long as one server has an EIP. You can log in to the server with the EIP and then switch to the node where Logstash is deployed.
- If a private line or VPN is available, you do not need to associate an EIP.
+
+ - Use PuTTY to log in to the ECS.
For example, data file access_20181029_log is stored in the /tmp/access_log/ path of the ECS, and the data file includes the following data:
+| All | Heap used for segments | | 18.6403 | MB |
+| All | Heap used for doc values | | 0.119289 | MB |
+| All | Heap used for terms | | 17.4095 | MB |
+| All | Heap used for norms | | 0.0767822 | MB |
+| All | Heap used for points | | 0.225246 | MB |
+| All | Heap used for stored fields | | 0.809448 | MB |
+| All | Segment count | | 101 | |
+| All | Min Throughput | index-append | 66232.6 | docs/s |
+| All | Median Throughput | index-append | 66735.3 | docs/s |
+| All | Max Throughput | index-append | 67745.6 | docs/s |
+| All | 50th percentile latency | index-append | 510.261 | ms |
+
+ - Run the following command to create configuration file logstash-simple.conf in the Logstash installation directory:
cd /<Logstash installation directory>/
+vi logstash-simple.conf
+Input the following content in logstash-simple.conf:
+input {
+Location of data
+}
+filter {
+Related data processing
+}
+output {
+ elasticsearch {
+ hosts => "<Private network address and port number of the node>"
+ (Optional) If communication encryption has been enabled on the cluster, you need to add the following configuration:
+ ssl => true
+ ssl_certificate_verification => false
+ }
+}
+ - The input parameter indicates the data source. Set this parameter based on the actual conditions. For details about the input parameter and parameter usage, visit the following website: https://www.elastic.co/guide/en/logstash/current/input-plugins.html
- The filter parameter specifies the mode in which data is processed. For example, extract and process logs to convert unstructured information into structured information. For details about the filter parameter and parameter usage, visit the following website: https://www.elastic.co/guide/en/logstash/current/filter-plugins.html
- The output parameter indicates the destination address of the data. For details about the output parameter and parameter usage, visit https://www.elastic.co/guide/en/logstash/current/output-plugins.html. <private network address and port number of a node> refers to the private network address and port number of a node in the cluster.
If the cluster contains multiple nodes, you are advised to replace the value of <Private network address and port number of a node> with the private network addresses and port numbers of all nodes in the cluster to prevent node faults. Use commas (,) to separate the nodes' private network addresses and port numbers. The following is an example:
+hosts => ["192.168.0.81:9200","192.168.0.24:9200"]
+If the cluster contains only one node, the format is as follows:
+hosts => "192.168.0.81:9200"
+
+
+Consider the data files in the /tmp/access_log/ path mentioned in 2 as an example. Assume that data import starts from data in the first row of the data file, the filtering condition is left unspecified (indicating no data processing operations are performed), the private network address and port number of the node in the cluster where data is to be imported are 192.168.0.81 and 9200, respectively, and the name of the target index is myindex. Edit the configuration file as follows, and enter :wq to save the configuration file and exit.
+input {
+ file{
+ path => "/tmp/access_log/*"
+ start_position => "beginning"
+ }
+}
+filter {
+}
+output {
+ elasticsearch {
+ hosts => "192.168.0.81:9200"
+ index => "myindex"
+
+ }
+}
+If the cluster has the security mode enabled, you need to download a certificate first.
+- Download a certificate on the Basic Information page of the cluster.
Figure 4 Downloading a certificate
+ - Store the certificate to the server where Logstash is deployed.
- Modify the logstash-simple.conf configuration file.
Consider the data files in the /tmp/access_log/ path mentioned in step 2 as an example. Assume that data import starts from data in the first row of the data file, the filtering condition is left unspecified (indicating no data processing operations are performed), and the private network address and port number of the cluster node are 192.168.0.81 and 9200, respectively. The name of the index for importing data is myindex, and the certificate is stored in /logstash/logstash6.8/config/CloudSearchService.cer. Edit the configuration file as follows, and enter :wq to save the configuration file and exit.
+input {
+ file {
+ path => "/tmp/access_log/*"
+ start_position => "beginning"
+ }
+}
+filter {
+ }
+output{
+ elasticsearch{
+ hosts => ["https://192.168.0.81:9200"]
+ index => "myindex"
+ user => "admin"
+ password => "******"
+ cacert => "/logstash/logstash6.8/config/CloudSearchService.cer"
+ manage_template => false
+ ilm_enabled => false
+ ssl => true
+ ssl_certificate_verification => false
+ }
+}
+ password: password for logging in to the cluster
+
+
+
+
+ - Run the following command to import the ECS data collected by Logstash to the cluster:
./bin/logstash -f logstash-simple.conf
+ - Log in to the CSS management console.
- In the navigation pane on the left, choose Clusters > Elasticsearch to switch to the Clusters page.
- From the cluster list, locate the row that contains the cluster to which you want to import data and click Access Kibana in the Operation column.
- In the Kibana navigation pane on the left, choose Dev Tools.
- On the Console page of Kibana, search for the imported data.
On the Console page of Kibana, run the following command to search for data. View the search results. If the searched data is consistent with the imported data, the data has been imported successfully.
+GET myindex/_search
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528499141.html b/docs/css/umn/en-us_topic_0000001528499141.html
new file mode 100644
index 00000000..cb0347a3
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528499141.html
@@ -0,0 +1,25 @@
+
+
+Restarting a Cluster
+If a cluster becomes faulty, you can restart it to check if it can run normally.
+ Prerequisites- The target cluster is not frozen and has no task in progress.
- If a cluster is available, ensure that it has stopped processing service requests (such as importing data and searching for data). Otherwise, data may be lost when the cluster is restarted. You are advised to perform this operation during off-peak hours.
+
+ ContextCSS supports quick restart and rolling restart.
+ Quick Restart- All clusters support this function.
- If you select a node type for quick restart, all nodes of the selected type will be restarted together.
- If you select a node name for quick restart, only the specified node will be restarted.
- The cluster is unavailable during quick restart.
+
+ Rolling Restart- Rolling restart is supported only when a cluster has at least three nodes (including master nodes, client nodes, and cold data nodes).
- Rolling restart can be performed only by specifying node types. If you select a node type for rolling restart, the nodes of the selected type will be restarted in sequence.
- During the rolling restart, only the nodes that are being restarted are unavailable and other nodes can run normally.
- When the data volume is large, rolling restart will take a long time.
+
+
+ Quick Restart- Log in to the CSS management console.
- In the navigation tree on the left, select a cluster type. The cluster management list page is displayed.
- In the Operation column of the target cluster, choose More > Restart.
- On the Restart Cluster page, select Quick Restart.
You can quickly restart nodes by Node type or Node name. If you select Node type, you can select multiple node types and restart them at the same time. If you select Node name, you can restart only one node at a time.
+ - Refresh the page and check the cluster status. During the restart, the cluster status is Processing, and the task status is Restarting. If the cluster status changes to Available, the cluster has been restarted successfully.
+
+ Rolling Restart- Log in to the CSS management console.
- In the navigation tree on the left, select a cluster type. The cluster management list page is displayed.
- In the Operation column of the target cluster, choose More > Restart.
- On the Restart Cluster page, select Rolling Restart.
You can perform rolling restart by Node type. Select specific node types for restart.
+ - Refresh the page and check the cluster status. During the restart, the cluster status is Processing, and the task status is Restarting. If the cluster status changes to Available, the cluster has been restarted successfully.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528499145.html b/docs/css/umn/en-us_topic_0000001528499145.html
new file mode 100644
index 00000000..a7582aff
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528499145.html
@@ -0,0 +1,47 @@
+
+
+High-cardinality Field Histogram Aggregation
+High-cardinality fields are usually used for histogram grouping and aggregation rather than single-point grouping and aggregation, for example, when collecting statistics on logs generated within a certain period. Assume that the following query statement exists:
+ POST testindex/_search?pretty
+{
+  "size": 0,
+  "aggs": {
+    "groupbytime": {
+      "date_histogram": {
+        "field": "timestamp",
+        "calendar_interval": "day"
+      },
+      "aggs": {
+        "avg_score": {
+          "avg": {
+            "field": "score"
+          }
+        }
+      }
+    }
+  }
+}
+ This query groups the field timestamp using a histogram and calculates the average score. timestamp is a typical high-cardinality field. To use the enhanced aggregation for the preceding query, set parameters as follows:
+ // Configure an index
+"settings" : {
+ "index" : {
+ "search" : {
+ "turbo" : {
+ "enabled" : "true" // Enable optimization
+ }
+ },
+ "sort" : { // Specify a sorting key
+ "field" : [
+ "timestamp"
+ ]
+ }
+ }
+}
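+For reference, the following is a minimal sketch of applying these settings when creating the index (assuming the index is named testindex, as in the preceding query; an index sorting key generally must be specified when the index is created):
+PUT testindex
+{
+  "settings" : {
+    "index" : {
+      "search" : {
+        "turbo" : {
+          "enabled" : "true"
+        }
+      },
+      "sort" : {
+        "field" : [
+          "timestamp"
+        ]
+      }
+    }
+  }
+}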
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528499153.html b/docs/css/umn/en-us_topic_0000001528499153.html
new file mode 100644
index 00000000..6ad2fcfb
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528499153.html
@@ -0,0 +1,24 @@
+
+
+Storage-Compute Decoupling
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528499157.html b/docs/css/umn/en-us_topic_0000001528499157.html
new file mode 100644
index 00000000..74771be2
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528499157.html
@@ -0,0 +1,122 @@
+
+
+Memory Flow Control
+ContextElasticsearch provides a circuit breaker, which will terminate requests if the memory usage exceeds its threshold. However, Elasticsearch does not check the heap memory usage when an API is called, and does not allow users to configure the threshold for a single request. In this case, memory usage can only be calculated during request processing, which may lead to frequent circuit breaking and cannot avoid heap memory waste. To solve this problem, CSS checks the heap memory usage when receiving REST requests, blocking excess API requests and protecting nodes. You can configure global memory flow control, or configure the request path and heap memory threshold for a specific request path. Before a request is processed, the system checks the configured heap memory threshold. If the threshold is exceeded, the request path will be blocked.
+ - Memory flow control may affect request processing performance.
- If the memory flow control is enabled, some Kibana search requests may fail.
- If memory flow control is enabled in Elasticsearch 5.5.1, _mget requests will be blocked and Kibana access will be abnormal. You can add _mget requests to the request whitelist to avoid this problem.
+
+ The following table describes memory flow control parameters.
+
+ Table 1 Memory flow control parametersParameter
+ |
+Type
+ |
+Description
+ |
+
+
+flowcontrol.memory.enabled
+ |
+Boolean
+ |
+Whether to enable memory flow control. This function is disabled by default. Enabling memory flow control may slightly affect node performance.
+Value: true or false
+Default value: false
+ |
+
+flowcontrol.memory.allow_path
+ |
+List<String>
+ |
+Request path whitelist for memory flow control.
+Whitelisted paths are not blocked by memory flow control. Wildcard characters are supported. By default, the query APIs required by the cluster are not blocked by memory flow control, which prevents failures to query cluster information when the memory usage reaches the threshold.
+Example:
+- "flowcontrol.memory.allow_path": "/index/_search",
- "flowcontrol.memory.allow_path": "/index*/_search",
- "flowcontrol.memory.allow_path": ["/index/_search", "/index1/_bulk"],
+A maximum of 10 paths can be configured. A path can contain up to 32 characters.
+The default value is null.
+ |
+
+flowcontrol.memory.heap_limit
+ |
+String
+ |
+Maximum global heap memory usage of a node. The value cannot be less than 10% of the heap memory.
+Value range: 10%–100%
+Default value: 90%
+ |
+
+flowcontrol.memory.*.filter_path
+ |
+String
+ |
+Paths under memory flow control.
+The default value is **, indicating all paths. If flowcontrol.memory.heap_limit is configured and flowcontrol.memory.*.filter_path is not, it indicates that all the paths, except those in the whitelist, are under control. The whitelist takes precedence over the single-path rule. If a path is specified in both flowcontrol.memory.allow_path and flowcontrol.memory.*.filter_path, the requests from the path will be allowed.
+For example, if flowcontrol.memory.allow_path and flowcontrol.memory.*.filter_path are both set to abc/_search, then abc/_search will not be under flow control.
+Maximum length: 32 characters
+ |
+
+flowcontrol.memory.*.heap_limit
+ |
+String
+ |
+Heap memory usage threshold of request paths. If the heap memory usage exceeds the threshold, flow control will be triggered.
+Value range: 0–100%
+Default value: 90%
+ |
+
+
+
+
+
+ Procedure- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run commands to enable or disable memory flow control.
- Enabling memory flow control
PUT /_cluster/settings
+{
+ "persistent": {
+ "flowcontrol.memory.enabled": true,
+ "flowcontrol.memory.allow_path": "/index/_search",
+ "flowcontrol.memory.heap_limit": "85%"
+ }
+}
+ - Enabling memory flow control for a request path
Configure the heap memory usage threshold for a request path. You can configure the priorities of such threshold rules.
+PUT /_cluster/settings
+{
+ "persistent": {
+ "flowcontrol.memory.enabled": true,
+ "flowcontrol.memory": {
+ "flowcontrol_search": {
+ "filter_path": "index1/_search",
+ "heap_limit": "50%"
+ },
+ "flowcontrol_bulk": {
+ "filter_path": "index*/_bulk",
+ "heap_limit": "50%"
+ }
+ }
+ }
+}
+ - Deleting the memory flow control configuration of a request path
PUT /_cluster/settings
+{
+ "persistent": {
+ "flowcontrol.memory.enabled": true,
+ "flowcontrol.memory": {
+ "flowcontrol_search": {
+ "filter_path": null,
+ "heap_limit": null
+ }
+ }
+ }
+}
+ - Disabling cluster memory flow control
PUT /_cluster/settings
+{
+ "persistent": {
+ "flowcontrol.memory.enabled": false
+ }
+}
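+To check which flow control settings are currently in effect, you can query the cluster settings (a standard Elasticsearch API):
+GET _cluster/settings?flat_settings=true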
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528499165.html b/docs/css/umn/en-us_topic_0000001528499165.html
new file mode 100644
index 00000000..15ab497a
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528499165.html
@@ -0,0 +1,43 @@
+
+
+Global Path Whitelist for Flow Control
+ContextThe following table describes the global path whitelist parameters for flow control.
+
+ Table 1 Global path whitelist parameters for flow controlParameter
+ |
+Type
+ |
+Description
+ |
+
+
+flowcontrol.path.white_list
+ |
+List<String>
+ |
+Paths that are not under flow control. These paths are not affected by memory flow control, CPU flow control, or one-click blocking; but are under IP address-based flow control.
+A maximum of 10 paths can be configured. A path can contain up to 32 characters.
+This parameter is left blank by default.
+ NOTE: You are advised not to configure this parameter, unless required by plug-ins.
+
+ |
+
+
+
+
+
+ Procedure- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation tree on the left, choose Dev Tools. Run the following command to configure the global path whitelist for flow control:
PUT _cluster/settings
+{
+ "persistent": {
+ "flowcontrol.path.white_list": "xxxx"
+ }
+}
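+For example, to exempt a plug-in path from flow control (the paths below are hypothetical and for illustration only; the parameter accepts a single path or a list of up to 10 paths):
+PUT _cluster/settings
+{
+  "persistent": {
+    "flowcontrol.path.white_list": ["/_myplugin/status", "/_myplugin/health"]
+  }
+}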
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528499169.html b/docs/css/umn/en-us_topic_0000001528499169.html
new file mode 100644
index 00000000..d1d7a98a
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528499169.html
@@ -0,0 +1,102 @@
+
+
+Other Management APIs
+- Querying the created patterns.
This API is used to query the pattern list and query a specified pattern by name.
+An example request is as follows:
+GET auto_sync/pattern
+GET auto_sync/pattern/{pattern_name}
+The following is an example of the response:
+{
+ "patterns" : [
+ {
+ "name" : "pattern1",
+ "pattern" : {
+ "remote_cluster" : "leader",
+ "remote_index_patterns" : [
+ "log*"
+ ],
+ "local_index_pattern" : "{{remote_index}}-sync",
+ "settings" : { }
+ }
+ }
+ ]
+}
+ - Deleting a created pattern.
This API is used to delete a specified pattern.
+An example request is as follows:
+DELETE auto_sync/pattern/{pattern_name}
+ - Obtaining the automatic synchronization status.
This API is used to obtain the synchronization status of matched indexes.
+An example request is as follows:
+GET auto_sync/stats
+The following is an example of the response:
+{
+ "success_count" : 3,
+ "failed_count" : 0,
+ "failed_remote_cluster_state_requests_count" : 0,
+ "last_fail_exception" : { },
+ "last_fail_remote_cluster_requests_exception" : { }
+}
+ - Obtaining the synchronization status of the index that is being synchronized.
An example request is as follows:
+GET {index_name}/sync_stats
+The following is an example of the response:
+{
+ "indices" : {
+ "data1_follower" : {
+ "shards" : {
+ "0" : [
+ {
+ "primary" : false,
+ "total_synced_times" : 27,
+ "total_empty_times" : 25,
+ "total_synced_files" : 4,
+ "total_synced_bytes" : 3580,
+ "total_paused_nanos" : 0,
+ "total_paused_times" : 0,
+ "current" : {
+ "files_count" : 0,
+ "finished_files_count" : 0,
+ "bytes" : 0,
+ "finished_bytes" : 0
+ }
+ },
+ {
+ "primary" : true,
+ "total_synced_times" : 28,
+ "total_empty_times" : 26,
+ "total_synced_files" : 20,
+ "total_synced_bytes" : 17547,
+ "total_paused_nanos" : 0,
+ "total_paused_times" : 0,
+ "current" : {
+ "files_count" : 0,
+ "finished_files_count" : 0,
+ "bytes" : 0,
+ "finished_bytes" : 0
+ }
+ }
+ ]
+ }
+ }
+ }
+}
+ - Changing the synchronization period.
The synchronization period is 30 seconds by default and can be modified.
+An example request is as follows (change the synchronization period to 2 seconds):
+PUT {index_name}/_settings
+{
+ "index.remote_sync.sync_interval": "2s"
+}
+ - Enabling forcible synchronization
By default, the plug-in determines whether to synchronize metadata based on whether the number of documents in the index of the primary cluster changes. If the primary cluster only updates documents and the number of documents remains unchanged, the plug-in does not synchronize the updates to the secondary cluster. The configuration can be modified. After this function is enabled, the index metadata of the primary cluster is forcibly synchronized to the secondary cluster in each synchronization period.
+The following is an example of enabling forcible synchronization:
+PUT _cluster/settings
+{
+ "persistent": {
+ "remote_sync.force_synchronize": true
+ }
+}
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528499181.html b/docs/css/umn/en-us_topic_0000001528499181.html
new file mode 100644
index 00000000..553a8ca5
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528499181.html
@@ -0,0 +1,13 @@
+
+
+Auditing
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528499197.html b/docs/css/umn/en-us_topic_0000001528499197.html
new file mode 100644
index 00000000..2e1514ab
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528499197.html
@@ -0,0 +1,114 @@
+
+
+Checking Index Monitoring Information
+You can check preconfigured index monitoring visualizations on the Dashboard and Visualizations pages of Kibana. You can also customize tables and charts.
+ PrerequisitesA cluster has been created and index monitoring has been enabled.
+
+ Checking Dashboard Charts- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster, and click Access Kibana in the Operation column.
- In the navigation tree on the left, click Dashboard.
- Click [Monitoring] Index Monitoring Dashboard to view the preconfigured dashboard.
Figure 1 Preconfigured dashboard charts
+The preconfigured dashboard displays the number of read and write operations per second in the cluster and the top 10 indexes with the most read and write operations per second.
+
+Table 1 Preconfigured chartsChart Name
+ |
+Description
+ |
+
+
+[monitoring] markdown
+ |
+Markdown chart, which briefly describes the dashboard content.
+ |
+
+[monitoring] Indexing Rate (/s)
+ |
+Number of documents written to a cluster per second.
+ |
+
+[monitoring] Search Rate (/s)
+ |
+Average number of queries per second in a cluster.
+ |
+
+[monitoring] indexing rate of index for top10
+ |
+Top 10 indexes with the most documents written per second.
+ |
+
+[monitoring] search rate of index for top10
+ |
+Top 10 indexes with the most queries per second.
+ |
+
+[monitoring] total docs count
+ |
+Total number of documents in a cluster.
+ |
+
+[monitoring] total docs delete
+ |
+Total number of deleted documents in a cluster.
+ |
+
+[monitoring] total store size in bytes
+ |
+Total storage occupied by documents in a cluster.
+ |
+
+[monitoring] indices store_size for top10
+ |
+Top 10 indexes that occupy the largest storage space.
+ |
+
+[monitoring] indices docs_count for top10
+ |
+Top 10 indexes with the largest number of documents.
+ |
+
+[monitoring] indexing time in millis of index for top10(ms)
+ |
+Top 10 indexes with the longest document write latency in a unit time (ms).
+ |
+
+[monitoring] search query time in millis of index for top10(ms)
+ |
+Top 10 indexes with the longest index query time in a unit time (ms).
+ |
+
+[monitoring] segment count of index for top10
+ |
+Top 10 indexes with the largest number of index segments.
+ |
+
+[monitoring] segment memory in bytes of index for top10
+ |
+Top 10 indexes with the largest heap memory usage of index segments.
+ |
+
+
+
+
+ Do not delete the monitoring-eye-css-* index pattern while index monitoring is enabled. Otherwise, the monitoring charts will be abnormal.
+
+
+
+ Customizing Visualizations ChartsThe index monitoring module periodically stores the index stats information in the monitoring-eye-css-* index. You can use the Kibana chart function to draw customized charts.
+ The following procedure describes how to check the trend of the document quantity in a chart as an example.
+ - Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- Choose Visualize.
- Click Create visualization and select TSVB.
- Set chart parameters and view the visualizations.
On the Data tab page, index_stats.primaries.docs.count indicates the number of documents in the primary shards. Derivative calculates the difference between adjacent aggregation buckets. Set Unit to 1s to display the rate per second. Select Positive only to prevent negative values when the counter resets. To group statistics by index, set Group by to Terms and By to index_stats.index. Statistics will then be grouped by index name. Figure 2 TSVB page
+
+To view data in different time segments, set the aggregation interval, or the displayed data will be incomplete. On the Panel options tab page, set Interval to 1m or 30m to adjust the interval of timestamp.
+Figure 3 Setting the interval
+
+
+ Importing Index Monitoring ChartsYou can import or export charts on Kibana. If the index monitoring charts are not displayed, you can import the charts to Kibana again to load the monitoring view.
+ The following describes how to import a chart to Kibana:
+ - Create the monitoring-kibana.ndjson file by referring to kibana-monitor.
- Log in to Kibana and choose Management > Stack Management > Saved objects.
Figure 4 Selecting saved objects
+ - Click Import and upload the monitoring-kibana.ndjson file created in step 1.
Figure 5 Uploading a file
+ - After the upload is complete, click Done. The index monitoring chart is successfully imported.
Figure 6 Successfully importing index monitoring charts
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528499201.html b/docs/css/umn/en-us_topic_0000001528499201.html
new file mode 100644
index 00000000..3a8d3194
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528499201.html
@@ -0,0 +1,170 @@
+
+
+Viewing Basic Cluster Information
+On the Cluster Information page, you can view the information about a cluster, including the private network address, public IP address, version, and node.
+ - Log in to the CSS management console.
- Choose Clusters > Elasticsearch. The cluster list is displayed.
- Click a cluster name to go to the Cluster Information page and view the basic information about the cluster.
+
Table 1 Parameters for configuring basic informationType
+ |
+Parameter
+ |
+Description
+ |
+
+
+Cluster Information
+ |
+Name
+ |
+Cluster name. The name can be customized.
+You can click the edit icon on the right to change the cluster name.
+ |
+
+ID
+ |
+Unique ID of a cluster, which is automatically generated by the system.
+Each cluster in the same region has a unique ID.
+ |
+
+Version
+ |
+Cluster version information.
+For details about how to upgrade the cluster version, see Upgrading the Cluster Version.
+ |
+
+Cluster Status
+ |
+Current status of a cluster
+ |
+
+Task Status
+ |
+Current task status of a cluster. If no task is in progress, -- is displayed.
+ |
+
+Created
+ |
+Time when a cluster was created
+ |
+
+Cluster Storage Capacity (GB)
+ |
+Storage capacity of a cluster
+ |
+
+Used Cluster Storage (GB)
+ |
+Used storage capacity of a cluster
+ |
+
+Configuration
+ |
+Region
+ |
+Region where a cluster is located
+ |
+
+AZ
+ |
+AZ where a cluster is located
+ |
+
+VPC
+ |
+VPC to which the cluster belongs
+ |
+
+Subnet
+ |
+Subnet to which the cluster belongs
+ |
+
+Security Group
+ |
+Security group to which a cluster belongs.
+To change the security group of a cluster, click Change Security Group on the right.
+ NOTICE: Before changing the security group, ensure that the port 9200 required for service access has been enabled. Incorrect security group configuration may cause service access failures. Exercise caution when performing this operation.
+
+ |
+
+Security Mode
+ |
+Security mode of a cluster.
+- Enabled: The current cluster is a security cluster.
- Disabled: The current cluster is a non-security cluster.
+For details about how to change the security mode of a cluster, see Changing the Security Mode.
+ |
+
+Reset Password
+ |
+This parameter is displayed only for security clusters.
+Click Reset to change the password of the administrator account admin of the security cluster.
+ NOTE: Requirements for administrator passwords:
+ - The password can contain 8 to 32 characters.
- The password must contain at least three of the following character types: uppercase letters, lowercase letters, digits, and special characters. The following special characters are supported: ~!@#$%^&*()-_=+\|[{}];:,<.>/?
- Do not use the administrator name, or the administrator name spelled backwards.
- You are advised to change the password periodically.
+
+ |
+
+Enterprise Project
+ |
+Enterprise project to which a cluster belongs.
+You can click the project name to view the basic information about the enterprise project.
+ |
+
+Public IP Address
+ |
+Public network access information, which is displayed only for clusters in security mode.
+- For a security cluster with public network access enabled, the configured public network address is displayed. You can use this address to access the security cluster from the public network.
- For a security cluster with public network access disabled, -- is displayed.
+When using a public IP address to access a cluster, you are advised to enable access control and configure an access whitelist to improve cluster security. For details about how to configure the public network access, see Accessing a Cluster from a Public Network.
+ |
+
+Access Control
+ |
+Whether to set access control for a cluster. This parameter is displayed only for clusters with public network access enabled.
+- Enabled: Only IP addresses in the whitelist can access the cluster through the public network.
- Disabled: Any IP address can access the cluster through the public network.
+Click Set to configure the access control and the whitelist.
+ |
+
+Bandwidth
+ |
+The bandwidth for public network access. This parameter is displayed only for clusters with public network access enabled.
+Click Edit to change the bandwidth size.
+ |
+
+HTTPS Access
+ |
+Whether to enable the HTTPS access protocol for a cluster.
+- Disabled: The HTTP protocol is used for cluster access.
- Enabled: The HTTPS protocol is used for cluster access. Only security clusters can enable this function. If HTTPS Access is enabled, you can click Download Certificate to obtain the CER security certificate for accessing the security cluster. Currently, the security certificate cannot be used in the public network environment.
+For details about how to change the access mode of a cluster in security mode, see Switching the Protocol of Security Clusters.
+ |
+
+Private IPv4 Address
+ |
+Private IP address and port number of a cluster, which can be used to access the cluster. If the cluster has only one node, the IP address and port number of only one node are displayed, for example, 10.62.179.32:9200. If the cluster has multiple nodes, the IP addresses and port numbers of all nodes are displayed, for example, 10.62.179.32:9200,10.62.179.33:9200.
+ |
+
+Node
+ |
+Node Specifications
+ |
+Specifications of nodes in a cluster
+ |
+
+Node Storage Type
+ |
+Storage capacity and storage type of nodes in a cluster
+ |
+
+Nodes
+ |
+Number of nodes in a cluster
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528659081.html b/docs/css/umn/en-us_topic_0000001528659081.html
new file mode 100644
index 00000000..071b4b38
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528659081.html
@@ -0,0 +1,52 @@
+
+
+Switching Hot and Cold Data
+CSS provides you with cold data nodes. You can store data that requires query response in seconds on high-performance nodes and store data that requires query response in minutes on cold data nodes with large capacity and low specifications.
+ - When creating a cluster, you need to configure nodes as data nodes. When you enable the cold data node function, data nodes become hot nodes.
- You can enable the cold data node, master node, and client node functions at the same time.
- You can add cold data nodes and expand their storage capacity. The maximum storage capacity is determined by the node specifications. Local disks do not support storage capacity expansion.
+
+ Hot and Cold Data Node SwitchoverIf you enable cold data nodes when creating a cluster, the cold data nodes are labeled with cold. Other data nodes become hot nodes and are labeled with hot. You can specify indexes to allocate data to cold or hot nodes.
+ You can configure a template to store indices on the specified cold or hot node.
+ For example, log in to the Kibana Console page of the cluster and modify the template so that indexes whose names start with myindex are stored on cold data nodes. The myindex* data is then stored on cold data nodes.
+ - For the 5.x version, run the following command to create a template:
PUT _template/test
+{
+ "order": 1,
+ "template": "myindex*",
+ "settings": {
+ "index": {
+ "refresh_interval": "30s",
+ "number_of_shards": "3",
+ "number_of_replicas": "1",
+ "routing.allocation.require.box_type": "cold"
+ }
+ }
+}
+ - For 6.x or later versions, run the following command to create a template:
PUT _template/test
+{
+ "order": 1,
+ "index_patterns": "myindex*",
+ "settings": {
+ "refresh_interval": "30s",
+ "number_of_shards": "3",
+ "number_of_replicas": "1",
+ "routing.allocation.require.box_type": "cold"
+ }
+}
+
+ You can also apply the allocation setting to an existing index:
+ PUT myindex/_settings
+ {
+ "index.routing.allocation.require.box_type": "cold"
+ }
+ You can also cancel the hot or cold data allocation setting for an index:
+ PUT myindex/_settings
+{
+ "index.routing.allocation.require.box_type": null
+ }
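+To verify where the shards of an index are allocated after changing this setting, you can check the shard allocation, for example:
+GET _cat/shards/myindex?v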
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528659085.html b/docs/css/umn/en-us_topic_0000001528659085.html
new file mode 100644
index 00000000..90afb1b0
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528659085.html
@@ -0,0 +1,40 @@
+
+
+Changing Policies
+You can change any managed index policy. ISM has constraints to ensure that policy changes do not break indexes.
+ If an index is stuck in its current status, never proceeding, and you want to update its policy immediately, make sure that the new policy includes the same status (same name, action, and order) as the old policy. In this case, ISM applies the new policy even if the policy is being executed.
+ If you update the policy without including an identical status, ISM updates the policy only after all actions in the current status finish executing. Alternatively, you can select a specific status in the old policy and have the new policy take effect.
+ To change a policy using Kibana, do the following:
+ - Under Managed Indices, select the indexes to which you want to attach the new policy.
- Click Change policy in the upper right corner. The Choose managed indices page is displayed. Configure parameters required for changing a policy.
+
Table 1 Parameters required for changing a policyParameter
+ |
+Description
+ |
+
+
+Managed indices
+ |
+Select the indexes to which you want to attach the new policy. Multiple indexes can be selected.
+ |
+
+State filters
+ |
+Select an index status. When a status is selected, the new policy is attached to an index in this status.
+ |
+
+New policy
+ |
+Select a new policy.
+ |
+
+
+
+
+ - After configuration is complete, click Change.
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528659089.html b/docs/css/umn/en-us_topic_0000001528659089.html
new file mode 100644
index 00000000..38e081f3
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528659089.html
@@ -0,0 +1,29 @@
+
+
+Enhanced Cluster Features
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528659093.html b/docs/css/umn/en-us_topic_0000001528659093.html
new file mode 100644
index 00000000..6f263ac2
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528659093.html
@@ -0,0 +1,36 @@
+
+
+Clusters in Security Mode
+When creating an Elasticsearch cluster, you can enable the security mode for it. Identity authentication is required when users access a security cluster. You can also authorize and encrypt security clusters.
+ Identity VerificationTo access a security cluster, you need to enter the username and password. The identity verification is required for the following two types of users:
+ - Administrator: The default administrator username is admin, and the password is the one specified during cluster creation.
- Users: Enter the username and password created through Kibana.
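+For example, a security cluster can be accessed with basic authentication (a sketch assuming HTTPS access is enabled; replace the address and password with your own, and note that -k skips certificate verification):
+curl -u admin:<password> -k "https://<Private IP address of a node>:9200"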
+
+ AuthorizationOn the Kibana console, click Security to control user permissions in Elasticsearch clusters. You can configure hierarchical user permissions by cluster, index, document, and field. For details, see Creating a User and Granting Permissions by Using Kibana.
+ You can add or delete users, and map users to different roles for permissions control.
+ Figure 1 Configuring users
+ You can use role mapping to configure roles and map a user, backend role, and host name to a role.
+ Figure 2 Role mapping
+ You can set permissions for each role to access clusters, indexes, and documents, and assign different Kibana tenants to each role.
+ Figure 3 Configuring role permissions
+ You can set action groups, assign the groups to roles, and configure the roles' permission for accessing indexes and documents.
+ Figure 4 Configuring action groups
+ You can view the parameters of authentication and authorization for the current cluster. You can also run the securityadmin command to modify the configuration.
+ Figure 5 Viewing cluster parameters
+ You can also clear the security cache.
+ Figure 6 Clearing the security cache
+
+ EncryptionWhen key data is transferred between nodes or through the HTTP protocol, SSL/TLS encryption is used to ensure data security.
+ You can perform the preceding functions on Kibana, using .yml files (not recommended), or by calling RESTful APIs. For more information about the security mode, see Security.
+
+ Resetting the Administrator PasswordIf you want to change the administrator password of a security cluster or you have forgotten the password, reset the password.
+ - On the Clusters page, locate the target cluster whose password you want to reset and click the cluster name. The Cluster Information page is displayed.
- In the Configuration area, click Reset next to Reset Password.
- The password can contain 8 to 32 characters.
- The password must contain at least three of the following character types: uppercase letters, lowercase letters, digits, and special characters. The following special characters are supported: ~!@#$%^&*()-_=+\|[{}];:,<.>/?
- Do not use the administrator name, or the administrator name spelled backwards.
- You are advised to change the password periodically.
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528659101.html b/docs/css/umn/en-us_topic_0000001528659101.html
new file mode 100644
index 00000000..5e8d001c
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528659101.html
@@ -0,0 +1,170 @@
+
+
+Access Logs
+ContextYou can check access logs in either of the following ways:
+ - Enable and check access logs via an independent API. Configure the API parameters to record the access log time and size. The access log content is returned through a REST API.
- Print access logs. Your access logs are printed as files in backend logs.
+ Enabling the access log function may affect cluster performance.
+ The following table describes access log parameters.
+
+ Table 1 Access log parametersParameter
+ |
+Type
+ |
+Description
+ |
+
+
+duration_limit
+ |
+String
+ |
+Duration recorded in an access log.
+Value range: 10 to 120
+Unit: s
+Default value: 30
+ |
+
+capacity_limit
+ |
+String
+ |
+Size of an access log. After access logging is enabled, the size of recorded requests is checked. If the size exceeds the value of this parameter, the access logging stops.
+Value range: 1 to 5
+Unit: MB
+Default value: 1
+ |
+
+
+
+
+ Access logging stops if either duration_limit or capacity_limit reaches the threshold.
+
+
+ Procedure- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run commands to enable or disable access logs.
- Enabling access logs for all nodes in a cluster
PUT /_access_log?duration_limit=30s&capacity_limit=1mb
+ - Enabling access logs for a node in a cluster
PUT /_access_log/{nodeId}?duration_limit=30s&capacity_limit=1mb
+{nodeId} indicates the ID of the node where you want to enable access logs.
+
+ - Use APIs to check access logs.
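+For example (assuming the query path mirrors the PUT path used to enable access logging above), you can check the access logs of all nodes as follows:
+GET /_access_log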
+
Example response: {
+ "_nodes" : {
+ "total" : 1,
+ "successful" : 1,
+ "failed" : 0
+ },
+ "cluster_name" : "css-flowcontroller",
+ "nodes" : {
+ "8x-ZHu-wTemBQwpcGivFKg" : {
+ "name" : "css-flowcontroller-ess-esn-1-1",
+ "host" : "10.0.0.98",
+ "count" : 2,
+ "access" : [
+ {
+ "time" : "2021-02-23 02:09:50",
+ "remote_address" : "/10.0.0.98:28191",
+ "url" : "/_access/security/log?pretty",
+ "method" : "GET",
+ "content" : ""
+ },
+ {
+ "time" : "2021-02-23 02:09:52",
+ "remote_address" : "/10.0.0.98:28193",
+ "url" : "/_access/security/log?pretty",
+ "method" : "GET",
+ "content" : ""
+ }
+ ]
+ }
+ }
+}
+
+ Table 2 Response parametersParameter
+ |
+Description
+ |
+
+
+name
+ |
+Node name
+ |
+
+host
+ |
+Node IP address
+ |
+
+count
+ |
+Number of node access requests in a statistical period
+ |
+
+access
+ |
+Details about node access requests in a statistical period. For details, see Table 3.
+ |
+
+
+
+
+
+
+Table 3 accessParameter
+ |
+Description
+ |
+
+
+time
+ |
+Request time
+ |
+
+remote_address
+ |
+Source IP address and port number of the request
+ |
+
+url
+ |
+Original URL of the request
+ |
+
+method
+ |
+Method corresponding to the request path
+ |
+
+content
+ |
+Request content
+ |
+
+
+
+
+ - Enable or disable the access log function.
All user access operations can be logged. By default, logs are recorded in the access_log.log file in the background. The maximum size of a log file is 250 MB, and there can be a maximum of five log files. You can back up access log files to OBS.
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528659105.html b/docs/css/umn/en-us_topic_0000001528659105.html
new file mode 100644
index 00000000..3a42febb
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528659105.html
@@ -0,0 +1,142 @@
+
+
+Checking the Index Read and Write Traffic
+You can call an API to query the index read and write traffic within a period of time.
+ PrerequisitesA cluster has been created and index monitoring has been enabled.
+
+ Procedure- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster, and click Access Kibana in the Operation column.
- Choose Dev Tools in the navigation pane on the left and run the following commands to query the index read and write traffic:
- Check read and write traffic of all the indexes.
GET /_cat/monitoring
+ - Check read and write traffic of a specific index.
GET /_cat/monitoring/{indexName}
+{indexName} indicates the name of the index whose read and write traffic you want to check.
+ - Check the read and write traffic of indexes for different periods.
GET _cat/monitoring?begin=1650099461000
+GET _cat/monitoring?begin=2022-04-16T08:57:41
+GET _cat/monitoring?begin=2022-04-16T08:57:41&end=2022-04-17T08:57:41
+
Table 1 Parameter description
Parameter | Mandatory | Description
begin | No | Start time (UTC) of the monitoring data you want to view. Time format: strict_date_optional_time|epoch_millis. The default start time is five minutes before the current time.
end | No | End time (UTC) of the monitoring data you want to view. Time format: strict_date_optional_time|epoch_millis. The default end time is the current time.
+
+
+
+
+
+ These parameters cannot be used for system indexes, whose names start with a dot (.).
+
+Information similar to the following is displayed:
+index begin end status pri rep init unassign docs.count docs.deleted store.size pri.store.size delete.rate indexing.rate search.rate
+test 2022-03-25T09:46:53.765Z 2022-03-25T09:51:43.767Z yellow 1 1 0 1 9 0 5.9kb 5.9kb 0/s 0/s 0/s
+
Table 2 Parameters in the returned information
Parameter | Description
index | Index name
begin | Start time of the monitoring data you queried
end | End time of the monitoring data you queried
status | Index status within the queried monitoring interval
pri | Number of primary shards of the index within the queried monitoring interval
rep | Number of replicas of the index within the queried monitoring interval
init | Number of initializing shards within the queried monitoring interval
unassign | Number of unassigned shards within the queried monitoring interval
docs.count | Number of documents within the queried monitoring interval
docs.deleted | Number of deleted documents within the queried monitoring interval
store.size | Index storage size within the queried monitoring interval
pri.store.size | Size of the primary shards of the index within the queried monitoring interval
delete.rate | Number of delete operations per second within the queried monitoring interval
indexing.rate | Number of write (indexing) operations per second within the queried monitoring interval
search.rate | Number of search operations per second within the queried monitoring interval
+
+
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528659117.html b/docs/css/umn/en-us_topic_0000001528659117.html
new file mode 100644
index 00000000..6d195129
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528659117.html
@@ -0,0 +1,51 @@
+
+
+Optimizing the Performance of Vector Retrieval
+Optimizing Write Performance
+
+ Optimizing Query Performance
+ - If the off-heap memory required by the vector index exceeds the circuit breaker limit, index entry swap-in and swap-out occur, which affects the query performance. In this case, you can increase the circuit breaker threshold of off-heap memory.
PUT _cluster/settings
+{
+ "persistent": {
+ "native.cache.circuit_breaker.cpu.limit": "75%"
+ }
+}
 - If the fetch overhead is high, you can exclude the vector field from _source to reduce the size of the stored-fields (.fdt) files and thereby lower the fetch overhead.
PUT my_index
+{
+ "settings": {
+ "index": {
+ "vector": "true"
+ },
+ "index.soft_deletes.enabled": false
+ },
+ "mappings": {
+ "_source": {
+ "excludes": ["my_vector"]
+ },
+ "properties": {
+ "my_vector": {
+ "type": "vector",
+ "dimension": 128,
+ "indexing": true,
+ "algorithm": "GRAPH",
+ "metric": "euclidean"
+ }
+ }
+ }
+}
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528659121.html b/docs/css/umn/en-us_topic_0000001528659121.html
new file mode 100644
index 00000000..e171a1db
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528659121.html
@@ -0,0 +1,22 @@
+
+
+Enhanced Aggregation
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528659125.html b/docs/css/umn/en-us_topic_0000001528659125.html
new file mode 100644
index 00000000..6889bbb5
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528659125.html
@@ -0,0 +1,62 @@
+
+
+Grouping and Aggregation of Low-cardinality Fields
Low-cardinality fields cluster well when sorted, which facilitates vectorized optimization. Assume that the following query statement exists:
+ POST testindex/_search
+{
+ "size": 0,
+ "aggs": {
+ "groupby_region": {
+ "terms": {
+ "field": "region"
+ },
+ "aggs": {
+ "groupby_host": {
+ "terms": {
+ "field": "host"
+ },
+ "aggs": {
+ "avg_cpu_usage": {
+ "avg": {
+ "field": "cpu_usage"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+ Assume that the region and host are low-cardinality fields. To use the enhanced aggregation, set the parameters as follows:
+ The clustering key must be a prefix subset of the sorting key.
+
+ // Configure an index
+"settings" : {
+ "index" : {
+ "search" : {
+ "turbo" : {
+ "enabled" : "true" // Enable optimization
+ }
+ },
+ "sort" : { // Specify a sorting key
+ "field" : [
+ "region",
+ "host",
+ "other"
+ ]
+ },
+ "cluster" : {
+ "field" : [ // Specify a clustering key
+ "region",
+ "host"
+ ]
+ }
+ }
+}
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528659133.html b/docs/css/umn/en-us_topic_0000001528659133.html
new file mode 100644
index 00000000..a9245d16
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528659133.html
@@ -0,0 +1,52 @@
+
+
+Monitoring Metrics of Clusters
+You can use Cloud Eye to monitor cluster metrics of CSS in real time and quickly handle exceptions. For details about Cloud Eye, see the Cloud Eye User Guide.
+ Table 1 lists the metrics supported by CSS.
+
Table 1 Supported metrics
Metric | Description | Formula | Value Range | Monitoring Interval
Disk Usage | Calculates the disk usage of a CSS cluster. Unit: % | Used disk space of a cluster/Total disk space of a cluster | 0 to 100% | 1 minute
Cluster Health Status | Measures the health status of a CSS cluster. | - | 0, 1, or 2. 0: The cluster is 100% available. 1: The data is complete while some replicas are missing; high availability is compromised and exceptions may occur, so investigate promptly. 2: Data is missing and the cluster fails to work. | 1 minute
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528659137.html b/docs/css/umn/en-us_topic_0000001528659137.html
new file mode 100644
index 00000000..a0cdc075
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528659137.html
@@ -0,0 +1,58 @@
+
+
+Managing Tags
+Tags are cluster identifiers. Adding tags to clusters can help you identify and manage your cluster resources.
+ You can add tags to a cluster when creating the cluster or add them on the details page of the created cluster.
If your organization has enabled tag policies for CSS, you must comply with the tag policy rules when creating clusters; otherwise, cluster creation may fail. Contact the organization administrator to learn more about tag policies.
Managing Tags of a New Cluster
- Log in to the CSS management console.
- Click Create Cluster in the upper right corner. The Create Cluster page is displayed.
- On the Create Cluster page, set Advanced Settings to Custom. Add tags for a cluster.
You can select a predefined tag and set Tag value for the tag. You can click View Predefined Tag to switch to the TMS management console and view existing tags.
+You can also create new tags by specifying Tag key and Tag value.
+You can add a maximum of 10 tags for a CSS cluster. If the entered tag is incorrect, you can click Delete on the right of the tag to delete the tag.
+
Table 1 Naming rules for a tag key and value
Parameter | Description
Tag key | Must be unique in a cluster. Cannot contain more than 64 characters. Can contain only numbers, letters, and the special characters _.:=+-@. Cannot start or end with a space. Cannot be left blank.
Tag value | Cannot contain more than 64 characters. Can contain only numbers, letters, and the special characters _.:=+-@. Cannot start or end with a space. Cannot be left blank.
+
+
+
+
+
+
Managing Tags of Existing Clusters
You can modify, delete, or add tags for a cluster.
+ - Log in to the CSS management console.
- On the Clusters page, click the name of a cluster for which you want to manage tags.
The Basic Information page is displayed.
+ - In the navigation pane on the left, choose the Tags tab. You can add, modify, or delete tags.
- View
On the Tags page, you can view details about tags of the cluster, including the number of tags and the key and value of each tag.
+ - Add
Click Add in the upper left corner. In the displayed Add Tag dialog box, enter the key and value of the tag to be added, and click OK.
+ - Modify
You can only change the value of an existing tag.
+In the Operation column of a tag, click Edit. In the displayed Edit Tag page, enter a new tag value and click OK.
+ - Delete
In the Operation column of a tag, click Delete. After confirmation, click Yes on the displayed Delete Tag page.
+
+
+
Searching for Clusters by Tag
- Log in to the CSS management console.
- On the Clusters page, click Search by Tag in the upper right corner of the cluster list.
- Select or enter the tag key and tag value you want to search for, and click Add to add the tag to the search text box.
You can select a tag key or tag value from their drop-down lists. The system returns a list of clusters that exactly match the tag key or tag value. If you enter multiple tags, only clusters that match all of the specified tags are displayed.
+You can add a maximum of 10 tags at one time.
+ - Click Search.
The system searches for the target cluster by tag key and value.
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528659141.html b/docs/css/umn/en-us_topic_0000001528659141.html
new file mode 100644
index 00000000..f4fe2478
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528659141.html
@@ -0,0 +1,71 @@
+
+
+Low-cardinality and High-cardinality Field Mixing
+In the scenario where low-cardinality and high-cardinality fields are mixed, assume that the following query statement exists:
+ POST testindex/_search
+{
+ "size": 0,
+ "aggs": {
+ "groupby_region": {
+ "terms": {
+ "field": "region"
+ },
+ "aggs": {
+ "groupby_host": {
+ "terms": {
+ "field": "host"
+ },
+ "aggs": {
+ "groupby_timestamp": {
+ "date_histogram": {
+ "field": "timestamp",
+ "interval": "day"
+ },
+ "aggs": {
+ "avg_score": {
+ "avg": {
+ "field": "score"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
The query groups by the low-cardinality fields and builds a date histogram on the high-cardinality field. To use the enhanced aggregation for the preceding query, set the parameters as follows:
 - The clustering key must be a prefix subset of the sorting key.
 - High-cardinality fields must be included in the sorting key and must follow the last low-cardinality field.
+
+ // Configure an index
+"settings" : {
+ "index" : {
+ "search" : {
+ "turbo" : {
+ "enabled" : "true" // Enable optimization
+ }
+ },
+ "sort" : { // Specify a sorting key
+ "field" : [
+ "region",
+ "host",
+ "timestamp",
+ "other"
+ ]
+ },
+ "cluster" : {
+ "field" : [ // Specify a clustering key
+ "region",
+ "host"
+ ]
+ }
+ }
+}
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528659149.html b/docs/css/umn/en-us_topic_0000001528659149.html
new file mode 100644
index 00000000..dad09e1d
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528659149.html
@@ -0,0 +1,159 @@
+
+
+Temporary Access Statistics Logs
Context
You can check access logs in either of the following ways:
+ - Enable and check access logs via an independent API. Configure the API parameters to record the access log time and size. The access log content is returned through a REST API.
- Print access logs. Your access logs are printed as files in backend logs. This section describes how to temporarily access logs in this mode.
The commands for enabling or disabling access logging use the following parameters:
+
Table 1 Access log parameters
Parameter | Type | Description
duration_limit | String | Duration recorded in an access log. Value range: 10 to 120. Unit: s. Default value: 30
capacity_limit | String | Size of an access log. After access logging is enabled, the size of recorded requests is checked. If it exceeds the value of this parameter, access logging stops. Value range: 1 to 5. Unit: MB. Default value: 1
+
+
+
+
+ Access logging stops if either duration_limit or capacity_limit reaches the threshold.
+
+
Procedure
- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- In the navigation pane on the left, choose Dev Tools and run commands to enable or disable access logs.
- Enable access logs for all nodes in a cluster.
PUT /_access_log?duration_limit=30s&capacity_limit=1mb
+ - Enable access logs for a node in a cluster.
PUT /_access_log/{nodeId}?duration_limit=30s&capacity_limit=1mb
+{nodeId} indicates the ID of the node where you want to enable access logs.
+
+ - View access logs.
+
- Run the following commands to delete access logs.
+
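The view and delete commands are not listed on this page; assuming they mirror the enable endpoint above (an assumption to verify against the CSS API reference), they would look similar to the following:
GET /_access_log
GET /_access_log/{nodeId}
DELETE /_access_log
DELETE /_access_log/{nodeId}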
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528659153.html b/docs/css/umn/en-us_topic_0000001528659153.html
new file mode 100644
index 00000000..d825f355
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528659153.html
@@ -0,0 +1,217 @@
+
+
+Upgrading the Cluster Version
Same-version upgrade, cross-engine upgrade, and cross-version upgrade are supported. Same-version upgrade is to upgrade the kernel patch of a cluster to fix problems or optimize performance. Cross-engine upgrade is to upgrade an Elasticsearch cluster to an OpenSearch cluster. Cross-version upgrade is to upgrade the cluster version to enhance functions or consolidate versions.
Description
Principle
+ Nodes in the cluster are upgraded one by one so that services are not interrupted. The upgrade process is as follows: bring a node offline, migrate its data to another node, create a new node of the target version, and mount the NIC ports of the offline node to the new node to retain the node IP address. After a new node is added to the cluster, other nodes will be updated in the same way in sequence. If there is a large amount of data in a cluster, the upgrade duration depends on the data migration duration.
+ Process
+ - Pre-Upgrade Check
- Creating a Snapshot
- Creating an Upgrade Task
+ Version Restrictions
+ The supported target version varies according to the current cluster version. For details, see Table 1.
Table 1 Version restrictions
Current Version | Target Version
Elasticsearch: 6.2.3 | Elasticsearch: 6.5.4 or 6.8.23
Elasticsearch: 6.5.4 | Elasticsearch: 6.8.23
Elasticsearch: 6.8.23 | Elasticsearch: 7.6.2 or 7.10.2
Elasticsearch: 7.1.1 | Elasticsearch: 7.6.2 or 7.10.2
Elasticsearch: 7.6.2 | Elasticsearch: 7.10.2
Elasticsearch: 7.9.3 | Elasticsearch: 7.10.2
Elasticsearch: 7.10.2 | OpenSearch: 1.3.6
Note:
- Elasticsearch 7.6.2 and 7.10.2 are mainstream cluster versions. You are advised to upgrade your clusters to these two versions. The supported target versions are displayed in the Target Image drop-down list.
- Elasticsearch clusters of version 5.X cannot be upgraded across versions. Elasticsearch clusters of versions 6.2.3 and 6.5.4 can be upgraded to 6.8.23 and then to 7.X.X.
- Currently, only Elasticsearch clusters of version 7.10.2 can be upgraded to OpenSearch clusters of version 1.3.6 across engines.
+
+
+
+
+
+
Constraints
- A maximum of 20 clusters can be upgraded at the same time. You are advised to perform the upgrade during off-peak hours.
- Clusters that have ongoing tasks cannot be upgraded.
- Once started, an upgrade task cannot be stopped until it succeeds or fails.
- During the upgrade, nodes are replaced one by one. Requests sent to a node that is being replaced may fail. In this case, you are advised to access the cluster through the VPC Endpoint service or a dedicated load balancer.
- During the upgrade, the Kibana and Cerebro components are rebuilt and cannot be accessed. Because different Kibana versions are incompatible with each other, Kibana access may also fail due to version incompatibility while the upgrade is in progress. Access is restored after the cluster is successfully upgraded.
+
Pre-Upgrade Check
To ensure a successful upgrade, you must check the items listed in the following table before performing an upgrade. (An example of manually verifying the system-checked items follows the table.)
+
Table 2 Pre-upgrade checklist
Check Item | Check Method | Description | Normal Status
Cluster status | System check | After an upgrade task is started, the system automatically checks the cluster status. Clusters whose status is green or yellow can provide services properly and have no unallocated primary shards. | The cluster status is Available.
Node quantity | System check | After an upgrade task is started, the system automatically checks the number of nodes. The total number of data nodes and cold data nodes in a cluster must be greater than or equal to 3 so that services are not interrupted. | The total number of data nodes and cold data nodes in the cluster is greater than or equal to 3.
Disk capacity | System check | After an upgrade task is started, the system automatically checks the disk capacity. During the upgrade, nodes are brought offline one by one and then new nodes are created. Ensure that the disk capacity of the remaining nodes can hold all data of the node that has been brought offline. | After a node is brought offline, the remaining nodes can hold all data of the cluster.
Data backup | System check | Check whether the maximum number of primary and replica shards of the indexes in a cluster can be allocated to the remaining data nodes and cold data nodes, to prevent replica allocation failures after a node is brought offline during the upgrade. | The maximum number of primary and replica shards of an index, plus 1, is less than or equal to the total number of data nodes and cold data nodes before the upgrade.
Data backup | System check | Before the upgrade, back up data to prevent data loss caused by upgrade faults. When submitting an upgrade task, you can determine whether to enable the system to check for the backup of all indexes. | Data has been backed up.
Resources | System check | After an upgrade task is started, the system automatically checks resources. Resources will be created during the upgrade. Ensure that resources are available and sufficient. | Resources are available and sufficient.
Custom plugins | System and manual check | Perform this check only when custom plugins are installed in the source cluster. If a cluster has custom plugins, upload all plugin packages of the target version on the plugin management page before the upgrade so that they can be installed on the new nodes during the upgrade. Otherwise, the custom plugins will be lost after the cluster is upgraded. After an upgrade task is started, the system automatically checks whether the custom plugin packages have been uploaded, but you need to check whether the uploaded packages are correct. NOTE: If an uploaded plugin package is incorrect or incompatible, it cannot be automatically installed during the upgrade and the upgrade task fails. To restore the cluster, you can terminate the upgrade task and restore the node that failed to be upgraded by Replacing a Specified Node. After the upgrade is complete, the status of the custom plugins is reset to Uploaded. | The plugin packages of the cluster to be upgraded have been uploaded to the plugin list.
Custom configurations | System check | During the upgrade, the system automatically synchronizes the content of the cluster configuration file elasticsearch.yml. | Custom cluster configurations are not lost after the upgrade.
Non-standard operations | Manual check | Check whether the cluster contains non-standard operations, that is, manual operations that are not recorded and therefore cannot be automatically carried over during the upgrade, for example, modification of the kibana.yml configuration file, the system configuration, or return routes. | Some non-standard operations are compatible. For example, modifications to the security plugin can be retained through metadata, and modifications to the system configuration can be retained through images. Others, such as modifications to the kibana.yml file, cannot be retained, and you must back up the file in advance.
Compatibility check | System and manual check | After a cross-version upgrade task is started, the system automatically checks whether the source and target versions have incompatible configurations. If a custom plugin is installed in the cluster, you need to manually check the version compatibility of the plugin. | Configurations before and after the cross-version upgrade are compatible.
+
+
+
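For reference, the cluster status and node quantity items above can also be verified manually from Kibana Dev Tools before you submit the upgrade task. These are standard Elasticsearch APIs, shown here only as a convenience sketch:
GET _cluster/health
// "status" should be green or yellow, with no unassigned primary shards
GET _cat/nodes?v
// confirm that the total number of data nodes and cold data nodes is at least 3
GET _cat/allocation?v
// confirm that the remaining nodes have enough free disk space to hold the data of any single node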
+
+
Creating an Upgrade Task
- Log in to the CSS management console.
- In the navigation pane on the left, choose Clusters. On the cluster list page that is displayed, click the name of a cluster.
- On the displayed basic cluster information page, click Version Upgrade.
- On the displayed page, set upgrade parameters.
+
Table 3 Upgrade parameters
Parameter | Description
Upgrade Type | Same-version upgrade: upgrades the kernel patch of the cluster; the cluster version number remains unchanged. Cross-version upgrade: upgrades the cluster version. Cross-engine upgrade: upgrades an Elasticsearch cluster to an OpenSearch cluster; currently, only Elasticsearch clusters of version 7.10.2 can be upgraded to OpenSearch clusters of version 1.3.6.
Target Image | Image of the target version. When you select an image, the image name and target version details are displayed. The supported target versions are displayed in the Target Image drop-down list. If no target image can be selected, the possible causes are as follows: the current cluster is already of the latest version; the current cluster was created before 2023 and has vector indexes; the images of the new version have not been added in the current region.
Agency | Select an IAM agency to grant the upgrade permission to the current account. If no agency is available, click Create Agency to go to the IAM console and create one. NOTE: The selected agency must be assigned the Tenant Administrator or VPC Administrator policy.
+
+
+
+
+ - After setting the parameters, click Submit. Determine whether to enable Check full index snapshot and Perform cluster load detection and click OK.
If a cluster is overloaded, the upgrade task may be suspended or fail. Enabling Cluster load detection can effectively avoid such failures. If any of the following conditions is not met during the detection, wait or reduce the load first. If you urgently need to upgrade the version and understand the risk of upgrade failure, you can disable Cluster load detection. The cluster load detection items are as follows (an example query for checking these metrics manually follows this list):
- nodes.thread_pool.search.queue < 1000: Check whether the maximum number of search queues is less than 1000.
- nodes.thread_pool.write.queue < 200: Check whether the maximum number of write queues is less than 200.
- nodes.process.cpu.percent < 90: Check whether the maximum CPU usage is less than 90%.
- nodes.os.cpu.load_average/Number of CPU cores < 80%: Check whether the ratio of the maximum load average to the number of CPU cores is less than 80%.
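If you want to look at these metrics yourself before submitting the task (CSS evaluates the thresholds automatically), the underlying figures are exposed by the standard Elasticsearch node statistics API, for example:
GET _nodes/stats/thread_pool,process,os
// thread_pool.search.queue, thread_pool.write.queue, process.cpu.percent, and os.cpu.load_average
// are reported per node in the response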
+ - View the upgrade task in the task list. If the task status is Running, you can expand the task list and click View Progress to view the upgrade progress.
If the task status is Failed, you can retry or terminate the task.
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001528659157.html b/docs/css/umn/en-us_topic_0000001528659157.html
new file mode 100644
index 00000000..bd746f1a
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001528659157.html
@@ -0,0 +1,64 @@
+
+
+Synchronizing Specified Indexes
+Synchronize a single index.
+ The request URL and request body parameters are as follows:
+ PUT start_remote_sync
+
Table 1 Request body parameters
Parameter | Description
remote_cluster | Name of the primary cluster. The default name is leader1. You can change the name by configuring the primary cluster information.
remote_index | Name of the index to be synchronized in the primary cluster
local_index | Name of the index being synchronized to the secondary cluster
settings | Index settings of the index being synchronized
+
+
+
+
+ After the synchronization function is enabled, indexes in the secondary cluster become read-only and are periodically synchronized with indexes in the primary cluster.
+ The following are two examples:
+ - Synchronize a single index from the primary cluster to the secondary cluster.
PUT start_remote_sync
+{
+ "remote_cluster": "leader1",
+ "remote_index": "data1_leader",
+ "local_index": "data1_follower"
+}
+ - Synchronize a single index from the primary cluster to the secondary cluster and modify the index configurations.
PUT start_remote_sync
+{
+ "remote_cluster": "leader1",
+ "remote_index": "data1_leader",
+ "local_index": "data1_follower",
+ "settings": {
+ "number_of_replicas": 4
+ }
+}
+ The following index configurations cannot be modified:
+ - number_of_shards
- version.created
- uuid
- creation_date
- soft_deletes.enabled
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001533829376.html b/docs/css/umn/en-us_topic_0000001533829376.html
new file mode 100644
index 00000000..f7386c1f
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001533829376.html
@@ -0,0 +1,19 @@
+
+
+Context
Feature Description
CSS provides an enhanced data import function. It optimizes bulk routing and speeds up index and word-segmentation processing, which improves import performance and reduces bulk rejections. This function applies to clusters that contain a large number of index shards and text indexes and have high import throughput.
+
Constraints
Currently, only Elasticsearch clusters of version 7.10.2 and OpenSearch clusters of version 1.3.6 support the import performance enhancement.
+
Prerequisites
An Elasticsearch cluster of version 7.10.2 or an OpenSearch cluster of version 1.3.6 has been created on the CSS console.
+
Precautions
- After local shard preferential bulk routing optimization and bulk routing optimization are enabled, data writes are no longer routed based on IDs, and routing-related functions are restricted. For example, ID-based GET requests may fail. Local shard preferential bulk routing relies on the random distribution of client bulk requests and the balanced distribution of primary shards.
- If index.native_speed_up (the text index acceleration function) is enabled, index_sorting is not supported. (A sketch of how these settings are applied follows this list.)
- Prerequisite for enabling index.native_analyzer: index.native_speed_up has been enabled.
+
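A minimal sketch of how these switches are typically applied when creating an index, assuming index.native_speed_up and index.native_analyzer are configured as index-level settings (verify the exact setting names and values against the CSS documentation for your cluster version):
PUT my_index
{
  "settings": {
    "index.native_speed_up": true,
    "index.native_analyzer": true
  }
}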
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001533988876.html b/docs/css/umn/en-us_topic_0000001533988876.html
new file mode 100644
index 00000000..2ec9d3a1
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001533988876.html
@@ -0,0 +1,19 @@
+
+
+Enhanced Import Performance
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001534148568.html b/docs/css/umn/en-us_topic_0000001534148568.html
new file mode 100644
index 00000000..fe99ff9a
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001534148568.html
@@ -0,0 +1,45 @@
+
+
+Bulk Route Optimization
According to the default routing rule of Elasticsearch, data in a bulk request is routed to different shards. When massive data is written and a large number of index shards exist, excessive internal request forwarding may trigger bulk rejections. In a large-scale cluster, the long tail effect also causes high bulk request latency.
+ You can specify the index.bulk_routing configuration item to enable bulk route optimization. This function reduces the requests that need to be internally forwarded. For clusters containing a large number of shards, this function can improve write performance and reduce bulk rejection.
Procedure
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster, and click Access Kibana in the Operation column.
- In the navigation tree on the left, choose Dev Tools.
- On the Dev Tools page, run the following command:
PUT my_index
+{
+ "settings": {
+ "index.bulk_routing": "local_pack"
+ }
+}
+
Table 1 Values of index.bulk_routing
Value | Description
default | The default routing mechanism of Elasticsearch is used. Records in a bulk request are split and routed independently.
pack | Data of a single bulk request is randomly routed to the same shard.
local_pack | Data of a single bulk request is routed to a local shard of the data node that receives the request. If the node does not contain the corresponding index shard, the data is randomly routed to another node that contains the index shard.
+
+
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001534308508.html b/docs/css/umn/en-us_topic_0000001534308508.html
new file mode 100644
index 00000000..fe77d5f8
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001534308508.html
@@ -0,0 +1,21 @@
+
+
+Bulk Aggregation Optimization
+You can specify the index.aggr_perf_batch_size configuration item to enable or disable batch import optimization. After the batch import function is enabled, documents in bulk requests are written in batches. This function reduces the overhead of memory application, application lock, and other calls, improving data import performance.
+ The value range of index.aggr_perf_batch_size is [1, Integer.MAX_VALUE]. The default value is 1, indicating that the batch import function is disabled. If the value is greater than 1, the batch import function is enabled and the value of MIN(bulk_doc_size, aggr_perf_batch_size) indicates the batch size.
+
Procedure
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster, and click Access Kibana in the Operation column.
- In the navigation tree on the left, choose Dev Tools.
- On the Dev Tools page, run the following command:
PUT my_index
+{
+ "settings": {
+ "index.aggr_perf_batch_size": "128"
+ }
+}
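Setting the value back to "1" (the default) disables batch import again; for example, when creating the index:
PUT my_index
{
  "settings": {
    "index.aggr_perf_batch_size": "1"
  }
}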
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001555591537.html b/docs/css/umn/en-us_topic_0000001555591537.html
new file mode 100644
index 00000000..efa91523
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001555591537.html
@@ -0,0 +1,25 @@
+
+
+
+ Accessing an Elasticsearch Cluster
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001562137917.html b/docs/css/umn/en-us_topic_0000001562137917.html
new file mode 100644
index 00000000..60d5425f
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001562137917.html
@@ -0,0 +1,75 @@
+
+
+How to access Kibana from outside cloud using ELB?
Overview
Currently, to access the Kibana dashboard of the CSS service, a user has to log in to the OTC console and navigate to the Kibana login page.
+
To make access more convenient, a user can use the provided Python script, which configures a Dedicated Load Balancer in OTC so that the Kibana dashboard can be accessed via a public IP.
+
ELB Configuration Script
This script configures ELB so that the CSS Kibana dashboard can be accessed in HTTPS mode. It creates a Dedicated Load Balancer with an HTTPS listener that forwards traffic to the CSS nodes on port 5601 in order to reach the Kibana dashboard.
+ Download Script
+
Installing the Dependency
The script depends on the otcextensions library.
+ If you already have Python with pip installed, you can simply run:
+ pip install otcextensions
 - For more details about using the otcextensions library, you can check the otcextensions docs.
A file called clouds.yaml holds all necessary configuration parameters. The file can be placed either in the local directory, below the user home directory in .config/openstack, or in the system-wide directory /etc/openstack. You may use a second file, secure.yaml, in the same directories to additionally protect clear-text password credentials. For more details, see the configuration section in the official documentation.
+Minimal sample clouds.yaml file:
+clouds:
+ otc:
+ profile: otc
+ auth:
+ username: '<USER_NAME>'
+ password: '<PASSWORD>'
+ project_name: '<eu-de_project>'
+ # or project_id: '<123456_PROJECT_ID>'
+ user_domain_name: 'OTC00000000001000000xxx'
+ # or user_domain_id: '<123456_DOMAIN_ID>'
+ auth_url: 'https://iam.eu-de.otc.t-systems.com:443/v3'
+With this configuration you can start using the CLI with openstack --os-cloud otc *command* or by export OS_CLOUD=otc; openstack *command*.
+ - Environment variables: Authentication using username/password is often used:
export OS_AUTH_URL=<url-to-openstack-identity>
+export OS_PROJECT_NAME=<project-name>
+export OS_USERNAME=<username>
+export OS_PASSWORD=<password>
+export OS_USER_DOMAIN_NAME=<user-domain-name>
+export OS_IDENTITY_API_VERSION=3
+
+ In addition to that a regular clouds.yaml configuration file can be used.
+ More information is available at:
+ https://docs.openstack.org/openstacksdk/latest/user/config/configuration.html
+
Pre-Requisites
The script requires the ID of a CSS cluster and a certificate ID for creating an HTTPS listener.
- You can get the CSS cluster ID by visiting the OTC console -> CSS Dashboard -> Clusters page and clicking your CSS cluster to see its details.
- To learn more about creating and getting a TLS certificate, check the ELB User Guide.
Generating a TLS certificate with the openssl command:
+ openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:4096 -keyout private.key -out certificate.crt
+ When adding the certificate and private key, Certificate Type must be "Server Certificate".
+
+
Running the Script
Once you have the certificate_id and cluster_id, you are ready to run the script.
+
List ELB Flavors
python3 script.py elb-flavors
This prints the L7 flavors supported by the Dedicated Load Balancer. To print all supported flavor types, you may add the --all argument to the command. Only the L7 flavor type is needed here, which is why --all is optional.
+
+ Configure ELB python3 script.py elb-configure --cluster-id <cluster_id> --certificate-id <certificate_id>
The --certificate-id argument is optional; if it is not provided, the load balancer is configured with an HTTP listener.
+ Configure ELB with specific Flavor
 - By default, the ELB is configured with the smallest L7 flavor. If you want a specific flavor of your choice, follow the commands below.
 - Only an L7 flavor type can be used, since an HTTPS listener is being created.
+ # Prints list of Loadbalancer flavor Types
+ python3 script.py elb-flavors
+
+ # Run the script
+ python3 script.py elb-configure --cluster-id <cluster_id> --certificate-id <certificate_id> --flavor-id <flavor_id>
+
+ Delete ELB python3 script.py elb-delete <loadbalancer_name_or_id>
+ Delete ELB and Release Public EIP
+ python3 script.py elb-delete <loadbalancer_id> --release-public-ip
+ Please use the elb-delete command with caution.
+
+
Logging
When you run the script, a log file named debug.log is created, where you can find details of all the API requests.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001564706853.html b/docs/css/umn/en-us_topic_0000001564706853.html
new file mode 100644
index 00000000..f26c8b58
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001564706853.html
@@ -0,0 +1,18 @@
+
+
+(Optional) Authorizing CSS to Use SMN
Scenario Description
To use the OpenDistro alarm plugin (opendistro_alerting), authorize your Elasticsearch cluster to use SMN to send notifications. For details about how to use the OpenDistro alarm plugin, see Configuring SMN Alarms.
+ Service authorization is to delegate CSS to use other cloud resources. For example, you can authorize CSS to use SMN to send notifications.
+
Constraints and Limitations
Only the SMN service can be authorized.
+
Procedure
- Log in to the CSS management console as an administrator with IAM permissions.
- In the navigation pane, choose Service Authorization.
- On the Service Authorization page, click Create Agency. In the dialog box displayed, confirm that the agency is successfully created.
- If an agency has been created, "css_smn_agency exist, no need to created." is displayed in the upper right corner.
- If you do not have the creation permission, a message is displayed in the upper right corner indicating that the current user lacks this permission and that you need to check the account permissions in IAM.
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001564906577.html b/docs/css/umn/en-us_topic_0000001564906577.html
new file mode 100644
index 00000000..0d4c8600
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001564906577.html
@@ -0,0 +1,148 @@
+
+
+Configuring SMN Alarms
Scenario Description
By default, the open-source OpenDistro alarm plugin (opendistro_alerting) is integrated into CSS to send notifications when data meets specific conditions. This plugin consists of three components: Dashboard, Monitors, and Destinations. CSS integrates the SMN service in the Destinations component and can send alarm messages only through the SMN service as the destination.
+ This section describes how to use the OpenDistro alarm plugin to configure SMN alarms for Elasticsearch clusters in Kibana.
+ For details about the official guide of the open-source alarm plug-in Opendistro Alerting, visit OpenDistro-Monitors.
+
+
Constraints and Limitations
The open-source OpenDistro alarm plugin is installed on Elasticsearch clusters of the versions 7.1.1, 7.6.2, and 7.10.2 by default.
+
+
Procedure
- Log in to the CSS management console.
- On the Cluster Management > Elasticsearch page, select the target cluster and click Access Kibana in the Operation column.
- On the Kibana page, choose Open Distro for Elasticsearch > Alerting in the navigation pane on the left.
- Create an SMN destination to send alert messages.
- On the Alerting page, click the Destinations tab and click Add destination to configure destination information.
+
Table 1 Destinations parameter description
Parameter | Description
Name | User-defined destination name
Type | Retain the default value SMN.
Topic | Select the SMN topic you have created for sending alarm messages. NOTE: For Elasticsearch clusters of version 7.1.1, you need to manually enter the topic name. Ensure that the topic name is the same as that in the SMN service.
+
+
+
+
+Figure 1 Add destination
+ - Click Create to return to the destination list. If the created SMN destination is displayed in the list, the creation is complete.
Figure 2 Destination list
+
+ - Create a monitoring task and configure the alarm triggering condition and monitoring frequency.
- Click the Monitors tab on the Alerting page and click Create monitors to configure monitoring information.
+
Table 2 Monitor parameters
Parameter | Description
Monitor name | User-defined monitor name
Monitor state | Monitoring status. You are advised to keep this function enabled.
Method of definition | Method used to define the monitor. You are advised to use Define using extraction query. Define using visual graph: use a visualized query. Define using extraction query: use a specific query statement (an illustrative sketch follows this table).
Index | Index to be monitored
Time field | When Define using visual graph is selected, select a time field and define counting parameters such as count.
Frequency | Monitoring frequency and interval. The options include By interval, Daily, Weekly, Monthly, and Custom cron expression.
+
+
+
+
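If you choose Define using extraction query, the monitor expects a standard Elasticsearch query body. The following is only an illustrative sketch with hypothetical field names (timestamp, level), not values prescribed by this guide:
{
  "size": 0,
  "query": {
    "bool": {
      "filter": [
        { "range": { "timestamp": { "gte": "now-5m" } } },
        { "term": { "level": "ERROR" } }
      ]
    }
  }
}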
+ - Click Create. The Create trigger page is displayed.
- On the Create trigger page, set the alarm triggering conditions and actions to be triggered.
+
Table 3 Trigger parameters
Parameter | Description
Trigger name | User-defined trigger name
Severity level | Sensitivity of the trigger, that is, the number of alarms that are triggered before an alarm message is sent. 1 indicates the highest sensitivity.
Trigger condition | Condition that triggers an alarm. An alarm is triggered when the trigger condition is met.
Action name | Name of the trigger action
Destination | Select the SMN destination created in step 4.
Message subject | Title of the alarm message. This parameter is required only when an Elasticsearch cluster of version 7.10.2 is used.
Message | Body of the alarm message. By default, the subject and body are defined when the destination is an email.
Action throttling | Message sending frequency. It limits the number of notification messages that can be received in a specified period. For example, if this parameter is set to 10 minutes, SMN sends only one alarm notification within the next 10 minutes even if the trigger condition is met multiple times. After 10 minutes, SMN sends another alarm notification if the alarm condition is still met.
+
+
+
+
+Figure 3 Setting the destination of a trigger action
+ - Click Send test message. If a subscriber receives an email, as shown in Figure 5, the trigger is configured successfully.
Figure 4 Sending test messages
+Figure 5 Email notification
+ - Click Create to return to the Monitor details page.
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001583146906.html b/docs/css/umn/en-us_topic_0000001583146906.html
new file mode 100644
index 00000000..404c4cce
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001583146906.html
@@ -0,0 +1,14 @@
+
+
+Deleting a Snapshot
+If you no longer need a snapshot, delete it to release storage resources. If the automatic snapshot creation function is enabled, snapshots that are automatically created cannot be deleted manually, and the system automatically deletes these snapshots on the half hour after the time specified by Retention Period (days). If you disable the automatic snapshot creation function while retaining the automated snapshots, then you can manually delete them later. If you do not manually delete the automatically created snapshots and enable the automatic snapshot creation function again, then all snapshots with Snapshot Type set to Automated in the snapshot list of the cluster can only be automatically deleted by the system.
+ After a snapshot is deleted, its data cannot be restored. Exercise caution when deleting a snapshot.
+
+ - In the snapshot list, locate the snapshot that you want to delete.
- Click Delete in the Operation column. In the dialog box that is displayed, confirm the snapshot information and click OK.
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001583300810.html b/docs/css/umn/en-us_topic_0000001583300810.html
new file mode 100644
index 00000000..d81acf35
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001583300810.html
@@ -0,0 +1,29 @@
+
+
+Restoring Data
+You can use existing snapshots to restore the backup index data to a specified cluster.
Prerequisites
To use the function of creating or restoring snapshots, the account or IAM user logging in to the CSS management console must have both of the following permissions:
+ - Tenant Administrator for project OBS in region Global service
- CSS Administrator in the current region
+
Precautions
- Cluster snapshots will increase the CPU usage and disk I/O. You are advised to take cluster snapshots during off-peak hours.
- If snapshots have been stored in the OBS bucket, the OBS bucket cannot be changed. You can disable the snapshot function, enable the snapshot function, and specify a new OBS bucket. After you disable the snapshot function, you cannot use previously created snapshots to restore the cluster.
- If a cluster is in the Unavailable status, you can use the cluster snapshot function only to restore clusters and view existing snapshot information.
- During backup and restoration of a cluster, you can perform only certain operations, including scaling out, accessing Kibana, viewing metrics, and deleting other snapshots of clusters. However, you cannot perform the following operations: restarting or deleting the cluster, deleting a snapshot that is in the Creating or Restoring status, and creating or restoring another snapshot. If a snapshot is being created or restored for a cluster, any automatic snapshot creation task initiated for the cluster will be canceled.
- Cluster data cannot be queried during snapshot restoration.
- If you restore a CSS cluster snapshot to another cluster, indexes with the same name in the destination cluster will be overwritten. If the snapshot and the destination cluster use different shards, the indexes with the same name will not be overwritten.
- The version of the destination cluster used for restoration must be the same as or higher than that of the source cluster.
+
Restoring Data
You can use snapshots whose Snapshot Status is Available to restore cluster data. The stored snapshot data can be restored to other clusters.
+ Restoring data will overwrite current data in clusters. Therefore, exercise caution when restoring data.
+ - In the Snapshots area, locate the row that contains the snapshot you want to restore and click Restore in the Operation column.
- On the Restore page, set restoration parameters.
Index: Enter the name of the index you want to restore. If you do not specify any index name, data of all indexes will be restored. The value can contain 0 to 1,024 characters. Uppercase letters, spaces, and certain special characters (including "\<|>/?) are not allowed. You can use the asterisk (*) to match multiple indexes. For example, index* indicates that all indexes with the prefix index in snapshots are restored.
+Rename Pattern: Enter a regular expression. Indexes that match the regular expression are restored. The default value index_(.+) indicates restoring data of all indexes. The value contains 0 to 1,024 characters. Uppercase letters, spaces, and certain special characters (including "\<|>/?,) are not allowed.
+Rename Replacement: Enter the index renaming rule. The default value restored_index_$1 indicates that restored_ is added in front of the names of all restored indexes. The value contains 0 to 1,024 characters. Uppercase letters, spaces, and certain special characters (including "\<|>/?,) are not allowed.
The Rename Pattern and Rename Replacement take effect only when they are configured at the same time. (See the note after this procedure for how these fields map to the underlying restore request.)
+
+Cluster: Select the cluster that you want to restore. You can select the current cluster or others. However, you can only restore the snapshot to clusters whose status is Available. If the status of the current cluster is Unavailable, you cannot restore the snapshot to the current cluster. When you restore data to another cluster, the version of the target cluster must be later than or equal to that of the current cluster. If the target cluster you selected has an index with the same name as the original cluster, data in the index will be overwritten after the restoration. Exercise caution when performing this operation.
+Overwrite Index Shards of the Buckets with the Same Name in the Target Cluster: By default, the shards are not overwritten. Data is restored using snapshots by overwriting the snapshot files. After the index with the same name in the target cluster is overwritten, the index data in the target cluster may be lost. Exercise caution when performing this operation.
+Figure 1 Restoring a snapshot
+ - Click OK. If restoration succeeds, Task Status of the snapshot in the snapshot list will change to Restoration succeeded, and the index data is generated again according to the snapshot information.
Figure 2 Successful restoration
+
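For reference, the Index, Rename Pattern, and Rename Replacement fields above use the same syntax as the indices, rename_pattern, and rename_replacement fields of the standard Elasticsearch snapshot restore API. As a sketch only (the console issues the actual request for you), the default values correspond to a restore body like the following:
{
  "indices": "*",
  "rename_pattern": "index_(.+)",
  "rename_replacement": "restored_index_$1"
}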
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001583460750.html b/docs/css/umn/en-us_topic_0000001583460750.html
new file mode 100644
index 00000000..bda17c91
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001583460750.html
@@ -0,0 +1,67 @@
+
+
+Managing Automatic Snapshot Creation
+Snapshots are automatically created at a specified time according to the rules you create. You can enable or disable the automatic snapshot creation function and set the automatic snapshot creation policy.
Prerequisites
To use the function of creating or restoring snapshots, the account or IAM user logging in to the CSS management console must have both of the following permissions:
+ - Tenant Administrator for project OBS in region Global service
- CSS Administrator in the current region
+
Precautions
- When creating a backup for the first time, you are advised to back up data of all indexes.
- Cluster snapshots will increase the CPU usage and disk I/O. You are advised to take cluster snapshots during off-peak hours.
- Before creating a snapshot, you need to perform basic configurations, including configuring the OBS bucket for storing snapshots and IAM agency used for security authentication.
- If there are available snapshots in the snapshot list when you configure the OBS bucket for storing cluster snapshots for the first time, you cannot change the bucket for snapshots that are subsequently created automatically or manually. Exercise caution when you configure the OBS bucket.
- If snapshots have been stored in the OBS bucket, the OBS bucket cannot be changed. You can disable the snapshot function, enable the snapshot function, and specify a new OBS bucket. After you disable the snapshot function, you cannot use previously created snapshots to restore the cluster.
- If a cluster is in the Unavailable status, you can use the cluster snapshot function only to restore clusters and view existing snapshot information.
- During backup and restoration of a cluster, you can perform only certain operations, including scaling out, accessing Kibana, viewing metrics, and deleting other snapshots of clusters. However, you cannot perform the following operations: restarting or deleting the cluster, deleting a snapshot that is in the Creating or Restoring status, and creating or restoring another snapshot. If a snapshot is being created or restored for a cluster, any automatic snapshot creation task initiated for the cluster will be canceled.
- The first snapshot of a cluster is a full snapshot, and subsequent snapshots are incremental snapshots. CSS snapshot files depend on each other.
+
Managing Automatic Snapshot Creation
- In the CSS navigation pane on the left, click Clusters.
- On the Clusters page that is displayed, click the name of the target cluster. In the navigation pane on the left, choose Cluster Snapshots.
- On the displayed Cluster Snapshots page, click the icon to the right of Cluster Snapshot to enable the cluster snapshot function.
- Enable the cluster snapshot function. OBS buckets and IAM agencies are automatically created to store snapshots. The automatically created OBS bucket and IAM agency are displayed on the page. You can also click the edit icon on the right of Basic Configuration to edit the configuration. To ensure the security of snapshot data, you can select a key to encrypt the snapshot.
Table 1 Cluster snapshot parameters
Parameter | Description
OBS bucket | Select an OBS bucket for storing snapshots from the drop-down list box. You can also click Create Bucket on the right to create an OBS bucket. For details, see Creating a Bucket. The created or existing OBS bucket must meet the following requirements: Storage Class is Standard or Warm; Region is the same as that of the created cluster.
Backup Path | Storage path of the snapshot in the OBS bucket. The backup path cannot contain the characters \:*?"<>|, cannot start with a slash (/), cannot start or end with a period (.), and cannot contain more than 1,023 characters.
IAM Agency | IAM agency authorized by the current account for CSS to access or maintain data stored in OBS. You can also click Create IAM Agency on the right to create an IAM agency. For details, see Creating an Agency. The created or existing IAM agency must meet the following requirements: Agency Type must be Cloud service; Cloud Service must be set to CSS; the agency must have the Tenant Administrator permission for the OBS(S3) project in OBS(S3).
Snapshot Encryption | Whether to enable the snapshot encryption function, which ensures the security of your snapshot data. After it is enabled, select a key from the Key Name drop-down list. If no key is available, click Create/View Key to go to the KMS management console and create or modify a key. For details, see Creating a CMK. Notes: You cannot use default master keys whose aliases end with /default in KMS to encrypt snapshots. If a snapshot has already been stored in the OBS bucket, you cannot modify the parameters used for encrypting snapshots. If the key used for encryption is in the Pending deletion or disabled state, you cannot back up or restore the cluster; switch to the KMS management console and enable the key to allow backup and restore operations again. If you delete the key used for encryption, you cannot back up or restore the cluster, and the deleted key cannot be recovered, so exercise caution when deleting a key. If the key is deleted or is in the Pending deletion or disabled state, automatic snapshots are still scheduled according to the configured snapshot policy, but all automatic snapshot creation tasks will fail and are displayed in the Failed Tasks dialog box; in this scenario, you are advised to disable the automatic snapshot creation function.
+
+
+
+
+Figure 1 Edit Basic configuration
- Enable the automatic snapshot creation function. The Configure Automatic Snapshot Creation dialog box is displayed. If the automatic snapshot creation function is enabled, you can click the edit icon on the right of Automatic Snapshot Creation to modify the snapshot policy.
+Figure 2 Automatic snapshot creation
+ - Click OK to save the snapshot policy.
Snapshots that are automatically created according to the snapshot policy are displayed in the snapshot list, along with manually created snapshots. You can distinguish them by the Snapshot Type setting. In the upper right corner of the snapshot list, enter the keyword of the snapshot name or snapshot ID to search for the desired snapshots.
+Figure 3 Automatic snapshot creation
+ - (Optional) Disable the automatic snapshot creation function.
After you disable the automatic snapshot creation function, the system stops automatic creation of snapshots. If the system is creating a snapshot based on the automatic snapshot creation policy and the snapshot is not yet displayed in the snapshot list, you cannot disable the automatic snapshot creation function. In this case, if you click the button next to Automatic Snapshot Creation, a message is displayed, indicating that you cannot disable the function. You are advised to disable the function after the system completes automatic creation of the snapshot, and the created snapshot is displayed in the snapshot list.
+When disabling the automatic snapshot creation function, you can choose whether to delete the snapshots that have been automatically created by selecting Delete automated snapshots in the displayed dialog box. By default, automatically created snapshots are not deleted.
+- If you do not select Delete automated snapshots, automatically created snapshots are not deleted when you disable the automatic snapshot creation function. You can manually delete them later. For details, see Deleting a Snapshot. If you do not manually delete the automatically created snapshots and enable the automatic snapshot creation function again, then all snapshots with Snapshot Type set to Automated in the snapshot list of the cluster can only be automatically deleted by the system. The system automatically deletes snapshots based on the policy configured when the automatic snapshot creation function is enabled. For example, if the number of retained snapshots is set to 10 in this policy and more than 10 snapshots are created, the system automatically deletes the excess snapshots on the half hour.
- If you select Delete automated snapshots, all snapshots with Snapshot Type set to Automated in the snapshot list will be deleted when you disable the automatic snapshot creation function.
+ If snapshots are disabled, existing snapshots will not be automatically deleted. If you need to delete the snapshots, manage the bucket that stores snapshots on the OBS console.
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001583669884.html b/docs/css/umn/en-us_topic_0000001583669884.html
new file mode 100644
index 00000000..060582c0
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001583669884.html
@@ -0,0 +1,154 @@
+
+
+Viewing Basic Information About an OpenSearch Cluster
+On the basic information page of an OpenSearch cluster, you can view the private network address, public network address, version, and node information of the cluster.
+ - Log in to the CSS management console.
- Choose Clusters > OpenSearch. The cluster list page is displayed.
- Click a cluster name to go to the Cluster Information page and view the basic information about the cluster.
+
Table 1 Basic informationType
+ |
+Parameter
+ |
+Description
+ |
+
+
+Cluster Information
+ |
+Name
+ |
+Cluster name. The name can be customized.
+You can click the edit icon on the right to change the cluster name.
+ |
+
+ID
+ |
+Unique ID of a cluster, which is automatically generated by the system.
+Each cluster in the same region has a unique ID.
+ |
+
+Version
+ |
+Cluster version information.
+ |
+
+Cluster Status
+ |
+Current status of a cluster
+ |
+
+Task Status
+ |
+Current task status of a cluster. If no task is in progress, -- is displayed.
+ |
+
+Created
+ |
+Time when a cluster was created
+ |
+
+Cluster Storage Capacity (GB)
+ |
+Storage capacity of a cluster
+ |
+
+Used Cluster Storage (GB)
+ |
+Used storage capacity of a cluster
+ |
+
+Configuration
+ |
+Region
+ |
+Region where a cluster is located
+ |
+
+AZ
+ |
+AZ where a cluster is located
+ |
+
+VPC
+ |
+VPC to which the cluster belongs
+ |
+
+Subnet
+ |
+Subnet to which the cluster belongs
+ |
+
+Security Group
+ |
+Security group to which a cluster belongs.
+To change the security group of a cluster, click Change Security Group on the right.
+ NOTICE: Before changing the security group, ensure that the port 9200 required for service access has been enabled. Incorrect security group configuration may cause service access failures. Exercise caution when performing this operation.
+
+ |
+
+Security Mode
+ |
+Security mode of a cluster.
+- Enabled: The current cluster is a security cluster.
- Disabled: The current cluster is a non-security cluster.
+ |
+
+Reset Password
+ |
+This parameter is displayed only for security clusters.
+Click Reset to change the password of the administrator account admin of the security cluster.
+ NOTE: Requirements for administrator passwords:
+ - The password can contain 8 to 32 characters.
- The password must contain at least three of the following character types: uppercase letters, lowercase letters, digits, and special characters. The following special characters are supported: ~!@#$%^&*()-_=+\|[{}];:,<.>/?
- Do not use the administrator name, or the administrator name spelled backwards.
- You are advised to change the password periodically.
+
+ |
+
+Enterprise Project
+ |
+Enterprise project to which a cluster belongs.
+You can click the project name to view the basic information about the enterprise project.
+ |
+
+Public IP Address
+ |
+Public network access information, which is displayed only for clusters in security mode.
+- For a security cluster with public network access enabled, the configured public network address is displayed. You can use this address to access the security cluster from the public network.
- For a security cluster with public network access disabled, -- is displayed.
+When using a public IP address to access a cluster, you are advised to enable access control and configure an access whitelist to improve cluster security. For details about how to configure the public network access, see Accessing a Cluster from a Public Network.
+ |
+
+HTTPS Access
+ |
+Indicates whether to enable the HTTPS access protocol for a cluster.
+- Disabled: The HTTP protocol is used for cluster access.
- Enabled: The HTTPS protocol is used for cluster access. Only security clusters can enable this function. If HTTPS Access is enabled, you can click Download Certificate to obtain the CER security certificate for accessing the security cluster. Currently, the security certificate cannot be used in the public network environment.
+ |
+
+Private IPv4 Address
+ |
+Private IP address and port number of a cluster, which can be used to access the cluster. If the cluster has only one node, the IP address and port number of only one node are displayed, for example, 10.62.179.32:9200. If the cluster has multiple nodes, the IP addresses and port numbers of all nodes are displayed, for example, 10.62.179.32:9200,10.62.179.33:9200.
+ |
+
+Node
+ |
+Node Specifications
+ |
+Specifications of nodes in a cluster
+ |
+
+Node Storage Type
+ |
+Storage capacity and storage type of nodes in a cluster
+ |
+
+Nodes
+ |
+Number of nodes in a cluster
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001583669892.html b/docs/css/umn/en-us_topic_0000001583669892.html
new file mode 100644
index 00000000..a468c2c0
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001583669892.html
@@ -0,0 +1,22 @@
+
+
+Accessing a Cluster
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001583989096.html b/docs/css/umn/en-us_topic_0000001583989096.html
new file mode 100644
index 00000000..49938cba
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001583989096.html
@@ -0,0 +1,28 @@
+
+
+Managing Clusters
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001584149004.html b/docs/css/umn/en-us_topic_0000001584149004.html
new file mode 100644
index 00000000..4c0fdb06
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001584149004.html
@@ -0,0 +1,340 @@
+
+
+Creating a Cluster
+This section describes how to create an OpenSearch cluster.
+ Public IP address access and Kibana public access can be used only after security mode is enabled.
+
+ Context- When creating a cluster, the number of nodes that can be added varies according to the node type. For details, see Table 1.
+
Table 1 Number of nodes in different typesNode Type
+ |
+Number
+ |
+
+
+ess
+ |
+ess: 1-32
+ |
+
+ess, ess-master
+ |
+ess: 1-200
+ess-master: an odd number ranging from 3 to 9
+ |
+
+ess, ess-client
+ |
+ess: 1-32
+ess-client: 1-32
+ |
+
+ess, ess-cold
+ |
+ess: 1-32
+ess-cold: 1-32
+ |
+
+ess, ess-master, ess-client
+ |
+ess: 1-200
+ess-master: an odd number ranging from 3 to 9
+ess-client: 1-32
+ |
+
+ess, ess-master, ess-cold
+ |
+ess: 1-200
+ess-master: an odd number ranging from 3 to 9
+ess-cold: 1-32
+ |
+
+ess, ess-client, ess-cold
+ |
+ess: 1-32
+ess-client: 1-32
+ess-cold: 1-32
+ |
+
+ess, ess-master, ess-client, ess-cold
+ |
+ess: 1-200
+ess-master: an odd number ranging from 3 to 9
+ess-client: 1-32
+ess-cold: 1-32
+ |
+
+Details about the four node types: - ess: the default node type that is mandatory for cluster creation. The other three node types are optional.
- ess-master: master node
- ess-client: client node
- ess-cold: cold data node
+
+ |
+
+
+
+
+
+
+ Procedure- Log in to the CSS management console.
- In the upper right corner of the page, click Create Cluster. The Create page is displayed.
- Specify Region and AZ.
+
Table 2 Parameter description for Region and AZParameter
+ |
+Description
+ |
+
+
+Region
+ |
+Select a region for the cluster from the drop-down list on the right. Currently, only eu-de and eu-nl are supported.
+ |
+
+AZ
+ |
+Select AZs associated with the cluster region.
+You can select a maximum of three AZs. For details, see Deploying a Cross-AZ Cluster.
+ |
+
+
+
+
+ - Set basic information about the cluster. Specifically, set Version and Name.
+
Table 3 Basic parametersParameter
+ |
+Description
+ |
+
+
+Type
+ |
+Select OpenSearch
+ |
+
+Version
+ |
+Currently, the versions 1.3.6 and 2.11.0 are supported.
+ |
+
+Name
+ |
+Cluster name, which contains 4 to 32 characters. Only letters, numbers, hyphens (-), and underscores (_) are allowed and the value must start with a letter.
+ NOTE: After a cluster is created, you can modify the cluster name as required. Click the name of the cluster to be modified. On the displayed Basic Information page, click the edit icon next to the cluster name. After the modification is completed, click the confirm icon to save the modification, or click the cancel icon to discard it.
+
+ |
+
+
+
+
+ - Set host specifications of the cluster.
+
Table 4 Specification parametersParameter
+ |
+Description
+ |
+
+
+Nodes
+ |
+Number of nodes in a cluster. Select a number from 1 to 32. You are advised to configure three or more nodes to ensure high availability of the cluster.
+- If neither a master node nor client node is enabled, the nodes specified by this parameter are used to serve as both the master node and client node. Nodes provide the cluster management, data storage, cluster access, and data analysis functions. To ensure data stability in a cluster, you are advised to set this parameter to a value no less than 3.
- If only the master node function is enabled, nodes specified by this parameter are used to store data and provide functions of client nodes.
- If both the master and client node functions are enabled, the nodes specified by this parameter are only used for storing data.
- If only the client node function is enabled, nodes specified by this parameter are used to store data and provide functions of the master node.
+ |
+
+CPU Architecture
+ |
+x86 is supported. The available CPU architecture depends on the actual regional environment.
+ |
+
+Node Specifications
+ |
+Specifications of nodes in a cluster. You can select a specification as required. Each cluster supports only one specification.
+After you select a node specification, the CPU and memory corresponding to the current specification are displayed below the parameter. For example, if you select css.medium.8, then 1 vCPU | 8 GB will be displayed, indicating that the node flavor you selected contains one vCPU and 8 GB of memory.
+ |
+
+Node Storage Type
+ |
+In the current version, the following options are available: High I/O and Ultra-high I/O.
+ |
+
+Node Storage Capacity
+ |
+Storage space. Its value varies with node specifications.
+The node storage capacity must be a multiple of 20 GB.
+ |
+
+Disk Encryption
+ |
+If you select this option, the nodes in the cluster you create will use encrypted disks to protect data. By default, this option is not selected. Note that you cannot modify this setting after the cluster is created. Therefore, exercise caution when performing the setting.
+After you select this option, you need to select an available key from the Key Name drop-down list. If no key is available, click Create/View Key to go to the KMS management console and create or modify a key. For details, see Creating a CMK.
+Enabling disk encryption has no impact on your operations on a cluster (such as accessing the cluster and importing data to the cluster). However, after you enable disk encryption, operation performance deteriorates by about 10%.
+ NOTE: - If, after a cluster is created, the cluster is in the Available status but the key used for disk encryption is in the Pending deletion or Disabled status or has been deleted, cluster scale-out is not allowed and the key cannot be used to create new clusters. You can still restart the cluster, create snapshots, restore the cluster, and import data to the cluster.
- After a cluster is created, do not delete the key used by the cluster. Otherwise, the cluster will become unavailable.
- You cannot use default master keys whose aliases end with /default in KMS to encrypt snapshots.
+
+ |
+
+Master node
+ |
+The master node manages all nodes in a cluster. If more than 20 nodes are required to store and analyze a large amount of data, you are advised to enable the master node to ensure cluster stability. Otherwise, you are advised to set only the Nodes parameter and use the nodes as both master and client nodes.
+After enabling the master node, specify Node Specifications, Nodes, and Node Storage Type. The value of Nodes must be an odd number greater than or equal to 3. Up to nine nodes are supported. The value of Node Storage Capacity is fixed. You can select a storage type as required.
+ |
+
+Client node
+ |
+The client node allows clients to access clusters and analyze data. If more than 20 nodes are required to store and analyze a large amount of data, you are advised to enable the client node to ensure cluster stability. Otherwise, you are advised to set only the Nodes parameter and use the nodes as both master and client nodes.
+After enabling the client node, specify Node Specifications, Nodes and Node Storage Type. The value of Nodes ranges from 1 to 32. The value of Node Storage Capacity is fixed. You can select a storage type as required.
+ |
+
+Cold data node
+ |
+The cold data node is used to store historical data, for which query responses can be returned in minutes. If you do not require a quick query response, store historical data on cold data nodes to reduce costs.
+After enabling cold data node, configure Node Specifications, Nodes, Node Storage Type, and Node Storage Capacity. The value of Nodes ranges from 1 to 32. Select Node Storage Type and Node Storage Capacity as required.
+After the cold data node is enabled, CSS automatically adds cold and hot tags to related nodes.
+ |
+
+
+
+
+ - Set the enterprise project.
When creating a CSS cluster, you can bind an enterprise project to the cluster if you have enabled the enterprise project function. You can select an enterprise project created by the current user from the drop-down list on the right or click View Project Management to go to the Enterprise Project Management console and create a new project or view existing projects.
+ - Click Next: Configure Network. Configure the cluster network.
+
Table 5 Parameter descriptionParameter
+ |
+Description
+ |
+
+
+VPC
+ |
+A VPC is a secure, isolated, and logical network environment.
+Select the target VPC. Click View VPC to enter the VPC management console and view the created VPC names and IDs. If no VPCs are available, create one.
+ NOTE: The VPC must have CIDR blocks configured. Otherwise, cluster creation will fail. By default, a VPC contains CIDR blocks.
+
+ |
+
+Subnet
+ |
+A subnet provides dedicated network resources that are isolated from other networks, improving network security.
+Select the target subnet. You can access the VPC management console to view the names and IDs of the existing subnets in the VPC.
+ |
+
+Security Group
+ |
+A security group implements access control for ECSs that have the same security protection requirements in a VPC. To view more details about the security group, click View Security Group.
+ NOTE: Ensure that Port/Range is All or a port range includes port 9200 for the selected security group.
+
+ |
+
+Security Mode
+ |
+After the security mode is enabled, communication will be encrypted and authentication required for the cluster.
+- The default administrator account is admin.
- Set and confirm the Administrator Password. This password will be required when you access this cluster.
+ |
+
+HTTPS Access
+ |
+HTTPS access can be enabled only after the security mode of the cluster is enabled. After HTTPS access is enabled, communication is encrypted when you access the cluster.
+ NOTE: Security clusters use HTTPS for communication, which is much slower than non-security clusters that use HTTP. If you want fast read performance as well as the resource isolation (for indexes, documents, and fields) provided by the security mode, you can disable the HTTPS Access function. After HTTPS Access is disabled, the HTTP protocol is used for cluster communication. In this case, data security cannot be ensured, and a public IP address cannot be used.
+
+ |
+
+Public IP Address
+ |
+If HTTPS Access is enabled, you can configure Public Network Access and obtain an IP address for public network access. This IP address can be used to access this security cluster through the public network. For details, see Accessing a Cluster from a Public Network.
+ |
+
+
+
+
+ - Click Next: Configure Advanced Settings. Configure the automatic snapshot creation and other functions.
- Configure Cluster Snapshot. Set basic configuration and snapshot configuration.
The cluster snapshot function is enabled by default. You can also disable this function as required. To store automatic snapshots in OBS, an agency will be created to access OBS. Additional cost will be incurred if snapshots are stored in standard storage.
+
+Table 6 Cluster snapshot parameterParameter
+ |
+Description
+ |
+
+
+OBS bucket
+ |
+Select an OBS bucket for storing snapshots from the drop-down list box. You can also click Create Bucket on the right to create an OBS bucket. For details, see Creating a Bucket.
+The created or existing OBS bucket must meet the following requirements:
+- Storage Class is Standard or Warm.
- Region must be the same as that of the created cluster.
+ |
+
+Backup Path
+ |
+Storage path of the snapshot in the OBS bucket.
+The backup path configuration rules are as follows: - The backup path cannot contain the following characters: \:*?"<>|
- The backup path cannot start with a slash (/).
- The backup path cannot start or end with a period (.).
- The backup path cannot contain more than 1,023 characters.
+
+ |
+
+IAM Agency
+ |
+IAM agency authorized by the current account for CSS to access or maintain data stored in OBS. You can also click Create IAM Agency on the right to create an IAM agency. For details, see Creating an Agency.
+The created or existing IAM agency must meet the following requirements:
+- Agency Type must be Cloud service.
- Set Cloud Service to CSS.
- The agency must have the Tenant Administrator permission for the OBS(S3) project in OBS(S3).
+ |
+
+Snapshot Encryption
+ |
+Whether to enable the snapshot encryption function. Enabling the snapshot encryption function ensures the security of your snapshot data.
+After the snapshot encryption function is enabled, select a key from the Key Name drop-down list. If no key is available, click Create/View Key to go to the KMS management console and create or modify a key. For details, see Creating a CMK.
+- You cannot use default master keys whose aliases end with /default in KMS to encrypt snapshots.
- If a snapshot has been stored in the OBS bucket, you cannot modify the parameters used for encrypting the snapshot.
- If the key used for encryption is in the Pending deletion or Disabled status, you cannot perform backup and restoration operations on the cluster. Specifically, you cannot create new snapshots for the cluster or use existing snapshots to restore clusters. In this case, switch to the KMS management console and change the status of the target key to Enabled so that backup and restoration operations are allowed on the cluster.
- If you delete the key used for encryption, you cannot perform backup and restoration operations on the cluster. In addition, a deleted key cannot be restored, so exercise caution when deleting a key. If the key is deleted or is in the Pending deletion or Disabled state, automatic snapshot creation is still triggered based on the configured snapshot policy, but all of these tasks will fail and are listed in the Failed Tasks dialog box. In such a scenario, you are advised to disable the automatic snapshot creation function.
+ |
+
+
+
+
+
+Table 7 Automatic snapshot creation parameterParameter
+ |
+Description
+ |
+
+
+Snapshot Name Prefix
+ |
+The snapshot name prefix contains 1 to 32 characters and must start with a lowercase letter. Only lowercase letters, digits, hyphens (-), and underscores (_) are allowed. A snapshot name consists of a snapshot name prefix and a timestamp, for example, snapshot-1566921603720.
+ |
+
+Time Zone
+ |
+Time zone for the backup time, which cannot be changed. Specify Backup Start Time based on the time zone.
+ |
+
+Backup Start Time
+ |
+The time when the backup starts automatically every day. You can specify this parameter only in full hours, for example, 00:00 or 01:00. The value ranges from 00:00 to 23:00. Select a time from the drop-down list.
+ |
+
+Retention Period (days)
+ |
+The number of days that snapshots are retained in the OBS bucket. The value ranges from 1 to 90. You can specify this parameter as required. The system automatically deletes expired snapshots every hour at half past the hour.
+ |
+
+
+
+
+ - Configure advanced settings for the cluster.
- Default: The VPC Endpoint Service, Kibana Public Access, and Tag functions are disabled by default. You can manually enable these functions after the cluster is created.
- Custom: You can enable the VPC Endpoint Service, Kibana Public Access, and Tag functions as required.
+
+Table 8 Parameters for advanced settingsParameter
+ |
+Description
+ |
+
+
+Kibana Public Access
+ |
+You can configure this parameter only when security mode is enabled for a cluster. After enabling this function, you can obtain a public IP address for accessing Kibana. For details, see Accessing a Cluster from a Kibana Public Network.
+ |
+
+Tag
+ |
+Adding tags to clusters can help you identify and manage your cluster resources. You can customize tags or use tags predefined by Tag Management Service (TMS). For details, see Managing Tags.
+If your organization has enabled tag policies for CSS, you must comply with the tag policy rules when creating clusters; otherwise, clusters may fail to be created. Contact the organization administrator to learn more about tag policies.
+ |
+
+
+
+
+
+ - Click Next: Confirm. Check the configuration and click Next to create a cluster.
- Click Back to Cluster List to switch to the Clusters page. The cluster you created is listed on the displayed page and its status is Creating. If the cluster is successfully created, its status will change to Available.
If the cluster creation fails, create the cluster again.
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001584708761.html b/docs/css/umn/en-us_topic_0000001584708761.html
new file mode 100644
index 00000000..92d6abac
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001584708761.html
@@ -0,0 +1,44 @@
+
+
+Performance Data
+- Test environment
- Cluster: 3 Cloud M6 ECSs (8 vCPUs | 64 GB memory)
- Data: open-source web server access logs and internal service dataset (dns_logs)
- Configuration: 120 shards, no replicas, and all the enhanced features enabled
+ - Test result
+
Type
+ |
+Performance (Before)
+ |
+Performance (After)
+ |
+Improved By
+ |
+
+
+Open-source dataset
+ |
+85 Mbit/s
+ |
+131 Mbit/s
+ |
+54%
+ |
+
+Service dataset
+ |
+124 Mbit/s
+ |
+218 Mbit/s
+ |
+76%
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001584828717.html b/docs/css/umn/en-us_topic_0000001584828717.html
new file mode 100644
index 00000000..7037c7a8
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001584828717.html
@@ -0,0 +1,27 @@
+
+
+Text Index Acceleration
+- You can configure index.native_speed_up to enable or disable text index acceleration. This function optimizes the index process and memory usage to accelerate index building for text fields (text and keyword).
- You can configure index.native_analyzer to enable or disable word segmentation acceleration. For texts that require common word segmentation, you can use the analyzer to accelerate word segmentation.
+ Procedure- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster, and click Access Kibana in the Operation column.
- In the navigation tree on the left, choose Dev Tools.
- On the Dev Tools page, run the following command:
PUT my_index
+{
+ "settings": {
+ "index.native_speed_up": true,
+ "index.native_analyzer": true
+ },
+ "mappings": {
+ "properties": {
+ "my_field": {
+ "type": "text"
+ }
+ }
+ }
+}
+
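+If you want to confirm that the settings have taken effect, you can query the index settings. The following is a minimal cURL sketch; the node address 10.0.0.1:9200 is a placeholder, and for a security cluster you would use HTTPS and add -u with your username and password:
+# Query the settings of my_index to verify that the acceleration switches are enabled.
+curl -XGET "http://10.0.0.1:9200/my_index/_settings?pretty"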
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001584988497.html b/docs/css/umn/en-us_topic_0000001584988497.html
new file mode 100644
index 00000000..0daacb8a
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001584988497.html
@@ -0,0 +1,20 @@
+
+
+Optimization of Other Parameters
+After the import performance is enhanced, the number of index merge tasks increases accordingly. You can adjust the following configuration to reduce the impact of merge task overhead on the import performance:
+ You can increase the value of index.merge.scheduler.max_thread_count to increase the number of shard merge threads and reduce the traffic limit on data import. The default value is 4 and you are advised to set it to 8.
+ Procedure- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster, and click Access Kibana in the Operation column.
- In the navigation tree on the left, choose Dev Tools.
- On the Dev Tools page, run the following command:
PUT my_index
+{
+ "settings": {
+ "index.merge.scheduler.max_thread_count": 8
+ }
+}
+
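+If index.merge.scheduler.max_thread_count is a dynamic setting in your cluster version, it can also be adjusted on an existing index without recreating it. A minimal cURL sketch, assuming a placeholder node address of 10.0.0.1:9200 (use HTTPS and -u for a security cluster):
+# Update the merge scheduler thread count on an existing index.
+curl -XPUT "http://10.0.0.1:9200/my_index/_settings" -H 'Content-Type: application/json' -d '
+{
+  "index.merge.scheduler.max_thread_count": 8
+}'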
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001585148465.html b/docs/css/umn/en-us_topic_0000001585148465.html
new file mode 100644
index 00000000..760e6df3
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001585148465.html
@@ -0,0 +1,21 @@
+
+
+Instructions
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001587956758.html b/docs/css/umn/en-us_topic_0000001587956758.html
new file mode 100644
index 00000000..590ea6c0
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001587956758.html
@@ -0,0 +1,19 @@
+
+
+OpenSearch Dashboards
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001590323656.html b/docs/css/umn/en-us_topic_0000001590323656.html
new file mode 100644
index 00000000..01aedd9d
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001590323656.html
@@ -0,0 +1,37 @@
+
+
+Accessing a Cluster Using a VPC Endpoint
+If the VPC endpoint service is enabled, you can use a private domain name or node IP address generated by the endpoint to access the cluster. When the VPC endpoint service is enabled, a VPC endpoint will be created by default. You can select Private Domain Name Creation as required.
+ VPC Endpoint uses a shared load balancer for intranet access. If your workloads require quick access, you are advised to connect a dedicated load balancer to the cluster. For details, see Connecting to a Dedicated Load Balancer.
+ Public IP address access and the VPC endpoint service share a load balancer. Therefore, if you have configured a public access whitelist, both the public and private IP addresses that access the cluster through VPCEP are restricted by it. In this case, add the CIDR block 198.19.128.0/17 to the public access whitelist to allow traffic through VPCEP.
+
+ Enabling the VPC Endpoint Service- Log in to the CSS management console.
- Click Create Cluster in the upper right corner.
- On the Create Cluster page, set Advanced Settings to Custom. Enable the VPC endpoint service.
- Private Domain Name Creation: If you enable this function, the system automatically creates a private domain name for you, which you can use to access the cluster.
- VPC Endpoint Service Whitelist: You can add an authorized account ID to the VPC endpoint service whitelist. Then you can access the cluster using the private domain name or the node IP address.
- You can click Add to add multiple accounts.
- Click Delete in the Operation column to delete the accounts that are not allowed to access the cluster.
+ - If the authorized account ID is set to *, all users are allowed to access the cluster.
- You can view authorized account IDs on the My Credentials page.
+
+
+
+ Managing VPC Endpoint ServiceYou can enable the VPC endpoint service while creating a cluster, and also enable it by performing the following steps after cluster creation.
+ - Log in to the CSS management console.
- In the navigation pane, choose Clusters > OpenSearch.
- Choose Clusters in the navigation pane. On the Clusters page, click the name of the target cluster.
- Click the VPC Endpoint Service tab, and turn on the button next to VPC Endpoint Service.
In the displayed dialog box, you can determine whether to enable the private domain name. Click Yes to enable the VPC endpoint service.
+
+ - (Optional) Click Modify next to VPC Endpoint Service Whitelist to update the existing whitelist.
- Manage VPC endpoints.
The VPC Endpoint Service page displays all VPC endpoints connected to the current VPC endpoint service.
+Click Accept or Reject in the Operation column to change the node status. If you reject the connection with a VPC endpoint, you cannot access the cluster through the private domain name generated by that VPC endpoint.
+
+
+ Accessing the Cluster Using the Private Domain Name or Node IP Address- Obtain the private domain name or node IP address.
Log in to the CSS console, click the target cluster name and go to the Cluster Information page. Click the VPC Endpoint Service tab and view the private domain name.
+
- Run cURL commands to call the APIs, or call the APIs from a program, to access the cluster. For details about Elasticsearch operations and APIs, see the Elasticsearch Reference.
The ECS must meet the following requirements:
+- Sufficient disk space is allocated for the ECS.
- The ECS and the cluster must be in the same VPC. After enabling the VPC endpoint service, you can access the cluster from the ECS even when the cluster is not in the same VPC as the ECS.
- The security group of the ECS must be the same as that of the cluster.
If this requirement is not met, modify the ECS security group or configure the inbound and outbound rules of the ECS security group to allow the ECS security group to be accessed by all security groups of the cluster. For details, see Configuring Security Group Rules.
+ - Configure security group rule settings of the target CSS cluster. Set TCP protocol and port 9200 or a port range including port 9200 for both the outbound and inbound directions.
+- If the cluster you access does not have the security mode enabled, run the following command:
+
- If the cluster you access has the security mode enabled, access the cluster using HTTPS and add the username, password, and -u to the cURL command.
+
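+The exact commands depend on your cluster configuration. As a minimal sketch for the two cases above, assuming the private domain name or node IP address is 10.0.0.1, the port is 9200, and admin/<password> are the credentials of a security cluster:
+# Non-security cluster (HTTP):
+curl "http://10.0.0.1:9200/_cat/indices"
+# Security cluster with HTTPS enabled (-k skips certificate verification, -u supplies the username and password):
+curl -ku admin:<password> "https://10.0.0.1:9200/_cat/indices"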
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001590323664.html b/docs/css/umn/en-us_topic_0000001590323664.html
new file mode 100644
index 00000000..2be56b56
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001590323664.html
@@ -0,0 +1,54 @@
+
+
+Manually Creating a Snapshot
+You can manually create a snapshot at any time to back up all data or data of specified indexes.
+ PrerequisitesTo use the function of creating or restoring snapshots, the account or IAM user logging in to the CSS management console must have both of the following permissions:
+ - Tenant Administrator for project OBS in region Global service
- CSS Administrator in the current region
+
+ Precautions- When creating a backup for the first time, you are advised to back up data of all indexes.
- Cluster snapshots will increase the CPU usage and disk I/O. You are advised to take cluster snapshots during off-peak hours.
- Before creating a snapshot, you need to perform basic configurations, including configuring the OBS bucket for storing snapshots and IAM agency used for security authentication.
- If there are available snapshots in the snapshot list when you configure the OBS bucket for storing cluster snapshots, you cannot change the bucket for snapshots that are created later. Exercise caution when you configure the OBS bucket.
- If snapshots have been stored in the OBS bucket, the OBS bucket cannot be changed. You can disable the snapshot function, then enable the snapshot function, and specify a new OBS bucket. After you disable the snapshot function, you cannot use previously created snapshots to restore the cluster.
- If a cluster is in the Unavailable status, you can use the cluster snapshot function only to restore clusters and view existing snapshot information.
- During backup and restoration of a cluster, you can perform only certain operations, including scaling out, accessing Kibana, viewing metrics, and deleting other snapshots of the cluster. However, you cannot perform the following operations: restarting or deleting the cluster, deleting a snapshot that is in the Creating or Restoring status, and creating or restoring another snapshot. If a snapshot is being created or restored for a cluster, any automatic snapshot creation task initiated for the cluster will be canceled.
- The first snapshot of a cluster is a full snapshot, and subsequent snapshots are incremental snapshots. CSS snapshot files depend on each other.
+
+ Manually Creating a Snapshot- In the CSS navigation pane on the left, click Clusters.
- On the Clusters page that is displayed, click the name of the target cluster. In the navigation pane on the left, choose Cluster Snapshots.
- On the displayed Cluster Snapshots page, click the icon to the right of Cluster Snapshot to enable the cluster snapshot function.
- Enable the cluster snapshot function. OBS buckets and IAM agencies are automatically created to store snapshots. The automatically created OBS bucket and IAM agency are displayed on the page. You can also click the edit icon on the right of Basic Configuration to edit the configuration. To ensure the security of snapshot data, you can select a key to encrypt the snapshot.
+Table 1 Cluster snapshot parameterParameter
+ |
+Description
+ |
+
+
+OBS bucket
+ |
+Select an OBS bucket for storing snapshots from the drop-down list box. You can also click Create Bucket on the right to create an OBS bucket. For details, see Creating a Bucket.
+The created or existing OBS bucket must meet the following requirements:
+- Storage Class is Standard or Warm.
+ |
+
+IAM Agency
+ |
+IAM agency authorized by the current account for CSS to access or maintain data stored in OBS. You can also click Create IAM Agency on the right to create an IAM agency. For details, see Creating an Agency.
+The created or existing IAM agency must meet the following requirements:
+- Agency Type must be Cloud service.
- Set Cloud Service to CSS.
- The agency must have the Tenant Administrator permission for the OBS(S3) project in OBS(S3).
+ |
+
+Snapshot Encryption
+ |
+Whether to enable the snapshot encryption function. Enabling the snapshot encryption function ensures the security of your snapshot data.
+After the snapshot encryption function is enabled, select a key from the Key Name drop-down list. If no key is available, click Create/View Key to go to the KMS management console and create or modify a key. For details, see Creating a CMK.
+- You cannot use default master keys whose aliases end with /default in KMS to encrypt snapshots.
- If a snapshot has been stored in the OBS bucket, you cannot modify the parameters used for encrypting the snapshot.
- If the key used for encryption is in the Pending deletion or Disabled status, you cannot perform backup and restoration operations on the cluster. Specifically, you cannot create new snapshots for the cluster or use existing snapshots to restore clusters. In this case, switch to the KMS management console and change the status of the target key to Enabled so that backup and restoration operations are allowed on the cluster.
- If you delete the key used for encryption, you cannot perform backup and restoration operations on the cluster. In addition, a deleted key cannot be restored, so exercise caution when deleting a key. If the key is deleted or is in the Pending deletion or Disabled state, automatic snapshot creation is still triggered based on the configured snapshot policy, but all of these tasks will fail and are listed in the Failed Tasks dialog box. In such a scenario, you are advised to disable the automatic snapshot creation function.
+ |
+
+
+
+
+Figure 1 Edit Basic configuration
+ - After basic configurations are completed, click Create.
+
Figure 2 Creating a snapshot
+ - Click OK.
After the snapshot is created, it will be displayed in the snapshot list, along with any automatically created snapshots. The Available status indicates that the snapshot was created successfully. You can distinguish snapshots by the Snapshot Type setting. In the upper right corner of the snapshot list, enter the keyword of the snapshot name or snapshot ID to search for the desired snapshots.
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001590332948.html b/docs/css/umn/en-us_topic_0000001590332948.html
new file mode 100644
index 00000000..bff08094
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001590332948.html
@@ -0,0 +1,85 @@
+
+
+Scaling Out a Cluster
+If the workloads on the data plane of a cluster change, you can scale out the cluster by increasing the number or capacity of its nodes. Services are not interrupted during cluster scale-out.
+ Prerequisites- The target cluster is available and has no tasks in progress.
- The target cluster has sufficient quotas available.
+
+ Constraints- The Node Specifications cannot be modified during scale-out. You can modify Node Specifications by referring to Changing Specifications.
- If you change the number and storage capacity of a specified type of node, nodes in other types will not be changed.
- The quota of nodes in different types varies. For details, see Table 1.
+
Table 1 Number of nodes in different typesNode Type
+ |
+Number
+ |
+
+
+ess
+ |
+ess: 1-32
+ |
+
+ess, ess-master
+ |
+ess: 1-200
+ess-master: an odd number ranging from 3 to 9
+ |
+
+ess, ess-client
+ |
+ess: 1-32
+ess-client: 1-32
+ |
+
+ess, ess-cold
+ |
+ess: 1-32
+ess-cold: 1-32
+ |
+
+ess, ess-master, ess-client
+ |
+ess: 1-200
+ess-master: an odd number ranging from 3 to 9
+ess-client: 1-32
+ |
+
+ess, ess-master, ess-cold
+ |
+ess: 1-200
+ess-master: an odd number ranging from 3 to 9
+ess-cold: 1-32
+ |
+
+ess, ess-client, ess-cold
+ |
+ess: 1-32
+ess-client: 1-32
+ess-cold: 1-32
+ |
+
+ess, ess-master, ess-client, ess-cold
+ |
+ess: 1-200
+ess-master: an odd number ranging from 3 to 9
+ess-client: 1-32
+ess-cold: 1-32
+ |
+
+Details about the four node types: - ess: the default node type that is mandatory for cluster creation. The other three node types are optional.
- ess-master: master node
- ess-client: client node
- ess-cold: cold data node
+
+ |
+
+
+
+
+
+
+ Procedure- Log in to the CSS management console.
- In the navigation pane, choose a cluster type. The cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- On the Modify Configuration page, choose the Scale Cluster tab and click Scale out to set parameters.
- Action: Select Scale out.
- Resource: The changed amount of resources.
- Nodes: The number of nodes and node storage capacity of the default data node.
- Nodes: For details, see Table 1.
- Node storage capacity: The value range depends on the Node Specifications, and the value must be a multiple of 20 GB.
+
+ - Click Next.
- Confirm the information and click Submit.
- Click Back to Cluster List to switch to the Clusters page. The Task Status is Scaling out. When Cluster Status changes to Available, the cluster has been successfully scaled out.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001590603388.html b/docs/css/umn/en-us_topic_0000001590603388.html
new file mode 100644
index 00000000..2a56c1e1
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001590603388.html
@@ -0,0 +1,382 @@
+
+
+Sample Code for Two-Way Authentication During the Access to a Cluster
+This section provides the sample code for two-way authentication during the access to a cluster from a Java client.
+ ESSecuredClientWithCerDemo Code
+import org.apache.commons.io.IOUtils;
+import org.apache.http.auth.AuthScope;
+import org.apache.http.auth.UsernamePasswordCredentials;
+import org.apache.http.client.CredentialsProvider;
+import org.apache.http.impl.client.BasicCredentialsProvider;
+import org.apache.http.HttpHost;
+import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.RequestOptions;
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.client.RestClientBuilder;
+import org.elasticsearch.client.RestHighLevelClient;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.security.KeyStore;
+import java.security.SecureRandom;
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.KeyManagerFactory;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.SSLSession;
+import javax.net.ssl.TrustManagerFactory;
+public class ESSecuredClientWithCerDemo {
+ private static final String KEY_STORE_PWD = "";
+ private static final String TRUST_KEY_STORE_PWD = "";
+ private static final String CA_JKS_PATH = "ca.jks";
+ private static final String CLIENT_JKS_PATH = "client.jks";
+ private static final String ELB_ADDRESS = "127.0.0.1";
+ private static final int ELB_PORT = 9200;
+ private static final String CSS_USERNAME = "user";
+ private static final String CSS_PWD = "";
+ public static void main(String[] args) {
+ // Create a client.
+ RestHighLevelClient client = initESClient(ELB_ADDRESS, CSS_USERNAME, CSS_PWD);
+ try {
+ // Search match_all, which is equivalent to {\"query\": {\"match_all\": {}}}.
+ SearchRequest searchRequest = new SearchRequest();
+ SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
+ searchSourceBuilder.query(QueryBuilders.matchAllQuery());
+ searchRequest.source(searchSourceBuilder);
+ // query
+ SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT);
+ System.out.println("query result: " + searchResponse.toString());
+ SearchHits hits = searchResponse.getHits();
+ for (SearchHit hit : hits) {
+ System.out.println(hit.getSourceAsString());
+ }
+ System.out.println("query success");
+ Thread.sleep(2000L);
+ } catch (InterruptedException | IOException e) {
+ e.printStackTrace();
+ } finally {
+ IOUtils.closeQuietly(client);
+ }
+ }
+ private static RestHighLevelClient initESClient(String clusterAddress, String userName, String password) {
+ final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
+ credentialsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials(userName, password));
+ SSLContext ctx = null;
+ try {
+ KeyStore ks = getKeyStore(CLIENT_JKS_PATH, KEY_STORE_PWD, "JKS");
+ KeyManagerFactory kmf = KeyManagerFactory.getInstance("SunX509");
+ kmf.init(ks, KEY_STORE_PWD.toCharArray());
+ KeyStore tks = getKeyStore(CA_JKS_PATH, TRUST_KEY_STORE_PWD, "JKS");
+ TrustManagerFactory tmf = TrustManagerFactory.getInstance("SunX509");
+ tmf.init(tks);
+ ctx = SSLContext.getInstance("SSL", "SunJSSE");
+ ctx.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom());
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ SSLIOSessionStrategy sessionStrategy = new SSLIOSessionStrategy(ctx, new HostnameVerifier() {
+ @Override
+ public boolean verify(String arg0, SSLSession arg1) {
+ return true;
+ }
+ });
+ SecuredHttpClientConfigCallback httpClientConfigCallback = new SecuredHttpClientConfigCallback(sessionStrategy,
+ credentialsProvider);
+ RestClientBuilder builder = RestClient.builder(new HttpHost(clusterAddress, ELB_PORT, "https"))
+ .setHttpClientConfigCallback(httpClientConfigCallback);
+ RestHighLevelClient client = new RestHighLevelClient(builder);
+ return client;
+ }
+ private static KeyStore getKeyStore(String path, String pwd, String type) {
+ KeyStore keyStore = null;
+ FileInputStream is = null;
+ try {
+ is = new FileInputStream(path);
+ keyStore = KeyStore.getInstance(type);
+ keyStore.load(is, pwd.toCharArray());
+ } catch (Exception e) {
+ e.printStackTrace();
+ } finally {
+ IOUtils.closeQuietly(is);
+ }
+ return keyStore;
+ }
+}
+ |
+
+
+
+ SecuredHttpClientConfigCallback Code
+import org.apache.http.client.CredentialsProvider;
+import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
+import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy;
+import org.elasticsearch.client.RestClientBuilder;
+import org.elasticsearch.common.Nullable;
+import java.util.Objects;
+class SecuredHttpClientConfigCallback implements RestClientBuilder.HttpClientConfigCallback {
+ @Nullable
+ private final CredentialsProvider credentialsProvider;
+ /**
+ * The {@link SSLIOSessionStrategy} for all requests to enable SSL / TLS encryption.
+ */
+ private final SSLIOSessionStrategy sslStrategy;
+ /**
+ * Create a new {@link SecuredHttpClientConfigCallback}.
+ *
+ * @param credentialsProvider The credential provider, if a username/password have been supplied
+ * @param sslStrategy The SSL strategy, if SSL / TLS have been supplied
+ * @throws NullPointerException if {@code sslStrategy} is {@code null}
+ */
+ SecuredHttpClientConfigCallback(final SSLIOSessionStrategy sslStrategy,
+ @Nullable final CredentialsProvider credentialsProvider) {
+ this.sslStrategy = Objects.requireNonNull(sslStrategy);
+ this.credentialsProvider = credentialsProvider;
+ }
+ /**
+ * Get the {@link CredentialsProvider} that will be added to the HTTP client.
+ *
+ * @return Can be {@code null}.
+ */
+ @Nullable
+ CredentialsProvider getCredentialsProvider() {
+ return credentialsProvider;
+ }
+ /**
+ * Get the {@link SSLIOSessionStrategy} that will be added to the HTTP client.
+ *
+ * @return Never {@code null}.
+ */
+ SSLIOSessionStrategy getSSLStrategy() {
+ return sslStrategy;
+ }
+ /**
+ * Sets the {@linkplain HttpAsyncClientBuilder#setDefaultCredentialsProvider(CredentialsProvider) credential provider},
+ *
+ * @param httpClientBuilder The client to configure.
+ * @return Always {@code httpClientBuilder}.
+ */
+ @Override
+ public HttpAsyncClientBuilder customizeHttpClient(final HttpAsyncClientBuilder httpClientBuilder) {
+ // enable SSL / TLS
+ httpClientBuilder.setSSLStrategy(sslStrategy);
+ // enable user authentication
+ if (credentialsProvider != null) {
+ httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider);
+ }
+ return httpClientBuilder;
+ }
+}
+ |
+
+
+
+ pom.xml Code
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>1</groupId>
+ <artifactId>ESClient</artifactId>
+ <version>1.0-SNAPSHOT</version>
+ <name>ESClient</name>
+
+ <properties>
+ <maven.compiler.source>8</maven.compiler.source>
+ <maven.compiler.target>8</maven.compiler.target>
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ <elasticsearch.version>7.10.2</elasticsearch.version>
+ </properties>
+ <dependencies>
+ <dependency>
+ <groupId>org.elasticsearch.client</groupId>
+ <artifactId>transport</artifactId>
+ <version>${elasticsearch.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.elasticsearch</groupId>
+ <artifactId>elasticsearch</artifactId>
+ <version>${elasticsearch.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.elasticsearch.client</groupId>
+ <artifactId>elasticsearch-rest-high-level-client</artifactId>
+ <version>${elasticsearch.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>commons-io</groupId>
+ <artifactId>commons-io</artifactId>
+ <version>2.11.0</version>
+ </dependency>
+ </dependencies>
+</project>
+
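+The sample expects a truststore (ca.jks) and a client keystore (client.jks), which are not provided here. As a rough sketch only, assuming you have a CA certificate file (CloudSearchService.cer is an example name) and a client certificate with its private key (client.crt and client.key are placeholders), the JKS files could be produced with keytool and OpenSSL; adapt aliases, file names, and passwords to your environment:
+# Import the CA certificate into a JKS truststore (example file names and passwords).
+keytool -import -alias css-ca -file CloudSearchService.cer -keystore ca.jks -storepass changeit -noprompt
+# Package the client certificate and private key into a PKCS#12 file, then convert it to a JKS keystore.
+openssl pkcs12 -export -in client.crt -inkey client.key -out client.p12 -name css-client -passout pass:changeit
+keytool -importkeystore -srckeystore client.p12 -srcstoretype PKCS12 -srcstorepass changeit -destkeystore client.jks -deststorepass changeit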
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001590612676.html b/docs/css/umn/en-us_topic_0000001590612676.html
new file mode 100644
index 00000000..79def12c
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001590612676.html
@@ -0,0 +1,18 @@
+
+
+Removing Specified Nodes
+If a cluster can process existing data without fully using its nodes, you can remove one or more specified nodes from the cluster to reduce costs. Services will not be interrupted during the removal of specified nodes.
+ PrerequisitesThe target cluster is available and has no tasks in progress.
+
+ Constraints- Ensure that the disk usage after scale-in is less than 80% and each AZ of each node type has at least one node.
- In a cross-AZ cluster, the difference between the numbers of the same type nodes in different AZs cannot exceed 1.
- For a cluster without master nodes, the number of removed data nodes and cold data nodes in a scale-in must be fewer than half of the original number of data nodes and cold data nodes, and the number of remaining data nodes and cold data nodes after a scale-in must be greater than the maximum number of index replicas.
- For a cluster with master nodes, the number of removed master nodes in a scale-in must be fewer than half of the original master node number. After scale-in, there has to be an odd number of master nodes, and there has to be at least three of them.
+
+ Procedure- Log in to the CSS management console.
- In the navigation pane, choose a cluster type. The cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- On the Modify Configuration page, click the Scale In tab.
- On the Scale In tab page, set the following parameters:
- Whether to perform data migration: If this option is selected, data migration is performed. If the target node contains disabled indexes or indexes that have no replicas, this option must be selected.
- In the data node table, select the node to be scaled in.
+ - Click Next.
- Confirm the information and click Submit.
- Click Back to Cluster List to switch to the Clusters page. The Task Status is Scaling in. When Cluster Status changes to Available, the cluster has been successfully scaled in.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001590963076.html b/docs/css/umn/en-us_topic_0000001590963076.html
new file mode 100644
index 00000000..c6efa7ae
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001590963076.html
@@ -0,0 +1,57 @@
+
+
+Accessing a Cluster from a Public Network
+You can access a security cluster that has the HTTPS access enabled through the public IP address provided by the system.
+ By default, CSS uses a shared load balancer for public network access. You can use a dedicated load balancer to improve performance. For details about its configuration, see Connecting to a Dedicated Load Balancer.
+ If public network access is enabled for CSS, then EIP and bandwidth resources will be used and billed.
+
+ Configuring Public Network Access- Log in to the CSS management console.
- On the Create Cluster page, enable Security Mode. Set the administrator password and enable HTTPS access.
- Select Automatically assign for Public IP Address and set related parameters.
+
Table 1 Public network access parametersParameter
+ |
+Description
+ |
+
+
+Bandwidth
+ |
+Bandwidth for accessing the cluster through the public IP address
+ |
+
+Access Control
+ |
+If you disable this function, all IP addresses can access the cluster through the public IP address. If you enable access control, only IP addresses in the whitelist can access the cluster through the public IP address.
+ |
+
+Whitelist
+ |
+IP address or IP address range allowed to access a cluster. Use commas (,) to separate multiple addresses. This parameter can be configured only when Access Control is enabled.
+ |
+
+
+
+
+
+
+ Managing Public Network AccessYou can configure or modify public network access for a cluster, view its public IP address, or disassociate the public IP address from the cluster.
+ - Log in to the CSS management console.
- In the navigation pane, choose Clusters > OpenSearch.
- On the Clusters page, click the name of the target cluster. On the Basic Information page that is displayed, manage the public network access configurations.
- Configure public network access
If you did not configure public network access during cluster creation, you can configure it on the cluster details page after the cluster is created.
+Click Associate next to Public IP Address, set the access bandwidth, and click OK.
+If the association fails, wait for several minutes and try again.
+ - Modify public network access
For a cluster for which you have configured public network access, you can click Edit next to Bandwidth to modify the bandwidth, or you can click Set next to Access Control to set the access control function and the whitelist for access.
+ - View the associated public IP address
On the basic information page of a cluster, you can view the public IP address associated with the cluster.
+ - Disassociate a public IP address from a cluster
To disassociate the public IP address, click Disassociate next to Public IP Address.
+
+
+
+ Accessing a Cluster Through the Public IP AddressAfter configuring the public IP address, you can use it to access the cluster.
+ For example, run the following cURL commands to view the index information in the cluster. In this example, the public access IP address of one node in the cluster is 10.62.179.32 and the port number is 9200.
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001590963080.html b/docs/css/umn/en-us_topic_0000001590963080.html
new file mode 100644
index 00000000..22ff4db2
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001590963080.html
@@ -0,0 +1,65 @@
+
+
+Managing Automatic Snapshot Creation
+Snapshots are automatically created at a specified time each day according to the rules you create. You can enable or disable the automatic snapshot creation function and set the automatic snapshot creation policy.
+ PrerequisitesTo use the function of creating or restoring snapshots, the account or IAM user logging in to the CSS management console must have both of the following permissions:
+ - Tenant Administrator for project OBS in region Global service
- CSS Administrator in the current region
+
+ Precautions- When creating a backup for the first time, you are advised to back up data of all indexes.
- Cluster snapshots will increase the CPU usage and disk I/O. You are advised to take cluster snapshots during off-peak hours.
- Before creating a snapshot, you need to perform basic configurations, including configuring the OBS bucket for storing snapshots and IAM agency used for security authentication.
- If there are available snapshots in the snapshot list when you configure the OBS bucket for storing cluster snapshots, you cannot change the bucket for snapshots that are created later. Exercise caution when you configure the OBS bucket.
- If snapshots have been stored in the OBS bucket, the OBS bucket cannot be changed. You can disable the snapshot function, enable the snapshot function, and specify a new OBS bucket. After you disable the snapshot function, you cannot use previously created snapshots to restore the cluster.
- If a cluster is in the Unavailable status, you can use the cluster snapshot function only to restore clusters and view existing snapshot information.
- During backup and restoration of a cluster, you can perform only certain operations, including scaling out, accessing Kibana, viewing metrics, and deleting other snapshots of the cluster. However, you cannot perform the following operations: restarting or deleting the cluster, deleting a snapshot that is in the Creating or Restoring status, and creating or restoring another snapshot. If a snapshot is being created or restored for a cluster, any automatic snapshot creation task initiated for the cluster will be canceled.
- The first snapshot of a cluster is a full snapshot, and subsequent snapshots are incremental snapshots. CSS snapshot files depend on each other.
+
+ Managing Automatic Snapshot Creation- Log in to the CSS management console.
- In the navigation pane, choose Clusters > OpenSearch.
- On the Clusters page that is displayed, click the name of the target cluster. In the navigation pane on the left, choose Cluster Snapshots.
- On the displayed Cluster Snapshots page, click the icon to the right of Cluster Snapshot to enable the cluster snapshot function.
- Enable the cluster snapshot function. OBS buckets and IAM agencies are automatically created by CSS to store snapshots. The automatically created OBS bucket and IAM agency are displayed on the page. You can also click the edit icon on the right of Basic Configuration to edit the configuration. To ensure the security of snapshot data, you can select a key to encrypt the snapshot.
+Table 1 Cluster snapshot parameterParameter
+ |
+Description
+ |
+
+
+OBS bucket
+ |
+Select an OBS bucket for storing snapshots from the drop-down list box. You can also click Create Bucket on the right to create an OBS bucket. For details, see Creating a Bucket.
+The created or existing OBS bucket must meet the following requirements:
+- Storage Class is Standard or Warm.
- Region must be the same as that of the created cluster.
+ |
+
+Backup Path
+ |
+Storage path of the snapshot in the OBS bucket.
+The backup path configuration rules are as follows: - The backup path cannot contain the following characters: \:*?"<>|
- The backup path cannot start with a slash (/).
- The backup path cannot start or end with a period (.).
- The backup path cannot contain more than 1,023 characters.
+
+ |
+
+IAM Agency
+ |
+IAM agency authorized by the current account for CSS to access or maintain data stored in OBS. You can also click Create IAM Agency on the right to create an IAM agency. For details, see Creating an Agency.
+The created or existing IAM agency must meet the following requirements:
+- Agency Type must be Cloud service.
- Set Cloud Service to CSS.
- The agency must have the Tenant Administrator permission for the OBS(S3) project in OBS(S3).
+ |
+
+Snapshot Encryption
+ |
+Whether to enable the snapshot encryption function. Enabling the snapshot encryption function ensures the security of your snapshot data.
+After the snapshot encryption function is enabled, select a key from the Key Name drop-down list. If no key is available, click Create/View Key to go to the KMS management console and create or modify a key. For details, see Creating a CMK.
+- You cannot use default master keys whose aliases end with /default in KMS to encrypt snapshots.
- If a snapshot has been stored in the OBS bucket, you cannot modify the parameters used for encrypting the snapshot.
- If the key used for encryption is in the Pending deletion or disabled state, you cannot perform backup and restoration operations on the cluster. Specifically, you cannot create new snapshots for the cluster, or use existing snapshots to restore clusters. In this case, switch to the KMS management console and enable the target key so that backup and restore operations are allowed on the cluster.
- If you delete the key used for encryption, you cannot perform backup and restore operations on the cluster. In addition, you cannot restore the deleted key. Therefore, exercise caution when deleting a key. If the key is deleted or is in the Pending deletion or disabled state, automatic snapshot creation is still triggered based on the configured snapshot policy. However, all automatic snapshot creation tasks will fail, and the failed tasks are displayed in the Failed Tasks dialog box. In such a scenario, you are advised to disable the automatic snapshot creation function.
+ |
+
+
+
+
+Figure 1 Editing basic configurations
+ - Enable the automatic snapshot creation function. The Configure Automatic Snapshot Creation dialog box is displayed. If the automatic snapshot creation function is enabled, you can click
on the right of Automatic Snapshot Creation to modify the snapshot policy.
+Figure 2 Automatic snapshot creation
+ - Click OK to save the snapshot policy.
Snapshots that are automatically created according to the snapshot policy are displayed in the snapshot list, along with manually created snapshots. You can distinguish them by the Snapshot Type setting. In the upper right corner of the snapshot list, enter the keyword of the snapshot name or snapshot ID to search for the desired snapshots.
+Figure 3 Automatic snapshot creation
+ - (Optional) Disable the automatic snapshot creation function.
After you disable the automatic snapshot creation function, the system stops automatic creation of snapshots. If the system is creating a snapshot based on the automatic snapshot creation policy and the snapshot is not yet displayed in the snapshot list, you cannot disable the automatic snapshot creation function. In this case, if you click the button next to Automatic Snapshot Creation, a message is displayed, indicating that you cannot disable the function. You are advised to disable the function after the system completes automatic creation of the snapshot, and the created snapshot is displayed in the snapshot list.
+When disabling the automatic snapshot creation function, you can choose whether to delete the snapshots that have been automatically created by selecting Delete automated snapshots in the displayed dialog box. By default, automatically created snapshots are not deleted.
+- If you do not select Delete automated snapshots, automatically created snapshots are not deleted when you disable the automatic snapshot creation function. You can manually delete them later. For details, see Deleting a Snapshot. If you retain the automatically created snapshots and enable automatic snapshot creation again, then all snapshots whose Snapshot Type is Automated can only be automatically deleted by the system. Specifically, the system automatically deletes snapshots based on the snapshot policy configured when you enable the automatic snapshot creation function again. For example, if you set Retention Period (days) to 10, the system will automatically delete the snapshots that have been retained for more than 10 days.
- If you select Delete automated snapshots, all snapshots with Snapshot Type set to Automated in the snapshot list will be deleted when you disable the automatic snapshot creation function.
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001590972372.html b/docs/css/umn/en-us_topic_0000001590972372.html
new file mode 100644
index 00000000..11a6bc1f
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001590972372.html
@@ -0,0 +1,84 @@
+
+
+Scaling in a Cluster
+If a cluster can process existing data without fully using its resources, you can scale in the cluster to reduce costs. You are advised to scale in clusters during off-peak hours.
+ PrerequisitesThe target cluster is available and has no tasks in progress.
+
+ Constraints- Only the number of nodes can be modified during cluster scale-in. The node specifications and node storage capacity cannot be modified. You can modify node specifications by referring to Changing Specifications. You can modify node storage capacity by referring to Scaling Out a Cluster.
- If you change the number and storage capacity of a specified type of node, nodes of other types are not affected.
- Ensure that the disk usage after scale-in is less than 80% and each AZ of each node type has at least one node.
- When scaling in a cluster, the data in the nodes to be deleted is migrated to other nodes. The timeout threshold for data migration is five hours. If data migration is not complete within five hours, the cluster scale-in fails. If the cluster contains a large amount of data, you are advised to scale it in over multiple operations.
- For a cluster without master nodes, the number of remaining data nodes (including cold data nodes and other types of nodes) after scale-in must be greater than half of the original node number, and greater than the maximum number of index replicas.
- For a cluster with master nodes, the number of master nodes removed in a single scale-in must be fewer than half of the original number of master nodes. After the scale-in, the number of remaining master nodes must be an odd number and no less than three.
- A cluster with two nodes cannot be scaled in. You can create a cluster using a single node.
- The quota of nodes in different types varies. For details, see Table 1.
+
Table 1 Number of nodes in different typesNode Type
+ |
+Number
+ |
+
+
+ess
+ |
+ess: 1-32
+ |
+
+ess, ess-master
+ |
+ess: 1-200
+ess-master: an odd number ranging from 3 to 9
+ |
+
+ess, ess-client
+ |
+ess: 1-32
+ess-client: 1-32
+ |
+
+ess, ess-cold
+ |
+ess: 1-32
+ess-cold: 1-32
+ |
+
+ess, ess-master, ess-client
+ |
+ess: 1-200
+ess-master: an odd number ranging from 3 to 9
+ess-client: 1-32
+ |
+
+ess, ess-master, ess-cold
+ |
+ess: 1-200
+ess-master: an odd number ranging from 3 to 9
+ess-cold: 1-32
+ |
+
+ess, ess-client, ess-cold
+ |
+ess: 1-32
+ess-client: 1-32
+ess-cold: 1-32
+ |
+
+ess, ess-master, ess-client, ess-cold
+ |
+ess: 1-200
+ess-master: an odd number ranging from 3 to 9
+ess-client: 1-32
+ess-cold: 1-32
+ |
+
+Details about the four node types: - ess: the default node type that is mandatory for cluster creation. The other three node types are optional.
- ess-master: master node
- ess-client: client node
- ess-cold: cold data node
+
+ |
+
+
+
+
+
+
+ Procedure- Log in to the CSS management console.
- In the navigation pane, choose a cluster type. The cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- On the Modify Configuration page, choose the Scale Cluster tab and click Scale in to set parameters.
- Action: Select Scale in.
- Resources: The amount of resources after the change.
- Nodes: The number of default data nodes after the change. For details about the allowed value range, see Table 1.
+ - Click Next.
- Confirm the information and click Submit.
- Click Back to Cluster List to switch to the Clusters page. The Task Status is Scaling in. When Cluster Status changes to Available, the cluster has been successfully scaled in.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001591285452.html b/docs/css/umn/en-us_topic_0000001591285452.html
new file mode 100644
index 00000000..22f71692
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001591285452.html
@@ -0,0 +1,20 @@
+
+
+(Optional) Interconnecting with a Dedicated Load Balancer
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001591285456.html b/docs/css/umn/en-us_topic_0000001591285456.html
new file mode 100644
index 00000000..3e25b34a
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001591285456.html
@@ -0,0 +1,28 @@
+
+
+Restoring Data
+You can use existing snapshots to restore the backup index data to a specified cluster.
+ PrerequisitesTo use the function of creating or restoring snapshots, the account or IAM user logging in to the CSS management console must have both of the following permissions:
+ - Tenant Administrator for project OBS in region Global service
- CSS Administrator in the current region
+
+ Precautions- Cluster snapshots will increase the CPU usage and disk I/O. You are advised to take cluster snapshots during off-peak hours.
- If snapshots have been stored in the OBS bucket, the OBS bucket cannot be changed. You can disable the snapshot function, then enable the snapshot function, and specify a new OBS bucket. After you disable the snapshot function, you cannot use previously created snapshots to restore the cluster.
- If a cluster is in the Unavailable status, you can use the cluster snapshot function only to restore clusters and view existing snapshot information.
- During backup and restoration of a cluster, you can perform only certain operations, including scaling out, accessing Kibana, viewing metrics, and deleting other snapshots of clusters. However, you cannot perform the following operations: restarting or deleting the cluster, deleting a snapshot that is in the Creating or Restoring status, and creating or restoring another snapshot. If a snapshot is being created or restored for a cluster, any automatic snapshot creation task initiated for the cluster will be canceled.
- Cluster data cannot be queried during snapshot restoration.
- If you restore a CSS cluster snapshot to another cluster, indexes with the same name in the destination cluster will be overwritten. If the snapshot and the destination cluster use different shards, the indexes with the same name will not be overwritten.
- The version of the destination cluster used for restoration must be the same as or higher than that of the source cluster.
+
+ Restoring DataYou can use snapshots whose Snapshot Status is Available to restore cluster data. The stored snapshot data can be restored to other clusters.
+ Restoring data will overwrite current data in clusters. Therefore, exercise caution when restoring data.
+ - In the Snapshots area, locate the row that contains the snapshot you want to restore and click Restore in the Operation column.
- On the Restore page, set restoration parameters.
Index: Enter the name of the index you want to restore. If you do not specify any index name, data of all indexes will be restored. The value can contain 0 to 1,024 characters. Uppercase letters, spaces, and certain special characters (including "\<|>/?) are not allowed. You can use the asterisk (*) to match multiple indexes. For example, index* indicates that all indexes with the prefix index in snapshots are restored.
+Rename Pattern: Enter a regular expression. Indexes that match the regular expression are restored. The default value index_(.+) indicates restoring data of all indexes. The value contains 0 to 1,024 characters. Uppercase letters, spaces, and certain special characters (including "\<|>/?,) are not allowed.
+Rename Replacement: Enter the index renaming rule. The default value restored_index_$1 indicates that restored_ is added in front of the names of all restored indexes. The value contains 0 to 1,024 characters. Uppercase letters, spaces, and certain special characters (including "\<|>/?,) are not allowed.
The Rename Pattern and Rename Replacement take effect only when both of them are configured. (An illustration of how these parameters map to the underlying restore request is provided after this procedure.)
+
+Cluster: Select the cluster to restore the snapshot to. You can only restore the snapshot to clusters whose version is the same as or higher than that of the source cluster and whose status is Available. If the status of the current cluster is Unavailable, you cannot restore the snapshot to the current cluster. If the target cluster you selected has an index with the same name as one in the original cluster, data in that index will be overwritten after the restoration. Exercise caution when performing this operation.
+Figure 1 Restoring a snapshot
+ - Click OK. If restoration succeeds, Task Status of the snapshot in the snapshot list will change to Restoration succeeded, and the index data is generated again according to the snapshot information.
Figure 2 Successful restoration
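+ For reference, the Index, Rename Pattern, and Rename Replacement parameters above correspond to the rename options of the underlying snapshot restore API. The following request is only an illustrative sketch; repository_name and snapshot_name are placeholders, because CSS manages the snapshot repository and the console issues the request on your behalf.
+POST _snapshot/repository_name/snapshot_name/_restore
+{
+  "indices": "index*",
+  "rename_pattern": "index_(.+)",
+  "rename_replacement": "restored_index_$1"
+}
+With these values, an index named index_2023 in the snapshot would be restored as restored_index_2023.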
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001591294758.html b/docs/css/umn/en-us_topic_0000001591294758.html
new file mode 100644
index 00000000..c46d5333
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001591294758.html
@@ -0,0 +1,79 @@
+
+
+Changing the Security Mode
+After a cluster is created, its security mode can be changed using the following methods:
+
+ ContextYou can create clusters in multiple security modes. For details about the differences between security modes, see Table 1.
+ Table 1 Cluster security modesSecurity Mode
+ |
+Scenario
+ |
+Advantage
+ |
+Disadvantage
+ |
+
+
+Non-Security Mode
+ |
+Intranet services and test scenarios
+ |
+Simple. Easy to access.
+ |
+Poor security. Anyone can access such clusters.
+ |
+
+Security Mode + HTTP Protocol
+ |
+User permissions can be isolated, which is applicable to scenarios sensitive to cluster performance.
+ |
+Security authentication is required for accessing such clusters, which improves cluster security. Accessing a cluster through HTTP protocol can retain the high performance of the cluster.
+ |
+Cannot be accessed from the public network.
+ |
+
+Security Mode + HTTPS Protocol
+ |
+Scenarios that require high security and public network access.
+ |
+Security authentication is required for accessing such clusters, which improves cluster security. HTTPS protocol allows public network to access such clusters.
+ |
+The performance of clusters using HTTPS is 20% lower than that of clusters using HTTP.
+ |
+
+
+
+
+
+
+ Prerequisites- You are advised to back up data before changing the cluster security mode.
- The target cluster is available and has no tasks in progress.
+
+ Constraints- A cluster automatically restarts when its security mode is being changed. Services are interrupted during the restart. The authentication mode for calling the cluster will change after the restart, and client configurations need to be adjusted accordingly (see the sample requests after this list).
- If a cluster has already opened the Kibana session box, a session error message will be displayed after you change the cluster security mode. In this case, clear the cache and open Kibana again.
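+ The following sketch illustrates how a typical client request changes after the security mode is switched. The node IP address, port, username, and password are placeholders; adapt them to your own cluster and access method.
+# Non-security cluster: plain HTTP, no authentication
+curl 'http://<node_ip>:9200/_cat/health'
+# Security mode + HTTPS: basic authentication is required (-k skips certificate verification)
+curl -u admin:<password> -k 'https://<node_ip>:9200/_cat/health'
+# Security mode + HTTP: basic authentication over plain HTTP
+curl -u admin:<password> 'http://<node_ip>:9200/_cat/health'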
+
+ Switching from the Non-Security Mode to Security ModeYou can change a non-security cluster to a security cluster that uses HTTP or HTTPS. After a cluster's security mode is enabled, security authentication is required for accessing the cluster.
+ - Log in to the CSS management console.
- In the navigation pane, choose a cluster type. The cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- Choose the Configure Security Mode tab.
- Enable the security mode. Enter and confirm the administrator password of the cluster.
- Enable or disable HTTPS Access.
- If you enable HTTPS Access: The HTTPS protocol is used to encrypt cluster communication and you can configure public networks to access the cluster.
- If you disable HTTPS Access: The HTTP protocol is used and you cannot configure public networks to access the cluster.
+ - Click Submit. Confirm the information and the cluster list page is displayed.
The Task Status of the cluster is The security mode is changing. When the cluster status changes to Available, the security mode has been successfully changed.
+
+
+ Switching from the Security to Non-Security ModeYou can change a security cluster that uses HTTP or HTTPS to a non-security cluster. After a cluster's security mode is disabled, security authentication is no longer required for accessing the cluster.
+ - Clusters in non-security mode can be accessed without security authentication, and HTTP protocol is used to transmit data. Ensure the security of the cluster access environment and do not expose the access interface to the public network.
- During the switchover from the security mode to the non-security mode, the indexes of the original security cluster will be deleted. Back up data before disabling the security mode.
- If a security cluster has been bound to a public IP address, unbind it before changing the security mode.
- If a security cluster has enabled Kibana public network access, disable it before changing the security mode.
+
+ - Log in to the CSS management console.
- In the navigation pane on the left, choose Clusters. On the displayed Clusters page, locate the target cluster and choose More > Modify Configuration in the Operation column.
- Choose the Configure Security Mode tab.
- Disable the security mode.
- Click Submit. Confirm the information and the cluster list page is displayed.
The Task Status of the cluster is The security mode is changing. When the cluster status changes to Available, the security mode has been successfully changed.
+
+
+ Switching the Protocol of Security ClustersYou can change the protocol of a security cluster.
+ If a security cluster has been bound to a public IP address, you need to unbind it before changing HTTPS protocol to HTTP.
+
+ - Log in to the CSS management console.
- In the navigation pane on the left, choose Clusters. On the displayed Clusters page, locate the target cluster and choose More > Modify Configuration in the Operation column.
- Choose the Configure Security Mode tab.
- Enable or disable HTTPS Access.
- If you enable HTTPS Access:
HTTPS protocol is used to encrypt cluster communication and you can configure public network access.
+ - If you disable HTTPS Access: An alarm message is displayed. Click OK to disable the function.
When the HTTP protocol is used, cluster communication is no longer encrypted and the public network access function cannot be enabled.
+
+ - Click Submit. Confirm the information and the cluster list page is displayed.
The Task Status of the cluster is The security mode is changing. When the cluster status changes to Available, the security mode has been successfully changed.
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001591298678.html b/docs/css/umn/en-us_topic_0000001591298678.html
new file mode 100644
index 00000000..df846443
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001591298678.html
@@ -0,0 +1,36 @@
+
+
+Creating and Managing Index Policies
+You can manage the indexes of OpenSearch clusters. ISM is a plugin that allows you to automate periodic and administrative operations based on changes in index age, index size, or number of documents. When using the ISM plugin, you can define policies that automatically handle index rollovers or deletions based on your needs.
+ Creating an Index Policy- Log in to Kibana and choose Index Management on the left. The index management page is displayed.
- Click Create policy to create an index policy.
- In the Configuration method dialog box, select JSON editor and click Continue. The page for creating an index policy is displayed.
- Enter a policy ID in the Policy ID text box and enter your policy in the Define policy text box. (A sample policy is provided after this procedure.)
Figure 1 Configuring a policy
+ - Click Create.
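+ The following is a minimal illustrative policy that could be pasted into the Define policy text box. It assumes a simple lifecycle in which an index is deleted 7 days after creation; the state names, conditions, and actions are examples only and should be adjusted to your own requirements.
+{
+  "policy": {
+    "description": "Example: delete indexes 7 days after creation",
+    "default_state": "open",
+    "states": [
+      {
+        "name": "open",
+        "actions": [],
+        "transitions": [
+          { "state_name": "delete", "conditions": { "min_index_age": "7d" } }
+        ]
+      },
+      {
+        "name": "delete",
+        "actions": [ { "delete": {} } ],
+        "transitions": []
+      }
+    ]
+  }
+}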
+
+ Attaching a Policy to an IndexYou can attach a policy to one or more indexes and add the policy ID to an index template. When you create indexes using that index template pattern, the policy will be attached to all created indexes.
+ - Method 1: OpenSearch Dashboards CLI
On the Dev Tools page of the OpenSearch Dashboards, run the following command to associate the policy ID with the index template:
+PUT _template/<template_name>
+{
+ "index_patterns": ["index_name-*"],
+ "settings": {
+ "opendistro.index_state_management.policy_id": "policy_id"
+ }
+}
+- <template_name>: Replace it with the name of a created index template.
- policy_id: Replace it with a custom policy ID.
+For details about how to create an index template, see Index Templates.
+ - Method 2: OpenSearch Dashboards Console
- On the Index Management page of the OpenSearch Dashboards, choose Indices.
Figure 2 Choosing Indices
+ - In the Indices list, select the target index to which you want to attach a policy.
- Click Apply policy in the upper right corner.
Figure 3 Adding a policy
+ - Select the policy you created from the Policy ID drop-down list.
Figure 4 Selecting an index policy
+ - Click Apply.
After you attach a policy to an index, ISM creates a job that runs every 5 minutes by default, to execute the policy, check conditions, and convert the index to different statuses.
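+To check which policy is attached to an index and which state the index is currently in, you can run the ISM explain API in Dev Tools. This is an optional check; my_index is a placeholder index name.
+GET _opendistro/_ism/explain/my_index
+The response shows the policy ID and the current state of the managed index.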
+
+
+
+ Managing Index Policies- On the Index Management page of the OpenSearch Dashboards, choose Managed Indices.
- If you want to change the policy, click Change policy. For details, see Changing Policies.
Figure 5 Changing policies
+ - To delete a policy, select your policy, and click Remove policy.
- To retry a policy, select your policy, and click Retry policy.
+ For details, see Index State Management.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001591298682.html b/docs/css/umn/en-us_topic_0000001591298682.html
new file mode 100644
index 00000000..598a1def
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001591298682.html
@@ -0,0 +1,74 @@
+
+
+Creating and Authorizing a User on the OpenSearch Dashboards
+PrerequisitesThe security mode has been enabled for the OpenSearch cluster.
+
+ Parameters
+ Table 1 Parameters for creating and authorizing a user on KibanaParameter
+ |
+Description
+ |
+
+
+Permission
+ |
+A single permission, for example, creating an index (indices:admin/create)
+ |
+
+Action group
+ |
+A group of permissions. For example, the predefined SEARCH action group grants roles permissions to use the _search and _msearch APIs.
+ |
+
+Role
+ |
+A role is a combination of permissions and action groups, including operation permissions on clusters, indexes, documents, or fields.
+ |
+
+Backend role
+ |
+(Optional) Other external roles from the backend such as LDAP/Active Directory
+ |
+
+User
+ |
+A user can send operation requests to Elasticsearch clusters. The user has credentials such as username and password, and zero or multiple backend roles and custom attributes.
+ |
+
+Role mapping
+ |
+A user will be assigned a role after successful authentication. Role mapping is to map a role to a user (or a backend role). For example, the mapping from kibana_user (role) to jdoe (user) means that John Doe obtains all permissions of kibana_user after being authenticated by kibana_user. Similarly, the mapping from all_access (role) to admin (backend role) means that any user with the backend role admin (from the LDAP/Active Directory server) has all the permissions of role all_access after being authenticated. You can map each role to multiple users or backend roles.
+ |
+
+
+
+
+ You can customize the username, role name, and tenant name in the OpenSearch Dashboards.
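+ Role mappings can also be viewed or configured through the security plugin REST API instead of the OpenSearch Dashboards UI. The following requests are only an illustrative sketch using the _opendistro-prefixed path; the role name, user, and backend role are placeholders, and the requests must be sent by a user with security administration permissions.
+# View all role mappings
+GET _opendistro/_security/api/rolesmapping
+# Map the user jdoe and the backend role admin to an existing role
+PUT _opendistro/_security/api/rolesmapping/<role_name>
+{
+  "users": ["jdoe"],
+  "backend_roles": ["admin"]
+}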
+
+
+ Procedure- Log in to the OpenSearch Dashboards.
- Log in to the CSS management console.
- In the navigation pane, choose Clusters > OpenSearch.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- Enter the administrator username and password to log in to the OpenSearch Dashboards.
- Username: admin (default administrator account name)
- Password: Enter the administrator password you set when creating the cluster in security mode.
Figure 1 Logging in to OpenSearch
+
+
+ - Creating a user.
- On the OpenSearch Dashboards page, choose Security. The Security page is displayed.
Figure 2 Going to the Security page
+ - Choose Internal users on the left. The user creation page is displayed.
Figure 3 Creating a user
+ - Click Create internal user. The user information configuration page is displayed.
- In the Credentials area, enter the username and password.
Figure 4 Entering the username and password
+ - Click Create. After the user is created, it is displayed in the user list.
Figure 5 User information
+
+ - Create a role and grant permissions to the role.
- Select Roles from the Security drop-down list box.
- On the Roles page, click Create role. The role creation page is displayed.
- In the Name area, set the role name.
Figure 6 Setting a role name
- On the Cluster Permissions page, set cluster permissions based on service requirements. If no cluster permissions are specified for a role, the role has no cluster-level permissions.
Figure 7 Assigning cluster-level permissions
+ - In the Index Permissions area, set the index permission.
Figure 8 Setting index permissions
- On the Tenant Permissions page, set tenant permissions for the role.
Figure 9 Role permissions
+After the setting is complete, you can view the created role on the Roles page.
+
+ - Map a user with a role to bind them.
- Select Roles from the Security drop-down list box.
- On the Roles page, select the role to be mapped. The role mapping page is displayed.

+ - On the Mapped users tab page, click Map users and select the user to be mapped from the users drop-down list box.

+ - Click Map.
- After the configuration is complete, you can check whether the configuration takes effect in OpenSearch Dashboards.
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001591456866.html b/docs/css/umn/en-us_topic_0000001591456866.html
new file mode 100644
index 00000000..19053c3a
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001591456866.html
@@ -0,0 +1,104 @@
+
+
+Managing Logs
+CSS provides log backup and search functions to help you locate faults. You can back up cluster logs to OBS buckets and download required log files to analyze and locate faults.
+ Log Query- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, click the name of the target cluster. The cluster information page is displayed.
- In the navigation pane on the left, choose Log Management.
- Query logs on the log management page.
Select the node, log type, and log level you want to query, and then click . The query result is displayed.
+When you search for logs, the latest 10,000 logs are matched. A maximum of 100 logs are displayed.
+
+
+ Enabling Log Backup- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, click the name of the target cluster. The cluster information page is displayed.
- Click the Logs tab and toggle on the Log Management switch.
- In the Edit Log Backup Configuration dialog box, set the parameters.
In the displayed dialog box, OBS Bucket and IAM Agency are automatically created for log backup. You can change the default value by referring to Table 1.
+If the Log Management function has been enabled for the cluster, you can click on the right of Log Backup Configuration and modify the configuration in the displayed Edit Log Backup Configuration dialog box. For details, see Table 1.
+
+Table 1 Parameters for configuring log backupParameter
+ |
+Description
+ |
+Remarks
+ |
+
+
+OBS Bucket
+ |
+Select an OBS bucket from the drop-down list for storing logs. You can also click Create Bucket on the right to create an OBS bucket.
+ |
+The OBS bucket and the cluster must be in the same region.
+ NOTE: To let an IAM user access an OBS bucket, you need to grant the GetBucketStoragePolicy, GetBucketLocation, ListBucket, and ListAllMyBuckets permissions to the user.
+
+ |
+
+Backup Path
+ |
+Storage path of logs in the OBS bucket
+ |
+The backup path configuration rules are as follows: - The backup path cannot contain the following characters: \:*?"<>|
- The backup path cannot start with a slash (/).
- The backup path cannot start or end with a period (.).
- The total length of the backup path cannot exceed 1,023 characters.
+
+ |
+
+IAM Agency
+ |
+IAM agency authorized by the current account for CSS to access or maintain data stored in the OBS bucket. You can also click Create IAM Agency on the right to create an IAM agency.
+ |
+The IAM agency must meet the following requirements:
+- Agency Type must be Cloud service.
- Set Cloud Service to CSS.
- Mandatory policies: Tenant Administrator
+ |
+
+
+
+
+ - Back up logs.
- Automatically backing up logs
Click the icon on the right of Auto Backup to enable the auto backup function.
+After the automatic backup function is enabled, set the backup start time in the Configure Auto Backup dialog box. When the scheduled time arrives, the system will back up logs automatically.
+After the auto backup function is enabled, you can click the icon on the right of the parameter to change the backup start time.
+ - Manually backing up logs
On the Log Backup tab page, click Back Up. On the displayed page, click Yes to start backup.
+If Task Status in the log backup list is Successful, the backup is successful.
+ All logs in the cluster are copied to a specified OBS path. You can view or download log files from the path of the OBS bucket.
+
+
+ - Search for logs.
On the Log Search page, select the target node, log type, and log level, and click . The search results are displayed.
+When you search for logs, the latest 10,000 logs are matched. A maximum of 100 logs are displayed.
+
+
+ Viewing LogsAfter backing up logs, you can click Backup Path to go to the OBS console and view the logs.
+ Backed up logs mainly include deprecation logs, run logs, index slow logs, and search slow logs. Table 2 lists the storage types of the OBS bucket.
+
+ Table 2 Log typesLog Name
+ |
+Description
+ |
+
+
+clustername_deprecation.log
+ |
+Deprecation log
+ |
+
+clustername_index_indexing_slowlog.log
+ |
+Index slow log
+ |
+
+clustername_index_search_slowlog.log
+ |
+Search slow log
+ |
+
+clustername.log
+ |
+Elasticsearch run log
+ |
+
+clustername_access.log
+ |
+Access log
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001591616594.html b/docs/css/umn/en-us_topic_0000001591616594.html
new file mode 100644
index 00000000..0111cbe0
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001591616594.html
@@ -0,0 +1,44 @@
+
+
+Viewing the Default Plugin List
+CSS clusters have default plug-ins. You can view the default plugin information on the console or Kibana.
+ Viewing Plugins on the Console- Log in to the CSS management console.
- In the navigation pane, choose Clusters. Click the target cluster name and go to the Cluster Information page of the cluster.
- Click the Plugins tab.
- On the Default tab page, view default plugins supported by the current version.
+
+ Viewing Plugins on the Kibana- Log in to the CSS management console.
- In the navigation pane, choose Clusters. Locate the target cluster and click Access Kibana in the Operation column to log in to OpenSearch Dashboards.
- Go to Dev Tools and run the following command to view the cluster plugin information:
GET _cat/plugins?v
+The following is an example of the response body:
+name component version
+css-3657-ess-esn-1-1 analysis-dynamic-synonym 1.3.6
+css-3657-ess-esn-1-1 analysis-icu 1.3.6
+css-3657-ess-esn-1-1 analysis-ik 1.3.6
+css-3657-ess-esn-1-1 analysis-kuromoji 1.3.6
+css-3657-ess-esn-1-1 analysis-logtxt 1.0.0
+css-3657-ess-esn-1-1 analysis-nori 1.3.6
+css-3657-ess-esn-1-1 analysis-pinyin 1.3.6
+css-3657-ess-esn-1-1 analysis-stconvert 1.3.6
+css-3657-ess-esn-1-1 hpack 2.0.0
+css-3657-ess-esn-1-1 ingest-attachment 1.3.6
+css-3657-ess-esn-1-1 obs-store-plugin 1.3.6
+css-3657-ess-esn-1-1 opensearch-alerting 1.3.6.0
+css-3657-ess-esn-1-1 opensearch-anomaly-detection 1.3.6.0
+css-3657-ess-esn-1-1 opensearch-asynchronous-search 1.3.6.0
+css-3657-ess-esn-1-1 opensearch-cross-cluster-replication 1.3.6.0
+css-3657-ess-esn-1-1 opensearch-index-management 1.3.6.0
+css-3657-ess-esn-1-1 opensearch-job-scheduler 1.3.6.0
+css-3657-ess-esn-1-1 opensearch-knn 1.3.6.0
+css-3657-ess-esn-1-1 opensearch-ml 1.3.6.0
+css-3657-ess-esn-1-1 opensearch-observability 1.3.6.0
+css-3657-ess-esn-1-1 opensearch-performance-analyzer 1.3.6.0
+css-3657-ess-esn-1-1 opensearch-reports-scheduler 1.3.6.0
+css-3657-ess-esn-1-1 opensearch-security 1.3.6.0
+css-3657-ess-esn-1-1 opensearch-sql 1.3.6.0
+css-3657-ess-esn-1-1 repository-obs 1.3.6
+name indicates the cluster node name, component indicates the plugin name, and version indicates the plugin version.
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001591776270.html b/docs/css/umn/en-us_topic_0000001591776270.html
new file mode 100644
index 00000000..750954bb
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001591776270.html
@@ -0,0 +1,39 @@
+
+
+Switching Hot and Cold Data
+CSS provides you with cold data nodes. You can store data that requires query response in seconds on hot data nodes with high performance and store historical data that requires query response in minutes on cold data nodes with large capacity and low specifications.
+ - When creating a cluster, you need to configure data nodes. After cold data nodes are selected, the original data nodes become hot data nodes.
- You can enable the cold data node, master node, and client node functions at the same time.
- You can add cold data nodes and expand their storage capacity. The maximum storage capacity is determined by the node specifications. Local disks do not support storage capacity expansion.
+
+ Switching Between Hot and Cold DataIf you enable cold data nodes when creating a cluster, the cold data nodes are labeled with cold. Other data nodes become hot nodes and are labeled with hot. You can specify indexes to allocate data to cold or hot nodes.
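+ To confirm which nodes carry the hot or cold label, you can list the node attributes in Kibana. This is an optional check; in clusters with cold data nodes, the attribute used by the template below is box_type, whose value is hot or cold.
+GET _cat/nodeattrs?v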
+ You can configure a template to store specified indexes on cold or hot nodes.
+ For example, to store indexes whose names start with myindex on cold data nodes, log in to the Kibana console of the cluster and use a template that matches the myindex* pattern.
+ Run the following command to create a template: PUT _template/test
+{
+ "order": 1,
+ "index_patterns": "myindex*",
+ "settings": {
+ "refresh_interval": "30s",
+ "number_of_shards": "3",
+ "number_of_replicas": "1",
+ "routing.allocation.require.box_type": "cold"
+ }
+}
+
+ You can also apply the setting to an existing index:
+ PUT myindex/_settings
+ {
+ "index.routing.allocation.require.box_type": "cold"
+ }
+ To cancel the hot or cold allocation setting of an index, set the attribute to null:
+ PUT myindex/_settings
+{
+ "index.routing.allocation.require.box_type": null
+ }
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001591776274.html b/docs/css/umn/en-us_topic_0000001591776274.html
new file mode 100644
index 00000000..13769738
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001591776274.html
@@ -0,0 +1,18 @@
+
+
+Logging In to the OpenSearch Dashboards
+PrerequisitesAn OpenSearch cluster has been created.
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001599872681.html b/docs/css/umn/en-us_topic_0000001599872681.html
new file mode 100644
index 00000000..4c982a3d
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001599872681.html
@@ -0,0 +1,385 @@
+
+
+Monitoring Metrics
+FunctionThis topic describes CSS metrics that can be monitored by Cloud Eye as well as their namespaces and dimensions. You can search for the monitoring metrics and alarms generated for CSS by using the Cloud Eye console or calling APIs.
+
+
+ Monitoring Metrics- Table 1 describes the monitoring metrics of CSS clusters.
- Monitored object: Cloud service nodes of CSS clusters
- Monitoring period (original metric): 1 minute
+ Accumulated value: The value is accumulated from the time when a node is started. After the node is restarted, the value is reset to zero and accumulated again.
+
+
+ Table 1 CSS metricsMetric ID
+ |
+Metric
+ |
+Description
+ |
+Value Range
+ |
+Monitored Target
+ |
+Monitoring Interval (Raw Data)
+ |
+
+
+jvm_heap_usage
+ |
+JVM Heap Usage
+ |
+JVM heap memory usage of a node.
+Unit: %
+ |
+0-100%
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+cpu_usage
+ |
+CPU Usage
+ |
+CPU usage.
+Unit: %
+ |
+0-100%
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+load_average
+ |
+Average Load
+ |
+Average number of queuing tasks per minute on a node
+ |
+≥ 0
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+open_file_descriptors
+ |
+Open File Descriptors
+ |
+Number of opened file descriptors on a node
+ |
+≥ 0
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+max_file_descriptors
+ |
+Max. Allowed File Descriptors
+ |
+Maximum number of allowed file descriptors
+ |
+≥ 0
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+thread_pool_write_queue
+ |
+Tasks in Write Queue
+ |
+Number of job queues in a write thread pool
+ |
+≥ 0
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+thread_pool_search_queue
+ |
+Tasks in Search Queue
+ |
+Number of job queues in a search thread pool
+ |
+≥ 0
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+thread_pool_force_merge_queue
+ |
+Tasks in ForceMerge Queue
+ |
+Number of job queues in a force merge thread pool
+ |
+≥ 0
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+thread_pool_write_rejected
+ |
+Rejected Tasks in Write Queue
+ |
+Number of rejected jobs in a write thread pool
+ |
+≥ 0
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+thread_pool_search_rejected
+ |
+Rejected Tasks in Search Queue
+ |
+Number of rejected jobs in a search thread pool
+ |
+≥ 0
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+thread_pool_force_merge_rejected
+ |
+Rejected Tasks in ForceMerge Queue
+ |
+Number of rejected jobs in a force merge thread pool
+ |
+≥ 0
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+thread_pool_write_threads
+ |
+Size of Write Thread Pool
+ |
+Size of a write thread pool
+ |
+≥ 0
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+thread_pool_search_threads
+ |
+Size of Search Thread Pool
+ |
+Size of a search thread pool
+ |
+≥ 0
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+thread_pool_force_merge_threads
+ |
+Size of ForceMerge Thread Pool
+ |
+Size of a force merge thread pool
+ |
+≥ 0
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+free_fs_size
+ |
+Available Size of File Systems
+ |
+Available size of file systems in a CSS cluster
+Unit: byte
+ |
+≥ 0 bytes
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+total_fs_size
+ |
+Total Size of File Systems
+ |
+Total size of file systems in a CSS cluster
+Unit: byte
+ |
+≥ 0 bytes
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+jvm_old_gc_count
+ |
+Total GCs of Old-Generation JVM
+ |
+Number of old-generation garbage collection times
+ |
+≥ 0
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+jvm_old_gc_time
+ |
+Total GC Duration of Old-Generation JVM
+ |
+Old-generation garbage collection duration.
+Unit: ms
+ |
+≥ 0 ms
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+jvm_young_gc_count
+ |
+Total GCs of Young-Generation JVM
+ |
+Number of young-generation garbage collection times
+ |
+≥ 0
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+jvm_young_gc_time
+ |
+GC Duration of Young-Generation JVM
+ |
+Young-generation garbage collection duration.
+Unit: ms
+ |
+≥ 0 ms
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+mem_free_in_bytes
+ |
+Available Memory
+ |
+Unused memory space of a node.
+Unit: byte
+ |
+≥ 0 bytes
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+mem_free_percent
+ |
+Available Memory Percentage
+ |
+Percentage of unused memory space on a node.
+ |
+≥ 0
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+mem_used_in_bytes
+ |
+Used Memory
+ |
+Used memory space of a node.
+Unit: byte
+ |
+≥ 0 bytes
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+current_opened_http_count
+ |
+Currently Open HTTP Connections
+ |
+Number of HTTP connections on a node
+ |
+≥ 0
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+total_opened_http_count
+ |
+Total Open HTTP Connections
+ |
+Total number of HTTP connections on a node
+ |
+≥ 0
+ |
+CSS cluster - cloud service node
+ |
+1 minute
+ |
+
+
+
+
+
+ Dimension
+ Table 2 Dimension descriptionKey
+ |
+Value
+ |
+
+
+cluster_id
+ |
+CSS cluster
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001633220693.html b/docs/css/umn/en-us_topic_0000001633220693.html
new file mode 100644
index 00000000..0d57b182
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001633220693.html
@@ -0,0 +1,54 @@
+
+
+Manually Creating a Snapshot
+You can manually create a snapshot at any time to back up all data or data of specified indexes.
+ PrerequisitesTo use the function of creating or restoring snapshots, the account or IAM user logging in to the CSS management console must have both of the following permissions:
+ - Tenant Administrator for project OBS in region Global service
- CSS Administrator in the current region
+
+ Precautions- When creating a backup for the first time, you are advised to back up data of all indexes.
- Cluster snapshots will increase the CPU usage and disk I/O. You are advised to take cluster snapshots during off-peak hours.
- Before creating a snapshot, you need to perform basic configurations, including configuring the OBS bucket for storing snapshots and IAM agency used for security authentication.
- If there are available snapshots in the snapshot list when you configure the OBS bucket for storing cluster snapshots for the first time, you cannot change the bucket for snapshots that are subsequently created automatically or manually. Exercise caution when you configure the OBS bucket.
- If snapshots have been stored in the OBS bucket, the OBS bucket cannot be changed. You can disable the snapshot function, enable the snapshot function, and specify a new OBS bucket. After you disable the snapshot function, you cannot use previously created snapshots to restore the cluster.
- If a cluster is in the Unavailable status, you can use the cluster snapshot function only to restore clusters and view existing snapshot information.
- During backup and restoration of a cluster, you can perform only certain operations, including scaling out, accessing Kibana, viewing metrics, and deleting other snapshots of clusters. However, you cannot perform the following operations: restarting or deleting the cluster, deleting a snapshot that is in the Creating or Restoring status, and creating or restoring another snapshot. If a snapshot is being created or restored for a cluster, any automatic snapshot creation task initiated for the cluster will be canceled.
- The first snapshot of a cluster is a full snapshot, and subsequent snapshots are incremental snapshots. CSS snapshot files depend on each other.
+
+ Manually Creating a Snapshot- In the CSS navigation pane on the left, click Clusters.
- On the Clusters page that is displayed, click the name of the target cluster. In the navigation pane on the left, choose Cluster Snapshots.
- On the displayed Cluster Snapshots page, click the icon to the right of Cluster Snapshot to enable the cluster snapshot function.
- Enable the cluster snapshot function. OBS buckets and IAM agencies are automatically created to store snapshots. The automatically created OBS bucket and IAM agency are displayed on the page. You can also click
on the right of Basic Configuration to edit the configuration. To ensure the security of snapshot data, you can select a key to encrypt the snapshot.
+Table 1 Cluster snapshot parameterParameter
+ |
+Description
+ |
+
+
+OBS bucket
+ |
+Select an OBS bucket for storing snapshots from the drop-down list box. You can also click Create Bucket on the right to create an OBS bucket. For details, see Creating a Bucket.
+The created or existing OBS bucket must meet the following requirements:
+- Storage Class is Standard or Warm.
+ |
+
+IAM Agency
+ |
+IAM agency authorized by the current account for CSS to access or maintain data stored in OBS. You can also click Create IAM Agency on the right to create an IAM agency. For details, see Creating an Agency.
+The created or existing IAM agency must meet the following requirements:
+- Agency Type must be Cloud service.
- Set Cloud Service to CSS.
- The agency must have the Tenant Administrator permission for the OBS(S3) project in OBS(S3).
+ |
+
+Snapshot Encryption
+ |
+Whether to enable the snapshot encryption function. Enabling the snapshot encryption function ensures the security of your snapshot data.
+After the snapshot encryption function is enabled, select a key from the Key Name drop-down list. If no key is available, click Create/View Key to go to the KMS management console and create or modify a key. For details, see Creating a CMK.
+- You cannot use default master keys whose aliases end with /default in KMS to encrypt snapshots.
- If a snapshot has been stored in the OBS bucket, you cannot modify the parameters used for encrypting the snapshot.
- If the key used for encryption is in the Pending deletion or disabled state, you cannot perform backup and restoration operations on the cluster. Specifically, you cannot create new snapshots for the cluster, or use existing snapshots to restore clusters. In this case, switch to the KMS management console and enable the target key so that backup and restore operations are allowed on the cluster.
- If you delete the key used for encryption, you cannot perform backup and restore operations on the cluster. In addition, you cannot restore the deleted key. Therefore, exercise caution when deleting a key. If the key is deleted or is in the Pending deletion or disabled state, automatic snapshot creation is still triggered based on the configured snapshot policy. However, all automatic snapshot creation tasks will fail, and the failed tasks are displayed in the Failed Tasks dialog box. In such a scenario, you are advised to disable the automatic snapshot creation function.
+ |
+
+
+
+
+Figure 1 Editing basic configurations
+ - After basic configurations are completed, click Create.
+
Figure 2 Create snapshot
+ - Click OK.
After the snapshot is created, it will be displayed in the snapshot list. The status Available indicates that the snapshot is created successfully. Automatically created snapshots are displayed in the same list; you can distinguish them from manually created ones by the Snapshot Type setting. In the upper right corner of the snapshot list, enter the keyword of the snapshot name or snapshot ID to search for the desired snapshots.
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001633221741.html b/docs/css/umn/en-us_topic_0000001633221741.html
new file mode 100644
index 00000000..abf173df
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001633221741.html
@@ -0,0 +1,23 @@
+
+
+Index Backup and Restoration
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001633303977.html b/docs/css/umn/en-us_topic_0000001633303977.html
new file mode 100644
index 00000000..a2702838
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001633303977.html
@@ -0,0 +1,12 @@
+
+
+Backup and Restoration Overview
+You can back up index data in clusters. If data loss occurs or you want to retrieve data of a specified duration, you can restore the index data. Index backup is implemented by creating cluster snapshots. When creating a backup for the first time, you are advised to back up data of all indexes.
+ - Managing Automatic Snapshot Creation: Snapshots are automatically created at a specified time each day according to the rules you create. You can enable or disable the automatic snapshot creation function and set the automatic snapshot creation policy.
- Manually Creating a Snapshot: You can manually create a snapshot at any time to back up all data or data of specified indexes.
- Restoring Data: You can use existing snapshots to restore the backup index data to a specified cluster.
- Deleting a Snapshot: Delete snapshots you do not require and release resources.
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001633949601.html b/docs/css/umn/en-us_topic_0000001633949601.html
new file mode 100644
index 00000000..a41a203a
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001633949601.html
@@ -0,0 +1,28 @@
+
+
+OpenSearch
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001637436445.html b/docs/css/umn/en-us_topic_0000001637436445.html
new file mode 100644
index 00000000..22aaf0ec
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001637436445.html
@@ -0,0 +1,35 @@
+
+
+
+ Scaling In/Out a Cluster
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001640645481.html b/docs/css/umn/en-us_topic_0000001640645481.html
new file mode 100644
index 00000000..402199e2
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001640645481.html
@@ -0,0 +1,226 @@
+
+
+Scenario Description
+CSS integrates shared load balancers and allows you to bind public network access and enable the VPC Endpoint service. Dedicated load balancers provide more functions and higher performance than shared load balancers. This section describes how to connect a cluster to a dedicated load balancer.
+ Advantages of connecting a cluster to a dedicated load balancer: - A non-security cluster can also use capabilities of the Elastic Load Balance (ELB) service.
- You can use customized certificates for HTTPS bidirectional authentication.
- Seven-layer traffic monitoring and alarm configuration are supported, allowing you to view the cluster status at any time.
+
+ There are eight service forms for clusters in different security modes to connect to dedicated load balancers. Table 1 describes the ELB capabilities for the eight service forms. Table 2 describes the configurations for the eight service forms, and a sample access command for verifying connectivity through the load balancer is provided after Table 2.
+ You are advised not to connect a load balancer that has been bound to a public IP address to a non-security cluster. Access from the public network through such a load balancer may bring security risks, because non-security clusters can be accessed over HTTP without security authentication.
+
+
+ Table 1 ELB capabilities for different clustersSecurity Mode
+ |
+Service Form Provided by ELB for External Systems
+ |
+ELB Load Balancing
+ |
+ELB Traffic Monitoring
+ |
+ELB Two-way Authentication
+ |
+
+
+Non-security
+ |
+No authentication
+ |
+Supported
+ |
+Supported
+ |
+Not supported
+ |
+
+One-way authentication
+Two-way authentication
+ |
+Supported
+ |
+Supported
+ |
+Supported
+ |
+
+Security mode + HTTP
+ |
+Password authentication
+ |
+Supported
+ |
+Supported
+ |
+Not supported
+ |
+
+One-way authentication + Password authentication
+Two-way authentication + Password authentication
+ |
+Supported
+ |
+Supported
+ |
+Supported
+ |
+
+Security mode + HTTPS
+ |
+One-way authentication + Password authentication
+Two-way authentication + Password authentication
+ |
+Supported
+ |
+Supported
+ |
+Supported
+ |
+
+
+
+
+
+ Table 2 Configuration for interconnecting different clusters with ELBSecurity Mode
+ |
+Service Form Provided by ELB for External Systems
+ |
+ELB Listener
+ |
+Backend Server Group
+ |
+
+Frontend Protocol
+ |
+Port
+ |
+SSL Parsing Mode
+ |
+Backend Protocol
+ |
+Health Check Port
+ |
+Health Check Path
+ |
+
+Non-security
+ |
+No authentication
+ |
+HTTP
+ |
+9200
+ |
+No authentication
+ |
+HTTP
+ |
+9200
+ |
+/
+ |
+
+One-way authentication
+ |
+HTTPS
+ |
+9200
+ |
+One-way authentication
+ |
+HTTP
+ |
+9200
+ |
+
+Two-way authentication
+ |
+HTTPS
+ |
+9200
+ |
+Two-way authentication
+ |
+HTTP
+ |
+9200
+ |
+
+Security mode + HTTP
+ |
+Password authentication
+ |
+HTTP
+ |
+9200
+ |
+No authentication
+ |
+HTTP
+ |
+9200
+ |
+/_opendistro/_security/health
+ |
+
+One-way authentication + Password authentication
+ |
+HTTPS
+ |
+9200
+ |
+One-way authentication
+ |
+HTTP
+ |
+9200
+ |
+
+Two-way authentication + Password authentication
+ |
+HTTPS
+ |
+9200
+ |
+Two-way authentication
+ |
+HTTP
+ |
+9200
+ |
+
+Security mode + HTTPS
+ |
+One-way authentication + Password authentication
+ |
+HTTPS
+ |
+9200
+ |
+One-way authentication
+ |
+HTTPS
+ |
+9200
+ |
+
+Two-way authentication + Password authentication
+ |
+HTTPS
+ |
+9200
+ |
+Two-way authentication
+ |
+HTTPS
+ |
+9200
+ |
+
+
+
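+ After the listener and backend server group are configured as described in Table 2, you can verify access through the load balancer from a client. The following commands are only an illustrative sketch; the ELB address, certificate files, username, and password are placeholders, and the options to use depend on the service form you selected.
+# No authentication (non-security cluster, HTTP listener)
+curl 'http://<elb_address>:9200/_cat/health'
+# One-way authentication + password authentication (the client verifies the server certificate)
+curl --cacert ./ca.pem -u username:password 'https://<elb_address>:9200/_cat/health'
+# Two-way authentication + password authentication (a client certificate and private key are also required)
+curl --cacert ./ca.pem --cert ./client.pem --key ./client.key -u username:password 'https://<elb_address>:9200/_cat/health'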
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001640645485.html b/docs/css/umn/en-us_topic_0000001640645485.html
new file mode 100644
index 00000000..0a01787a
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001640645485.html
@@ -0,0 +1,14 @@
+
+
+Deleting a Snapshot
+If you no longer need a snapshot, delete it to release storage resources. If the automatic snapshot creation function is enabled, snapshots that are automatically created cannot be deleted manually, and the system automatically deletes these snapshots on the half hour after the time specified by Retention Period (days). If you disable the automatic snapshot creation function while retaining the automated snapshots, then you can manually delete them later. If you do not manually delete the automatically created snapshots and enable the automatic snapshot creation function again, then all snapshots with Snapshot Type set to Automated in the snapshot list of the cluster can only be automatically deleted by the system.
+ After a snapshot is deleted, its data cannot be restored. Exercise caution when deleting a snapshot.
+
+ - In the snapshot list, locate the snapshot that you want to delete.
- Click Delete in the Operation column. In the dialog box that is displayed, confirm the snapshot information and click OK.
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001640654793.html b/docs/css/umn/en-us_topic_0000001640654793.html
new file mode 100644
index 00000000..50c4b71e
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001640654793.html
@@ -0,0 +1,40 @@
+
+
+Adding Master/Client Nodes
+If workloads on the data plane of a cluster increase, you can add master or client nodes as needed. Services are not interrupted while they are added.
+ PrerequisitesThe cluster is in the Available state and has no ongoing task.
+
+ Constraints- If a cluster already has master and client nodes, the Add Master/Client Node tab is not displayed on the Modify Configuration page. In this case, you need to add the master or client nodes by referring to Scaling Out a Cluster.
- When you add master or client nodes, the number of nodes that can be configured varies depending on the node type. For details, see Table 1.
+
Table 1 Number of nodes in different typesNode Type
+ |
+Number
+ |
+
+
+Master node
+ |
+An odd number ranging from 3 to 9
+ |
+
+Client node
+ |
+1 to 32
+ |
+
+
+
+
+
+
+ Procedure- Log in to the CSS management console.
- In the navigation pane, choose a cluster type. The cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- On the Modify Configuration page, choose the Add Master/Client Node tab.
- Select the target node type and set the node specifications, quantity, and storage.
- Master and client nodes cannot be added at the same time.
- If a cluster already has a master or client node, you can only add nodes of the other type.
+ - Click Next.
- Confirm the information and click Submit.
Return to the cluster list page. The Task Status of the cluster is Scaling out.
+- If you added a master node and Cluster Status changed to Available, the master node has been successfully added.
- If you added a client node and Cluster Status changed to Available, the client node has been added. You can restart data nodes and cold data nodes to shut down Cerebro and Kibana processes on the nodes.
+
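+As an optional check after the task completes, you can list the cluster nodes and their roles through the _cat/nodes API. This is a minimal sketch; the endpoint, username, and password are placeholders, and the protocol and authentication options should match how you normally access the cluster:
+curl -u user:pwd 'http://CSS-cluster-IP:9200/_cat/nodes?v'
+In the node.role column of the output, master-eligible nodes include the letter m, and coordinating-only (client) nodes are shown as a hyphen (-).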
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001640658697.html b/docs/css/umn/en-us_topic_0000001640658697.html
new file mode 100644
index 00000000..4d61845e
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001640658697.html
@@ -0,0 +1,17 @@
+
+
+Managing Indexes
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001640658701.html b/docs/css/umn/en-us_topic_0000001640658701.html
new file mode 100644
index 00000000..808e83d4
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001640658701.html
@@ -0,0 +1,86 @@
+
+
+Accessing a Cluster from a Kibana Public Network
+For CSS clusters that have security mode enabled, you can enable Kibana public access. After the configuration is complete, an IP address will be provided to access Kibana of this cluster over the Internet.
+ You can configure Kibana public access during cluster creation, or after a cluster in security mode is created.
+ The whitelist for Kibana public network access depends on the ELB whitelist. After you update the whitelist, the new settings take effect immediately for new connections. For existing persistent connections that use IP addresses removed from the whitelist, the new settings take effect about 1 minute after these connections are closed.
+
+ Configuring Kibana Public Access When Creating a Cluster- Log in to the CSS management console.
- Click Create Cluster in the upper right corner. The Create Cluster page is displayed.
- On the Create Cluster page, enable Security Mode.
- Set Advanced Settings to Custom, enable Kibana Public Access, and set parameters.
+
Table 1 Kibana public access parametersParameter
+ |
+Description
+ |
+
+
+Bandwidth
+ |
+Bandwidth for accessing Kibana with the public IP address
+Value range: 1 to 100
+Unit: Mbit/s
+ |
+
+Access Control
+ |
+If you disable this function, all IP addresses can access Kibana through the public IP address. If you enable this function, only the IP addresses or IP address ranges in the whitelist can access Kibana through the public IP address.
+ |
+
+Whitelist
+ |
+IP address or IP address range allowed to access a cluster. Use commas (,) to separate multiple addresses. This parameter can be configured only when Access Control is enabled.
+You are advised to enable this function.
+ |
+
+
+
+
+After the cluster is created, click the cluster name to go to the Basic Information page. On the Kibana Public Access page, you can view the Kibana public IP address.
+
+
+ Configuring Kibana Public Access for an Existing ClusterYou can enable, disable, modify, and view Kibana public access for an existing cluster that has security mode enabled.
+ - Log in to the CSS management console.
- In the navigation pane, choose Clusters > OpenSearch.
- Choose Clusters in the navigation pane. On the Clusters page, click the name of the target cluster.
- Click the Kibana Public Access tab. Turn on the Kibana Public Access switch to enable the Kibana public access function.
- On the displayed page, set parameters.
+
Table 2 Kibana public access parametersParameter
+ |
+Description
+ |
+
+
+Bandwidth
+ |
+Bandwidth for accessing Kibana with the public IP address
+Value range: 1 to 100
+Unit: Mbit/s
+ |
+
+Access Control
+ |
+If you disable this function, all IP addresses can access Kibana through the public IP address. If you enable this function, only the IP addresses or IP address ranges in the whitelist can access Kibana through the public IP address.
+ |
+
+Whitelist
+ |
+IP address or IP address range allowed to access a cluster. Use commas (,) to separate multiple addresses. This parameter can be configured only when Access Control is enabled.
+You are advised to enable this function.
+ |
+
+
+
+
+ - After you set the parameters, click OK.
+
+ Modifying Kibana Public AccessFor clusters with Kibana public access configured, you can modify the bandwidth and access control settings, or disable this function.
+ - Log in to the CSS management console.
- In the navigation pane, choose Clusters > OpenSearch.
- Choose Clusters in the navigation pane. On the Clusters page, click the name of the target cluster.
- Click the Kibana Public Access tab to modify the Kibana public access configuration.
- Modifying bandwidth
Click Modify on the right of Bandwidth. On the Modify Bandwidth page, modify the bandwidth and click OK.
+ - Modifying access control
Click Modify on the right of Access Control. On the Modify Access Control page, set Access Control and Whitelist, and click OK.
+ - Disabling Kibana public access
Toggle off the Kibana Public Access switch.
+
+
+
+ Accessing OpenSearch Dashboards with the Public IP AddressAfter configuring Kibana public access, you will obtain a public IP address that you can use to access OpenSearch Dashboards of this cluster.
+ - Log in to the CSS management console.
- In the navigation pane, choose Clusters > OpenSearch.
- Choose Clusters in the navigation pane. On the Clusters page, click the name of the target cluster.
- Click the Kibana Public Access tab to obtain the Kibana public IP address.
- Use this IP address to access OpenSearch Dashboards of this cluster through the Internet.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001640764229.html b/docs/css/umn/en-us_topic_0000001640764229.html
new file mode 100644
index 00000000..8e38c94b
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001640764229.html
@@ -0,0 +1,393 @@
+
+
+Connecting to a Dedicated Load Balancer
+This section describes how to connect a CSS cluster to a dedicated load balancer.
+ (Optional) Preparing a Self-signed CertificateIf the target ELB listener uses the HTTP protocol, skip this step.
+ Prepare and upload a self-signed certificate.
+ You are advised to use a certificate purchased in Cloud Certificate Manager (CCM) or issued by an authoritative organization.
+
+ - Log in to a Linux client where the OpenSSL tool and JDK are installed.
- Run the following commands to create a self-signed certificate:
+mkdir ca
+mkdir server
+mkdir client
+
+#Use OpenSSL to create a CA certificate.
+cd ca
+#Create the OpenSSL configuration file ca_cert.conf for the CA certificate.
+cat >ca_cert.conf <<EOF
+[ req ]
+distinguished_name = req_distinguished_name
+prompt = no
+
+[ req_distinguished_name ]
+ O = ELB
+EOF
+#Create private key file ca.key for the CA certificate.
+openssl genrsa -out ca.key 2048
+#Create the CSR file ca.csr for the CA certificate.
+openssl req -out ca.csr -key ca.key -new -config ./ca_cert.conf
+#Create a self-signed CA certificate ca.crt.
+openssl x509 -req -in ca.csr -out ca.crt -sha1 -days 5000 -signkey ca.key
+#Convert the CA certificate format to p12.
+openssl pkcs12 -export -clcerts -in ca.crt -inkey ca.key -out ca.p12
+#Convert the CA certificate format to JKS.
+keytool -importkeystore -srckeystore ca.p12 -srcstoretype PKCS12 -deststoretype JKS -destkeystore ca.jks
+
+
+#Use the CA certificate to issue a server certificate.
+cd ../server
+#Create the OpenSSL configuration file server_cert.conf for the server certificate. Change the CN field to the domain name or IP address of the server as required.
+cat >server_cert.conf <<EOF
+[ req ]
+distinguished_name = req_distinguished_name
+prompt = no
+
+[ req_distinguished_name ]
+ O = ELB
+ CN = 127.0.0.1
+EOF
+#Create the private key file server.key for the server certificate.
+openssl genrsa -out server.key 2048
+#Create the CSR request file server.csr for the server certificate.
+openssl req -out server.csr -key server.key -new -config ./server_cert.conf
+#Use the CA certificate to issue the server certificate server.crt.
+openssl x509 -req -in server.csr -out server.crt -sha1 -CAcreateserial -days 5000 -CA ../ca/ca.crt -CAkey ../ca/ca.key
+#Convert the server certificate format to p12.
+openssl pkcs12 -export -clcerts -in server.crt -inkey server.key -out server.p12
+#Convert the server certificate format to JKS.
+keytool -importkeystore -srckeystore server.p12 -srcstoretype PKCS12 -deststoretype JKS -destkeystore server.jks
+
+
+#Use the CA certificate to issue a client certificate.
+cd ../client
+#Create the OpenSSL configuration file client_cert.conf for the client certificate. Change the CN field to the domain name or IP address of the server as required.
+cat >client_cert.conf <<EOF
+[ req ]
+distinguished_name = req_distinguished_name
+prompt = no
+
+[ req_distinguished_name ]
+O = ELB
+CN = 127.0.0.1
+EOF
+#Create private key client.key for the client certificate.
+openssl genrsa -out client.key 2048
+#Create the CSR file client.csr for the client certificate.
+openssl req -out client.csr -key client.key -new -config ./client_cert.conf
+#Use the CA certificate to issue the client certificate client.crt.
+openssl x509 -req -in client.csr -out client.crt -sha1 -CAcreateserial -days 5000 -CA ../ca/ca.crt -CAkey ../ca/ca.key
+#Convert the client certificate to a p12 file that can be identified by the browser.
+openssl pkcs12 -export -clcerts -in client.crt -inkey client.key -out client.p12
+#Convert the client certificate format to JKS.
+keytool -importkeystore -srckeystore client.p12 -srcstoretype PKCS12 -deststoretype JKS -destkeystore client.jks
+ |
+
+
+ - Upload the self-signed certificate. For details, see .
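+(Optional) Before uploading, you can confirm that the issued certificates chain back to the self-signed CA. This is a quick check run from the directory that contains the ca, server, and client folders created by the script above:
+openssl verify -CAfile ca/ca.crt server/server.crt client/client.crt
+If both certificates were issued correctly, the command prints server/server.crt: OK and client/client.crt: OK.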
+
+ Creating a Dedicated Load Balancer- Log in to the ELB management console.
- Create a dedicated load balancer. For details, see Creating a Dedicated Load Balancer. Table 1 describes the parameters required for connecting a CSS cluster with a dedicated load balancer.
+
Table 1 Parameters for interconnecting a CSS cluster with a dedicated load balancerParameter
+ |
+Description
+ |
+Example
+ |
+
+
+Type
+ |
+Load balancer type. Select Dedicated.
+ |
+Dedicated
+ |
+
+Billing Mode
+ |
+Billing mode of the dedicated load balancer.
+ |
+Pay-per-use
+ |
+
+Region
+ |
+Region where the CSS cluster is located.
+ |
+-
+ |
+
+IP as Backend Servers
+ |
+A CSS cluster can be connected only after the cross-VPC backend is enabled.
+ |
+Enabled
+ |
+
+Network Type
+ |
+Type of the network used by the load balancer to provide services for external systems.
+ |
+Private IPv4 network
+ |
+
+VPC
+ |
+VPC where the load balancer works. This parameter is mandatory no matter which network type is selected.
+Select the VPC of the CSS cluster.
+ |
+-
+ |
+
+Subnet
+ |
+Subnet where the load balancer is to be created. This parameter is mandatory no matter which network type is selected.
+Select the subnet of the CSS cluster.
+ |
+-
+ |
+
+Specifications
+ |
+You are advised to select Application load balancing (HTTP/HTTPS), which provides better functions and performance.
+ |
+Application load balancing (HTTP/HTTPS)
+Small I
+ |
+
+
+
+
+
+
+ Interconnecting with a Load Balancer A cluster in security mode with HTTPS access enabled does not support HTTP protocol authentication. If you need to enable HTTP protocol authentication, disable the security mode of the cluster.
+ Before changing the security mode, disable load balancing. After the security mode is changed, enable load balancing.
+
+ - Log in to the CSS management console.
- In the navigation pane, choose Clusters > OpenSearch.
- On the Clusters page, select the cluster you want to connect to the load balancer and click the cluster name. The Cluster Information page is displayed.
- In the navigation pane, choose Load Balancing. Enable load balancing and configure basic load balancing information.
- Load Balancer: Select a created load balancer. You can also click Create Load Balancer to create one.
- Agency: Select an agency name. If no agency is available, click Create Agency to create one. The selected agency must have the ELB Administrator and ELB FullAccess permissions.
+ - Click OK. The listener configuration page is displayed.
- In the Listener Configuration area, click the icon to configure listener information.
+Table 2 Listener configuration informationParameter
+ |
+Description
+ |
+
+
+Frontend Protocol
+ |
+The protocol used by the client and listener to distribute traffic.
+Select a protocol as required.
+ |
+
+Frontend Port
+ |
+The port used by the client and listener to distribute traffic.
+For example, 9200. You need to specify this parameter as required.
+ |
+
+SSL Authentication
+ |
+Authentication mode for the client to access the server.
+Select an authentication mode as required.
+ |
+
+Server Certificate
+ |
+The server certificate is used for SSL handshake negotiation. The certificate content and private key must be provided.
+This parameter is mandatory when Frontend Protocol is set to HTTPS.
+ |
+
+CA Certificate
+ |
+Also called client CA public key certificate. It is used to verify the issuer of a client certificate.
+When the HTTPS two-way authentication is enabled, an HTTPS connection can be established only when the client can provide the certificate issued by a specified CA.
+This parameter is mandatory only when SSL Authentication is set to Two-way authentication.
+ |
+
+
+
+
+ - (Optional) In the Connection Mode area, you can click Settings next to Access Control to configure the IP addresses or network segments that are allowed to access the system. If you do not set the IP addresses or network segments, all IP addresses are allowed to access the system by default.
+ In the Health Check area, you can view the health check result of each node IP address. The following table describes the health check results.
+ Health Check Result
+ |
+Description
+ |
+
+
+Normal
+ |
+The IP address of the node is properly connected.
+ |
+
+Abnormal
+ |
+The node IP address cannot be connected properly, and the node is unavailable.
+ |
+
+
+
+
+
+
+ Accessing a Cluster Using the Curl CommandRun the following commands to check whether the dedicated load balancer can be connected to a cluster.
+
+ Table 3 Commands for accessing different clustersSecurity Mode
+ |
+Service Form Provided by ELB for External Systems
+ |
+Curl Command for Accessing a Cluster
+ |
+
+
+Non-security
+ |
+No authentication
+ |
+curl http://IP:9200
+ |
+
+One-way authentication
+ |
+curl -k --cert ./client.crt --key ./client.key https://IP:9200
+ |
+
+Two-way authentication
+ |
+curl --cacert ./ca.crt --cert ./client.crt --key ./client.key https://IP:9200
+ |
+
+Security mode + HTTP
+ |
+Password authentication
+ |
+curl http://IP:9200 -u user:pwd
+ |
+
+One-way authentication + Password authentication
+ |
+curl -k --cert ./client.crt --key ./client.key https://IP:9200 -u user:pwd
+ |
+
+Two-way authentication + Password authentication
+ |
+curl --cacert ./ca.crt --cert ./client.crt --key ./client.key https://IP:9200 -u user:pwd
+ |
+
+Security mode + HTTPS
+ |
+One-way authentication + Password authentication
+ |
+curl -k --cert ./client.crt --key ./client.key https://IP:9200 -u user:pwd
+ |
+
+Two-way authentication + Password authentication
+ |
+curl --cacert ./ca.crt --cert ./client.crt --key ./client.key https://IP:9200 -u user:pwd
+ |
+
+
+
+
+
+ Table 4 VariablesVariable
+ |
+Description
+ |
+
+
+IP
+ |
+ELB IP address
+ |
+
+user
+ |
+Username for accessing the CSS cluster
+ |
+
+pwd
+ |
+Password of the user
+ |
+
+
+
+
+ If the Elasticsearch cluster information is returned, the connection is successful. For example, if a security cluster using the HTTPS protocol is connected to a load balancer using two-way authentication, the information shown in Figure 1 is returned.
+ Figure 1 Accessing a cluster
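+The returned information is the standard Elasticsearch root response, similar to the following. The values are placeholders for illustration only:
+{
+  "name" : "css-node-1",
+  "cluster_name" : "css-cluster",
+  "cluster_uuid" : "...",
+  "version" : { "number" : "7.x.x", ... },
+  "tagline" : "You Know, for Search"
+}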
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001640773493.html b/docs/css/umn/en-us_topic_0000001640773493.html
new file mode 100644
index 00000000..f9871503
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001640773493.html
@@ -0,0 +1,29 @@
+
+
+Overview
+You can scale in or out a cluster and change cluster specifications. In this way, you can improve cluster efficiency and reduce O&M costs.
+ Scaling Out a Cluster- If a data node (ess) processes many data writing and querying requests and responds slowly, you can expand its storage capacity to improve its efficiency. If some nodes turn unavailable due to the excessive data volume or misoperations, you can add new nodes to ensure the cluster availability.
- Cold data nodes (ess-cold) are used to share the workload of data nodes. To prevent cold data loss, you can expand the storage capacity of the cold data node or add new ones.
+
+ Changing Specifications- If the allocation of new indexes or shards takes too long or the node coordination and scheduling are inefficient, you can change the master node (ess-master) specifications.
- If too many tasks need to be distributed or too many results have been aggregated, you can change the client node (ess-client) specifications.
- If the speed of data writing and query decreases suddenly, you can change the data node (ess) specifications.
- If cold data query becomes slow, you can change the cold node (ess-cold) specifications.
+
+ Scaling in a Cluster- If a cluster can process existing data without fully using its resources, you can scale in the cluster to reduce costs.
+
+ Removing Specified Nodes- If a cluster can process existing data without fully using its nodes, you can remove one or more specified nodes from the cluster to reduce costs.
+
+ Replacing a Specified Node- If a node in the cluster is faulty, you can create a new node with the same specifications to replace it.
+
+ Adding Master/Client Nodes- If the workloads on the data plane of a cluster increase, you can dynamically scale the cluster by adding master/client nodes.
+
+ Changing the Security Mode
+ After a cluster is created, its security mode can be changed in the following methods: - Change a non-security cluster to a security cluster that uses HTTP or HTTPS protocol.
- Change a security cluster that uses HTTP or HTTPS protocol to a non-security cluster.
- Change the protocol of a security cluster.
+
+ Changing AZs
+ You can Add AZ or Migrate AZ. - Add AZ: Add one or two AZs to a single-AZ cluster, or add an AZ to a dual-AZ cluster to improve cluster availability.
- Migrate AZ: Completely migrate data from the current AZ to another AZ that has sufficient resources.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001640773505.html b/docs/css/umn/en-us_topic_0000001640773505.html
new file mode 100644
index 00000000..c6f1e878
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001640773505.html
@@ -0,0 +1,68 @@
+
+
+Changing AZs
+CSS supports cross-AZ deployment. You can add an AZ to obtain more resources or improve cluster availability, and can migrate your current AZ to one with higher specifications. This section describes how to add or migrate your AZs.
+ DescriptionYou can Add AZ or Migrate AZ. - Add AZ: Add one or two AZs to a single-AZ cluster, or add an AZ to a dual-AZ cluster to improve cluster availability.
- Migrate AZ: Completely migrate data from the current AZ to another AZ that has sufficient resources.
+
+
+ Prerequisites- Ensure that an AZ with sufficient resources exists.
- The target cluster is available and has no tasks in progress.
- Make sure that no non-standard operations have been performed in the cluster. If you have made non-standard modifications, such as modifying return routes, system parameters, and Kibana configurations, these modifications will be lost after the AZ change and your services may be affected.
+
+ Constraints- To ensure service continuity, the total number of data nodes and cold data nodes in a cluster must be greater than or equal to 3.
- During the change, nodes are brought offline one by one and then new nodes are created. Ensure that the remaining nodes have enough disk capacity to store all the data of a node that is brought offline.
- To prevent replica allocation failures after a node is brought offline during the change, ensure that the primary and replica shards of each index can still be allocated to the remaining data nodes and cold data nodes. That is, the maximum number of primary and replica shards of an index plus 1 must be less than or equal to the total number of data nodes and cold data nodes in the current cluster.
- You are advised to back up data before the change to prevent data loss caused by upgrade faults.
- Before a change completes, some nodes may have been moved to a new AZ. In this case, the AZs before and after the change are both displayed. After the change succeeds, the new AZs and their nodes will be displayed properly.
- When adding AZs, the current AZ must be retained in the change. When adding one or two AZs to a single-AZ cluster, you must change AZs for all nodes at the same time. When adding an AZ to a dual-AZ cluster, you can change AZs for a single type of nodes or all nodes in a cluster at a time. For example, in a cluster using the dual-AZ architecture, you can use the three-AZ architecture for master nodes alone. During HA modification, the nodes with the smallest configurations are modified to rebuild the cluster. After the HA modification is complete, the YML configuration of the nodes that are not modified is also updated. You need to restart the cluster to make the modification take effect.
- When migrating an AZ, you can select only one target AZ. You can migrate AZs for a single type of nodes or all nodes in a cluster at a time. For example, in a cluster with two AZs, you can migrate the AZ of the master node to the other AZ. After adding AZs, you need to restart the cluster to make the modification take effect.
+
+ Procedure- Log in to the CSS management console.
- In the navigation pane, choose a cluster type. The cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- Click the Change AZ tab.
- On the Change AZ page, set parameters.
+
Table 1 Parameters for changing AZsParameter
+ |
+Description
+ |
+
+
+Operation Type
+ |
+- Add AZ: Add one or two AZs to a single-AZ cluster, or add an AZ to a dual-AZ cluster.
During HA modification, the nodes with the smallest configurations are modified to rebuild the cluster. After the HA modification is complete, the YML configuration of the nodes that are not modified is also updated. You need to restart the cluster to make the modification take effect.
+ - Migrate AZ: Migrate data from one AZ to another.
After adding AZs, you need to restart the cluster to make the modification take effect.
+
+ |
+
+Node Type
+ |
+Select a type of node or All nodes to change their AZ.
+ NOTE: When adding one or two AZs to a single-AZ cluster, you can only select All nodes to change AZs for all nodes at a time.
+
+ |
+
+Current AZ
+ |
+Current AZ of a cluster
+ |
+
+Target AZ
+ |
+Target AZ.
+- Add AZ: Select up to three AZs, which must include all your current AZs.
- Migrate AZ: Select only one target AZ, which cannot be your current AZ.
+ |
+
+Agency
+ |
+Select an IAM agency to grant the current account the permission to change AZs.
+If no agencies are available, click Create IAM Agency to go to the IAM console and create an agency.
+ NOTE: The selected agency must be authorized with the Tenant Administrator or VPC Administrator policy.
+
+ |
+
+
+
+
+ - Click Submit. Choose whether to check that all indexes have been backed up, and click OK to start the change.
- The current AZ change task is displayed in the task list. If the task status is Running, expand the task list and click View Progress to view the progress details.
If the task status is Failed, you can retry or terminate the task.
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001640777441.html b/docs/css/umn/en-us_topic_0000001640777441.html
new file mode 100644
index 00000000..03f40aae
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001640777441.html
@@ -0,0 +1,111 @@
+
+
+Configuring YML Parameters
+You can modify the elasticsearch.yml file.
+ Modifying Parameter Configurations- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, click the name of the target cluster. The cluster information page is displayed.
- Click Parameter Configurations and click Edit to modify module parameters as required.
+
Table 1 Module parametersModule Name
+ |
+Parameter
+ |
+Description
+ |
+
+
+Cross-domain Access
+ |
+http.cors.allow-credentials
+ |
+Indicates whether to return the Access-Control-Allow-Credentials header during cross-domain access.
+Value: true or false
+Default value: false
+ |
+
+http.cors.allow-origin
+ |
+Origin IP address allowed for cross-domain access, for example, 122.122.122.122:9200
+ |
+
+http.cors.max-age
+ |
+Default browser cache duration. The cache is automatically cleared after the time range you specified.
+Unit: s
+Default value: 1728000
+ |
+
+http.cors.allow-headers
+ |
+Headers allowed for cross-domain access, including X-Requested-With, Content-Type, and Content-Length. Use commas (,) and spaces to separate headers.
+ |
+
+http.cors.enabled
+ |
+Indicates whether to allow cross-domain access.
+Value: true or false
+Default value: false
+ |
+
+http.cors.allow-methods
+ |
+Methods allowed for cross-domain access, including OPTIONS, HEAD, GET, POST, PUT, and DELETE. Use commas (,) and spaces to separate methods.
+ |
+
+Reindexing
+ |
+reindex.remote.whitelist
+ |
+Configure this parameter to migrate data from the current cluster to the target cluster through the reindex API (see the example request after this procedure). The example value is 122.122.122.122:9200.
+ |
+
+Custom Cache
+ |
+indices.queries.cache.size
+ |
+Cache size in the query phase
+Value range: 1 to 100.
+Unit: %
+Default value: 10%
+ |
+
+Queue Size in a Thread Pool
+ |
+thread_pool.bulk.queue_size
+ |
+Queue size in the bulk thread pool. The value is an integer. You need to customize this parameter.
+Default value: 200
+ |
+
+thread_pool.write.queue_size
+ |
+Queue size in the write thread pool. The value is an integer. You need to customize this parameter.
+Default value: 200
+ |
+
+thread_pool.force_merge.size
+ |
+Queue size in the force merge thread pool. The value is an integer.
+Default value: 1
+ |
+
+Customize
+ |
+You can add parameters based on your needs.
+ |
+Customized parameters
+ NOTE: - Enter multiple values in the format of [value1, value2, value3...].
- Separate values by commas (,) and spaces.
- Colons (:) are not allowed.
+
+ |
+
+
+
+
+ - After the modification is complete, click Submit. In the displayed Submit Configuration dialog box, select the box indicating "I understand that the modification will take effect after the cluster is restarted." and click Yes.
If the Status is Succeeded in the parameter modification list, the modification has been saved. Up to 20 modification records can be displayed.
+ - Return to the cluster list and choose More > Restart in the Operation column to restart the cluster and make the modification take effect.
- You need to restart the cluster after modification, or Configuration unupdated will be displayed in the Task Status column on the Clusters page.
- If you restart the cluster after the modification and Task Status displays Configuration error, the parameter configuration file failed to be modified.
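+For reference, the reindex.remote.whitelist parameter above works together with the remote source feature of the _reindex API. The following is a minimal sketch of a remote reindex request; the endpoint, host, index names, and credentials are placeholders:
+curl -XPOST 'http://CSS-cluster-IP:9200/_reindex?pretty' -H 'Content-Type: application/json' -d '
+{
+  "source": {
+    "remote": { "host": "http://122.122.122.122:9200", "username": "user", "password": "pwd" },
+    "index": "source_index"
+  },
+  "dest": { "index": "dest_index" }
+}'
+The remote host must already be listed in reindex.remote.whitelist on the cluster that receives this request, and the username and password are only needed if the remote cluster has security enabled.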
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001640879293.html b/docs/css/umn/en-us_topic_0000001640879293.html
new file mode 100644
index 00000000..6db79a05
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001640879293.html
@@ -0,0 +1,20 @@
+
+
+Restarting a Cluster
+If a cluster becomes faulty, you can restart it to check if it can run normally.
+ Prerequisites- The target cluster is not frozen and has no task in progress.
- If a cluster is available, ensure that it has stopped processing service requests (such as importing data and searching for data). Otherwise, data may be lost when the cluster is restarted. You are advised to perform this operation during off-peak hours.
+
+ ContextCSS supports quick restart and rolling restart.
+ Quick Restart- All clusters support this function.
- If you select a node type for quick restart, all nodes of the selected type will be restarted together.
- If you select a node name for quick restart, only the specified node will be restarted.
- The cluster is unavailable during quick restart.
+
+
+ Quick Restart- Log in to the CSS management console.
- In the navigation pane, choose Clusters > OpenSearch.
- In the Operation column of the target cluster, choose More > Restart.
- On the Restart Cluster page, select Quick Restart.
You can quick restart nodes by Node type or Node name. If you select Node type, you can select multiple node types and quickly restart them at the same time. If you select Node name, you can perform quick restart on only one node at a time.
+ - Refresh the page and check the cluster status. During the restart, the cluster status is Processing, and the task status is Restarting. If the cluster status changes to Available, the cluster has been restarted successfully.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001640883633.html b/docs/css/umn/en-us_topic_0000001640883633.html
new file mode 100644
index 00000000..6773e505
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001640883633.html
@@ -0,0 +1,23 @@
+
+
+Index Backup and Restoration
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001640892937.html b/docs/css/umn/en-us_topic_0000001640892937.html
new file mode 100644
index 00000000..b8625921
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001640892937.html
@@ -0,0 +1,19 @@
+
+
+Replacing a Specified Node
+If a node in the cluster is faulty, you can create a new node with the same specifications to replace it.
+ PrerequisitesThe target cluster is available and has no tasks in progress.
+
+ Constraints- Only one node can be replaced at a time.
- The ID, IP address, specifications, and AZ of the new node will be the same as those of the original one.
- The configurations you modified manually will not be retained after node replacement. For example, if you have manually added a return route to the original node, you need to add it to the new node again after the node replacement is complete.
- If the node you want to replace is a data node (ess) or cold data node (ess-cold), pay attention to the following precautions:
- For data node replacement, data from the original node will be migrated to other nodes, and then the node will be rebuilt. Therefore, the total number of replicas and primary shards of each index in the cluster must be less than the total number of data nodes (including ess and ess-cold) in the cluster. The time required for node replacement is closely related to the time required for migrating data to other nodes.
- The AZ of the node to be replaced must have two or more data nodes (including ess and ess-cold).
- If the cluster of the node to be replaced does not have a master node (ess-master), the number of available data nodes (including ess and ess-cold) in the cluster must be greater than or equal to 3.
- The preceding precautions do not apply if you are replacing a master node (ess-master) or client node (ess-client).
- The precautions 1 to 4 do not apply if you are replacing a faulty node, regardless of its type. Faulty nodes are not included in _cat/nodes.
+
+
+ Procedure- Log in to the CSS management console.
- In the navigation pane, choose a cluster type. The cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- On the Modify Configuration page, click the Replace Node tab.
- On the Replace Node tab page, set the following parameters:
- Whether to perform data migration: If this option is selected, data migration is performed. If the target node has disabled indexes or indexes that have no replicas, this option must be selected.
- Select the node to be replaced in the data node table.
+ - Click Submit.
- Click Back to Cluster List to switch to the Clusters page. The Task Status is Upgrading. When Cluster Status changes to Available, the node has been successfully replaced.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001640998693.html b/docs/css/umn/en-us_topic_0000001640998693.html
new file mode 100644
index 00000000..7499679c
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001640998693.html
@@ -0,0 +1,15 @@
+
+
+Deleting a Cluster
+You can delete clusters that you no longer need.
+ - If you delete a cluster, the cluster service data will be cleared. Exercise caution when performing this operation.
- The snapshots of a cluster stored in OBS are not deleted with the cluster. You can restore a deleted cluster using its snapshots stored in the OBS bucket.
+
+ Procedure- Log in to the CSS management console.
- In the navigation pane, choose Clusters > OpenSearch.
- Locate the target cluster and click More > Delete in the Operation column.
- In the displayed dialog box, enter the name of the cluster to be deleted and click OK.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001641003025.html b/docs/css/umn/en-us_topic_0000001641003025.html
new file mode 100644
index 00000000..61958331
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001641003025.html
@@ -0,0 +1,17 @@
+
+
+Quickly Accessing an OpenSearch Cluster
+OpenSearch clusters have built-in Kibana and Cerebro components. You can quickly access an OpenSearch cluster through Kibana and Cerebro.
+ Accessing a Cluster Through Kibana- Log in to the CSS management console.
- In the navigation pane, choose Clusters > OpenSearch.
- On the Clusters page, locate the target cluster and click Access Kibana in the Operation column to go to the OpenSearch Dashboards login page.
- Non-security cluster: The OpenSearch Dashboards console is displayed.
- Security cluster: Enter the username and password on the login page and click Log In to go to the OpenSearch Dashboards console. The default username is admin and the password is the one specified during cluster creation.
+ - After the login is successful, access the cluster and perform related operations on the OpenSearch Dashboards.
+
+ Accessing a Cluster Through Cerebro- Log in to the CSS management console.
- In the navigation pane, choose Clusters > OpenSearch.
- On the Clusters page, locate the target cluster and click More > Cerebro in the Operation column to go to the Cerebro login page.
- Non-security cluster: Click the cluster name on the Cerebro login page to go to the Cerebro console.
- Security cluster: Click the cluster name on the Cerebro login page, enter the username and password, and click Authenticate to go to the Cerebro console. The default username is admin and the password is the one specified during cluster creation.
+ - After the login is successful, you can access clusters through Cerebro.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001641003029.html b/docs/css/umn/en-us_topic_0000001641003029.html
new file mode 100644
index 00000000..0ef98b30
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001641003029.html
@@ -0,0 +1,12 @@
+
+
+Backup and Restoration Overview
+You can back up index data in clusters. If data loss occurs or you want to retrieve data of a specified duration, you can restore the index data. Index backup is implemented by creating cluster snapshots. When creating a backup for the first time, you are advised to back up data of all indexes.
+ - Managing Automatic Snapshot Creation: Snapshots are automatically created at a specified time each day according to the rules you create. You can enable or disable the automatic snapshot creation function and set the automatic snapshot creation policy.
- Manually Creating a Snapshot: You can manually create a snapshot at any time to back up all data or data of specified indexes.
- Restoring Data: You can use existing snapshots to restore the backup index data to a specified cluster.
- Deleting a Snapshot: Delete unnecessary snapshots and release resources.
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001641012329.html b/docs/css/umn/en-us_topic_0000001641012329.html
new file mode 100644
index 00000000..03c5eba4
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001641012329.html
@@ -0,0 +1,24 @@
+
+
+Changing Specifications
+If the workloads on the data plane of a cluster change, you can change its node specifications as needed.
+ Prerequisites- The target cluster is available and has no tasks in progress.
- The target cluster has sufficient quotas available.
- When changing the node specifications, ensure that all service data has copies so the services will not be interrupted.
Run the GET _cat/indices?v command in Kibana. If the returned rep value is greater than 0, the data has copies. If the returned rep value is 0, the data has no copies. In this case, create a snapshot for the cluster by referring to Manually Creating a Snapshot. (An example of this check is provided after this list.)
+ - If the data volume is large, it may take long to modify the node specifications. You are advised to modify specifications during off-peak hours.
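+If you prefer a terminal over Kibana, the same check can be run with curl. This is a minimal sketch; the endpoint and credentials are placeholders, and the protocol and authentication options should match how you normally access the cluster:
+curl -u user:pwd 'http://CSS-cluster-IP:9200/_cat/indices?v'
+In the output, the rep column shows the number of replicas configured for each index; any index whose rep value is 0 has no copies.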
+
+ Constraints- The number of nodes and the capacity of node storage cannot be changed. You can add nodes and increase the node storage capacity by referring to Scaling Out a Cluster. For details about how to reduce the number of nodes, see Scaling in a Cluster.
- After decreasing cluster specifications, the cluster performance will deteriorate and service capabilities will be affected. Exercise caution when performing this operation.
- If a cluster has multiple node types, you can change the specifications of only one type at a time. After the change, nodes in other types still maintain their original specifications.
- Kibana is unavailable during specification change.
- During the specification modification, the nodes are stopped and restarted in sequence. It is a rolling process.
+
+ Procedure- Log in to the CSS management console.
- In the navigation pane, choose a cluster type. The cluster management page is displayed.
- Choose More > Modify Configuration in the Operation column of the target cluster. The Modify Configuration page is displayed.
- On the Modify Configuration page, choose the Scale Cluster tab and click Change Specifications to set parameters.
- Action: select Change specifications.
- Resources: The changed amount of resources.
- Nodes: Specifications of the default data nodes. Select the required specifications from the Node Specifications drop-down list and select the nodes whose specifications you want to change.
- If a cluster has master nodes, client nodes, or cold data nodes, you can change their specifications.
+ - Click Next.
- Confirm the information and click Submit.
- In the dialog box that is displayed, confirm whether to select Verify index copies and Cluster status check and click OK to start the specifications change.
Index copy verification:
+By default, CSS checks for indexes that do not have copies. You can skip this step, but the lack of index copies may affect services during a cluster specifications change. - If you selected Verify index copies and the cluster has no master node, indexes must have at least one copy and the cluster must have at least three nodes.
- If you selected Verify index copies and the cluster has master nodes, indexes must have at least one copy.
+
+Cluster status check:
+The cluster status is checked before the specifications change by default. The specifications of nodes are changed one by one to ensure success and data security. If a cluster is overloaded and services are faulty, the request for a specifications change will not be delivered. In this case, you can disable the cluster status check. If you skip the cluster status check before the specifications change, the cluster may become faulty and services may be interrupted. Exercise caution when performing this operation. (You can also check the cluster health yourself, as shown in the example after this procedure.)
+ - Click Back to Cluster List to switch to the Clusters page. The Cluster Status is Configuration modified. When Cluster Status changes to Available, the cluster specifications have been successfully modified.
+
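+To gauge the cluster health yourself before requesting the change, you can query the _cluster/health API. This is a minimal sketch; the endpoint and credentials are placeholders:
+curl -u user:pwd 'http://CSS-cluster-IP:9200/_cluster/health?pretty'
+A status of green means all primary and replica shards are allocated, yellow means some replica shards are unassigned, and red means some primary shards are unassigned and is the riskiest starting point for a specifications change.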
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001641016221.html b/docs/css/umn/en-us_topic_0000001641016221.html
new file mode 100644
index 00000000..b72c8811
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001641016221.html
@@ -0,0 +1,42 @@
+
+
+Changing an Index Policy
+You can change any managed index policy. ISM has constraints to ensure that policy changes do not break indexes.
+ If an index is stuck in its current status and you want to update its policy immediately, make sure that the new policy includes the same status (same name, action, and order) as the old policy. In this case, ISM applies the new policy even if the old policy is being executed.
+ If the new policy you use does not include the same status as the old policy, ISM updates the policies only after all actions in the current status are completed. Alternatively, you can select a specific status in the old policy and make the new policy take effect.
+ Perform the following steps to change a policy in the OpenSearch Dashboards:
+ - On the Index Management page of the OpenSearch Dashboards, select the index policy you want to change.
- Click Change policy in the upper right corner. In the Choose managed indices and Choose new policy areas, select information about the new policy.
Figure 1 Changing an index policy
+
+ Table 1 Parameters required for changing a policyParameter
+ |
+Description
+ |
+
+
+Managed indices
+ |
+Select the indexes to which you want to attach the new policy. Multiple indexes can be selected.
+ |
+
+State filters
+ |
+Select an index status. When a status is selected, the new policy is attached to an index in this status.
+ |
+
+New policy
+ |
+Select a new policy.
+ |
+
+
+
+
+
+ - After configuration is complete, click Change.
+
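+If you prefer to script this change instead of using the dashboard, the OpenSearch ISM plugin provides a change_policy endpoint that corresponds to this page. This is a sketch under the assumption that your cluster exposes the _plugins/_ism path (older Open Distro based clusters use _opendistro/_ism instead); the endpoint, index name, policy ID, and state names are placeholders:
+curl -u user:pwd -XPOST 'http://CSS-cluster-IP:9200/_plugins/_ism/change_policy/my_index' -H 'Content-Type: application/json' -d '
+{
+  "policy_id": "new_policy",
+  "state": "hot",
+  "include": [ { "state": "cold" } ]
+}'
+Here, policy_id names the new policy, state is the state to start in under the new policy, and include restricts the change to indexes currently in the listed states, which corresponds to the State filters field in Table 1.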
+
+
diff --git a/docs/css/umn/en-us_topic_0000001647464345.html b/docs/css/umn/en-us_topic_0000001647464345.html
new file mode 100644
index 00000000..96428324
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001647464345.html
@@ -0,0 +1,58 @@
+
+
+Managing Tags
+Tags are cluster identifiers. Adding tags to clusters can help you identify and manage your cluster resources.
+ You can add tags to a cluster when creating the cluster or add them on the details page of the created cluster.
+ If your organization has enabled tag policies for CSS, you must comply with the tag policy rules when creating clusters; otherwise, clusters may fail to be created. Contact the organization administrator to learn more about tag policies.
+ Managing Tags of a New Cluster- Log in to the CSS management console.
- Click Create Cluster in the upper right corner. The Create Cluster page is displayed.
- On the Create Cluster page, set Advanced Settings to Custom. Add tags for a cluster.
You can select a predefined tag and set Tag value for the tag. You can click View Predefined Tag to switch to the TMS management console and view existing tags.
+You can also create new tags by specifying Tag key and Tag value.
+You can add a maximum of 20 tags for a CSS cluster. If the entered tag is incorrect, you can click Delete on the right of the tag to delete the tag. If you do not want to add tags, leave this parameter blank.
+
+Table 1 Naming rules for a tag key and valueParameter
+ |
+Description
+ |
+
+
+Tag key
+ |
+Must be unique in a cluster.
+The value cannot contain more than 64 characters.
+It can contain only numbers, letters, and the following special characters: _.:=+-@/ The value cannot start or end with a space.
+Cannot be left blank.
+ |
+
+Tag value
+ |
+The value cannot contain more than 64 characters.
+It can contain only numbers, letters, and the following special characters: _.:=+-@/ The value cannot start or end with a space.
+Cannot be left blank.
+ |
+
+
+
+
+
+
+ Managing Tags of Existing ClustersYou can modify, delete, or add tags for a cluster.
+ - Log in to the CSS management console.
- In the navigation pane, choose Clusters > OpenSearch.
- On the Clusters page, click the name of a cluster for which you want to manage tags.
The Basic Information page is displayed.
+ - In the navigation pane on the left, choose the Tags tab. You can add, modify, or delete tags.
- View
On the Tags page, you can view details about tags of the cluster, including the number of tags and the key and value of each tag.
+ - Add
Click Add in the upper left corner. In the displayed Add Tag dialog box, enter the key and value of the tag to be added, and click OK.
+ - Modify
You can only change the value of an existing tag.
+In the Operation column of a tag, click Edit. In the displayed Edit Tag page, enter a new tag value and click OK.
+ - Delete
In the Operation column of a tag, click Delete. After confirmation, click Yes on the displayed Delete Tag page.
+
+
+
+ Searching for Clusters by Tag- Log in to the CSS management console.
- In the navigation pane, choose Clusters > OpenSearch.
- On the Clusters page, click Search by Tag in the upper right corner of the cluster list.
- Select or enter the tag key and tag value you want to search for, and click Add to add the tag to the search text box.
You can select a tag key or tag value from their drop-down lists. The system returns a list of clusters that exactly match the tag key or tag value. If you enter multiple tags, only the clusters that match all of the specified tags will be displayed.
+You can add a maximum of 10 tags at one time.
+ - Click Search.
The system searches for the target cluster by tag key and value.
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001667545170.html b/docs/css/umn/en-us_topic_0000001667545170.html
new file mode 100644
index 00000000..9f712b33
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001667545170.html
@@ -0,0 +1,20 @@
+
+
+What Is Cloud Search Service?
+Cloud Search Service (CSS) is a fully hosted distributed search service based on Elasticsearch. You can use it for structured and unstructured data search, and use AI vectors for combined search, statistics, and reports. CSS is a fully managed cloud service of the ELK Stack and is compatible with open-source Elasticsearch, Kibana, and Cerebro.
+ Elasticsearch is an open-source distributed search engine that can be deployed in standalone or cluster mode. As the heart of the ELK Stack, Elasticsearch clusters support multi-condition search and statistical analysis, and can create visualized reports for structured and unstructured text. For details about Elasticsearch, see Elasticsearch: The Definitive Guide.
+ CSS can be automatically deployed, allowing you to quickly create Elasticsearch clusters. It provides built-in search engine optimization practices and requires no O&M on your part. Additionally, it has a robust monitoring system that presents key metrics, including cluster status and query performance, so that you can focus on your business logic.
+
+ Functions- Compatible with Elasticsearch
Freely use native Elasticsearch APIs and other software in the ecosystem, such as Beats and Kibana.
+ - Support for various data sources
With a few simple configurations, you can smoothly connect to multiple data sources, such as FTP, OBS, HBase, and Kafka. No extra coding is required.
+ - One-click operation
One-click cluster application, capacity expansion, and restart from small-scale testing to large-scale rollout
+ - User-defined snapshot policies
Trigger backup snapshots manually or configure an automated schedule.
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001667545182.html b/docs/css/umn/en-us_topic_0000001667545182.html
new file mode 100644
index 00000000..d497299e
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001667545182.html
@@ -0,0 +1,12 @@
+
+
+Quotas
+CSS uses the following resources:
+ - Instance
- CPU
- Memory (GB)
- Disk quantity
- Disk size (GB)
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001667704882.html b/docs/css/umn/en-us_topic_0000001667704882.html
new file mode 100644
index 00000000..5b943cad
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001667704882.html
@@ -0,0 +1,19 @@
+
+
+Product Components
+CSS supports Kibana and Cerebro.
+ KibanaKibana is an open-source data analytics and visualization platform that works with Elasticsearch. You can use Kibana to search for and view data stored in Elasticsearch indexes and display data in charts and maps. For details about Kibana, visit https://www.elastic.co/guide/en/kibana/current/index.html.
+ By default, the Elasticsearch cluster of CSS provides the access channel to Kibana. You can quickly access Kibana without installing it. CSS is compatible with Kibana visualizations and Elasticsearch statistical and analysis capabilities.
+ - Over 10 data presentation modes
- Nearly 20 data statistics methods
- Classification in various dimensions, such as time and tag
+
+ CerebroCerebro is an open-source Elasticsearch web admin tool built with Scala, Play Framework, AngularJS, and Bootstrap. Cerebro allows you to manage clusters on a visualized page, for example, by executing REST requests, modifying Elasticsearch configurations, and monitoring disks, cluster loads, and memory usage in real time.
+ By default, the Elasticsearch cluster of CSS provides the access channel to Cerebro. You can quickly access Cerebro without installing it. CSS is fully compatible with the open-source Cerebro and adapts to the latest 0.8.4 version.
+ - Elasticsearch visualized and real-time load monitoring
- Elasticsearch visualized data management
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001667704890.html b/docs/css/umn/en-us_topic_0000001667704890.html
new file mode 100644
index 00000000..f7c9f358
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001667704890.html
@@ -0,0 +1,43 @@
+
+
+Advantages
+CSS has the following features and advantages.
+ Efficient and Easy to UseYou can get insights from terabyte-scale data in milliseconds. In addition, you can use the visualized platform for data display and analysis.
+
+ Flexible and ScalableYou can request resources as needed and perform capacity expansion online with zero service interruption.
+
+ Easy O&MCSS is a fully managed, out-of-the-box service. You can start using it with just a few clicks, without having to manage clusters yourself.
+
+ Kernel Enhancement- Vector search
When you search for unstructured data, such as images, videos, and corpuses, the nearest neighbors or approximate nearest neighbors are searched based on feature vectors.
+ - Decoupled storage and compute
CSS provides an API for freezing indexes. Hot data stored on SSD can be dumped to OBS to reduce data storage costs and decouple compute from storage.
+ - Flow control
CSS can control traffic at the node level. You can configure the blacklist and whitelist, the maximum concurrent HTTPS connections, and the maximum HTTP connections for a node. Each function has an independent control switch.
+ - Large query isolation
CSS allows you to separately manage large queries. You can isolate query requests that consume a large amount of memory or take a long period of time.
+ - Index monitoring
CSS monitors various metrics of the running status and change trend of cluster indexes to measure service usage and handle potential risks in a timely manner, ensuring that clusters can run stably.
+ - Enhanced monitoring
CSS supports enhanced cluster monitoring. It can monitor the P99 latency of cluster search requests and the HTTP status codes of clusters.
+
+
+ High ReliabilityYou can choose to trigger snapshots manually or on a periodic basis for backup and restore snapshots to the current or other clusters. Snapshots of a cluster can be restored to another cluster to implement cluster data migration.
+ - Automatic backup using snapshots
CSS provides the backup function. You can enable the automatic backup function on the CSS management console and set the backup period based on the actual requirements.
+Automatic backup backs up the index data of a cluster. Index backup is implemented by creating cluster snapshots. When backing up data for the first time, you are advised to back up all index data.
+CSS allows you to store the snapshot data of Elasticsearch instances to OBS, thereby achieving cross-region backup with the cross-region replication function of OBS.
+
+
+
+ High SecurityCSS ensures secure running of data and services from the following aspects:
+ - Network isolation
The network is divided into two planes, service plane and management plane. The two planes are deployed and isolated physically to ensure the security of the service and management networks.
+- Service plane: refers to the network plane of the cluster. It provides service channels for users and delivers data definition, index, and search capabilities.
- Management plane: refers to the management console. It is used to manage CSS.
- VPC security groups or isolated networks ensure the security of hosts.
+ - Access control
- Using the network access control list (ACL), you can permit or deny the network traffic entering and exiting the subnets.
- Internal security infrastructure (including the network firewall, intrusion detection system, and protection system) can monitor all network traffic that enters or exits the VPC through the IPsec VPN.
- User authentication and index-level authentication are supported. CSS also supports interconnection with third-party user management systems.
+ - Data security
- In CSS, the multi-replica mechanism is used to ensure user data security.
- Communication between the client and server can be encrypted using SSL.
+ - Operation audit
Cloud Trace Service (CTS) can be used to perform auditing on key logs and operations.
+
+
+ High AvailabilityTo prevent data loss and minimize the cluster downtime in case of service interruption, CSS supports cross-AZ cluster deployment. When creating a cluster, you can select two or three AZs in the same region. The system will automatically allocate nodes to these AZs. If an AZ is faulty, the remaining AZs can still run properly, significantly enhancing cluster availability and improving service stability.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001715624649.html b/docs/css/umn/en-us_topic_0000001715624649.html
new file mode 100644
index 00000000..8b987fb3
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001715624649.html
@@ -0,0 +1,29 @@
+
+
+Basic Concepts
+ClusterCSS provides functions on a per cluster basis. A cluster represents an independent search service that consists of multiple nodes.
+
+ IndexAn index stores Elasticsearch data. It is a logical space in which one or more shards are grouped.
+
+ ShardAn index can potentially store a large amount of data that can exceed the hardware limits of a single node. To solve this problem, Elasticsearch provides the ability to subdivide your index into multiple pieces called shards. When you create an index, you can simply define the number of shards that you want. Each shard is in itself a fully-functional and independent "index" that can be hosted on any node in the cluster.
+ You need to specify the number of shards before creating an index and cannot change the number after the index is successfully created.
+
+ ReplicaA replica is a copy of the actual storage index in a shard. It can be understood as a backup of the shard. Replicas help prevent single point of failures (SPOFs). You can increase or decrease the number of replicas based on your service requirements.
+
+ DocumentAn entity for Elasticsearch storage. Equivalent to the row in the RDB, the document is the basic unit that can be indexed.
+
+ Document TypeSimilar to a table in the RDB, type is used to distinguish between different data.
+ In versions earlier than Elasticsearch 7.x, each index can contain multiple document types. Elasticsearch defines a type for each document.
+ Elasticsearch 7.x and later versions only support documents of the _doc type.
+
+ MappingA mapping is used to restrict the type of a field and can be automatically created based on data. It is similar to the schema in the database.
+
+ FieldThe field is the minimum unit of a document. It is similar to the column in the database.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001715624661.html b/docs/css/umn/en-us_topic_0000001715624661.html
new file mode 100644
index 00000000..67df32e1
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001715624661.html
@@ -0,0 +1,62 @@
+
+
+Related Services
+Figure 1 shows the relationships between CSS and other services.
+ Figure 1 Relationships between CSS and other services
+
+ Table 1 Relationships between CSS and other servicesService
+ |
+Description
+ |
+
+
+Virtual Private Cloud (VPC)
+ |
+CSS clusters are created in the subnets of a VPC. VPCs provide a secure, isolated, and logical network environment for your clusters.
+ |
+
+Elastic Cloud Server (ECS)
+ |
+In a CSS cluster, each node represents an ECS. When you create a cluster, ECSs are automatically created.
+ |
+
+Elastic Volume Service (EVS)
+ |
+CSS uses EVS to store index data. When you create a cluster, EVSs are automatically created for cluster data storage.
+ |
+
+Object Storage Service (OBS)
+ |
+Snapshots of CSS clusters are stored in OBS buckets.
+ |
+
+Identity and Access Management (IAM)
+ |
+IAM authenticates access to CSS.
+ |
+
+Cloud Eye
+ |
+CSS uses Cloud Eye to monitor cluster metrics in real time. The supported metrics include disk usage and cluster health status, which let you keep track of the storage consumption and health of your clusters.
+ |
+
+Cloud Trace Service (CTS)
+ |
+With CTS, you can record operations associated with CSS for later query, audit, and backtracking.
+ |
+
+Key Management Service (KMS)
+ |
+If disk encryption is enabled on CSS clusters, you need to obtain the key provided by KMS to encrypt and decrypt the disk data.
+ |
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001715624665.html b/docs/css/umn/en-us_topic_0000001715624665.html
new file mode 100644
index 00000000..95ba6223
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001715624665.html
@@ -0,0 +1,362 @@
+
+
+Permissions Management
+If you need to assign different permissions to employees in your organization to access your CSS resources, IAM is a good choice for fine-grained permissions management. IAM provides identity authentication, permissions management, and access control.
+ If your account meets your requirements, you do not need to create independent IAM users for permissions management, and you can skip this section. Skipping it does not affect your use of other CSS functions.
+ With IAM, you can use your account to create IAM users for your employees and assign permissions to the users to control their access to your resources. IAM is free of charge. You pay only for the resources you purchase.
+ Permissions ManagementNew IAM users do not have any permissions assigned by default. You need to first add them to one or more groups and attach policies or roles to these groups. The users then inherit permissions from the groups and can perform specified operations on cloud services based on the permissions they have been assigned.
+ CSS is a project-level service deployed in specific physical regions. Therefore, CSS permissions are assigned to projects in specific regions and only take effect in these regions. If you want the permissions to take effect in all regions, you need to assign the permissions to projects in each region. When accessing CSS, the users need to switch to a region where they have been authorized to use cloud services.
+ You can use roles and policies to grant users permissions.
+ - Roles are a type of coarse-grained authorization mechanism that defines permissions related to user responsibilities. Only a limited number of service-level roles are available for granting permissions to users. When using roles to grant permissions, you also need to assign any dependency roles. Roles are not ideal for fine-grained authorization and secure access control.
- Policies are a type of fine-grained authorization mechanism that defines the permissions for performing operations on specific cloud resources under certain conditions. This mechanism allows for more flexible and more secure access control. For example, a CSS administrator can grant CSS users only the permissions needed to manage a particular type of CSS resources. A sketch of the policy JSON structure is shown below.
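+ For illustration only, the following is a minimal sketch of the JSON structure such a policy uses. The action names css:cluster:list and css:cluster:create are assumptions made for this example; take the actual CSS actions from the policy view on the IAM console.
+{
+    "Version": "1.1",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": [
+                "css:cluster:list",
+                "css:cluster:create"
+            ]
+        }
+    ]
+}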
+ Table 1 lists all the system-defined roles and policies supported by CSS.
+ - Elasticsearch Administrator depends on the roles of other services to execute its permissions. Therefore, if you assign the Elasticsearch Administrator role to a user, assign its dependency roles at the same time.
- CSS FullAccess and CSS ReadOnlyAccess can be used to control the resources that users can access. For example, if you want your software developers to use CSS resources but not delete them or perform any high-risk operations, you can create IAM users for these software developers and assign them only the permissions required for using CSS resources.
+
+ Table 1 CSS system permissionRole/Policy Name
+ |
+Type
+ |
+Role/Policy Description
+ |
+Dependency
+ |
+
+
+Elasticsearch Administrator
+ |
+System-defined role
+ |
+Full permissions for CSS.
+This role depends on the Tenant Guest and Server Administrator roles in the same project.
+ |
+- Tenant Guest: A global role, which must be assigned in the global project.
- Server Administrator: A project-level role, which must be assigned in the same project.
+ |
+
+CSS FullAccess
+ |
+System-defined policy
+ |
+Full CSS permissions granted through policies. Users with these permissions can perform all operations on CSS.
+Some functions depend on corresponding permissions. To use certain functions, you need to enable the dependent permissions in the same project.
+ |
+The VPCEndpoint Administrator system role is required for accessing a cluster through a VPC endpoint.
+Some operations depend on the following permissions:
+- Automatically create an agency:
iam:agencies:createAgency
+ - View the agency list:
iam:agencies:listAgencies
+iam:permissions:listRolesForAgencyOnDomain
+iam:permissions:listRolesForAgencyOnProject
+iam:permissions:listRolesForAgency
+ - Display enterprise projects and predefined tags on the console:
eps:enterpriseProjects:list
+tms:predefineTags:list
+ - Use the snapshot, word dictionary, and log management functions:
obs:bucket:Get*
+obs:bucket:List*
+obs:object:List*
+obs:object:Get*
+obs:bucket:HeadBucket
+obs:object:PutObject
+obs:object:DeleteObject
+
+ |
+
+CSS ReadOnlyAccess
+ |
+System-defined policy
+ |
+Read-only permissions for CSS. Users with these permissions can only view CSS data.
+Some functions depend on corresponding permissions. To use certain functions, you need to enable the dependent permissions in global services.
+ |
+Some operations depend on the following permissions:
+- View the agency list:
iam:agencies:listAgencies
+iam:permissions:listRolesForAgencyOnDomain
+iam:permissions:listRolesForAgencyOnProject
+iam:permissions:listRolesForAgency
+ - Display enterprise projects and predefined tags on the console:
eps:enterpriseProjects:list
+tms:predefineTags:list
+ - Use the snapshot, word dictionary, and log management functions:
obs:bucket:Get*
+obs:bucket:List*
+obs:object:List*
+obs:object:Get*
+obs:bucket:HeadBucket
+
+ |
+
+
+
+
+ Table 2 lists the common operations supported by each system-defined permission of CSS. Choose appropriate system permissions according to this table.
+
+ Table 2 Common operations supported by each system-defined policyOperation
+ |
+CSS FullAccess
+ |
+CSS ReadOnlyAccess
+ |
+Elasticsearch Administrator
+ |
+Remarks
+ |
+
+
+Creating a cluster
+ |
+√
+ |
+x
+ |
+√
+ |
+-
+ |
+
+Querying a cluster list
+ |
+√
+ |
+√
+ |
+√
+ |
+-
+ |
+
+Querying cluster details
+ |
+√
+ |
+√
+ |
+√
+ |
+-
+ |
+
+Deleting a cluster
+ |
+√
+ |
+x
+ |
+√
+ |
+-
+ |
+
+Restarting a cluster
+ |
+√
+ |
+x
+ |
+√
+ |
+-
+ |
+
+Expanding cluster capacity
+ |
+√
+ |
+x
+ |
+√
+ |
+-
+ |
+
+Adding instances and expanding instance storage capacity
+ |
+√
+ |
+x
+ |
+√
+ |
+-
+ |
+
+Querying tags of a specified cluster
+ |
+√
+ |
+√
+ |
+√
+ |
+-
+ |
+
+Querying all tags
+ |
+√
+ |
+√
+ |
+√
+ |
+-
+ |
+
+Loading a custom word dictionary
+ |
+√
+ |
+x
+ |
+√
+ |
+Depends on OBS and IAM permissions
+ |
+
+Querying the status of a custom word dictionary
+ |
+√
+ |
+√
+ |
+√
+ |
+-
+ |
+
+Deleting a custom word dictionary
+ |
+√
+ |
+x
+ |
+√
+ |
+-
+ |
+
+Automatically setting basic configurations of a cluster snapshot
+ |
+√
+ |
+x
+ |
+√
+ |
+Depends on OBS and IAM permissions
+ |
+
+Modifying basic configurations of a cluster snapshot
+ |
+√
+ |
+x
+ |
+√
+ |
+Depends on OBS and IAM permissions
+ |
+
+Setting the automatic snapshot creation policy
+ |
+√
+ |
+x
+ |
+√
+ |
+-
+ |
+
+Querying the automatic snapshot creation policy
+ |
+√
+ |
+√
+ |
+√
+ |
+-
+ |
+
+Manually creating a snapshot
+ |
+√
+ |
+x
+ |
+√
+ |
+-
+ |
+
+Querying the snapshot list
+ |
+√
+ |
+√
+ |
+√
+ |
+-
+ |
+
+Restoring a snapshot
+ |
+√
+ |
+x
+ |
+√
+ |
+-
+ |
+
+Deleting a snapshot
+ |
+√
+ |
+x
+ |
+√
+ |
+-
+ |
+
+Disabling the snapshot function
+ |
+√
+ |
+x
+ |
+√
+ |
+-
+ |
+
+Modifying specifications
+ |
+√
+ |
+x
+ |
+√
+ |
+-
+ |
+
+Scaling in clusters
+ |
+√
+ |
+x
+ |
+√
+ |
+-
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001715624677.html b/docs/css/umn/en-us_topic_0000001715624677.html
new file mode 100644
index 00000000..f71fb38c
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001715624677.html
@@ -0,0 +1,36 @@
+
+
+Constraints
+Restrictions on Clusters and NodesThe following table describes restrictions on clusters and nodes in CSS.
+
+ Table 1 Restrictions on Elasticsearch clusters and nodesCluster and Node
+ |
+Restriction
+ |
+
+
+Maximum number of nodes in a cluster
+ |
+32
+ |
+
+Minimum number of nodes in a cluster
+ |
+1
+ |
+
+
+
+
+
+ Restrictions on Browsers- You are advised to use the following browsers to access the CSS management console:
- Google Chrome 36.0 or later
- Mozilla Firefox 35.0 or later
+ - You are advised to use the following browsers to access Kibana integrated in CSS:
- Google Chrome 36.0 or later
- Mozilla Firefox 35.0 or later
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001715704493.html b/docs/css/umn/en-us_topic_0000001715704493.html
new file mode 100644
index 00000000..0f783c1b
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001715704493.html
@@ -0,0 +1,26 @@
+
+
+Scenarios
+CSS can be used to build search boxes for websites and apps to improve user experience. You can also build a log analysis platform with it, facilitating data-driven O&M and business operations. CSS vector search can help you quickly build smart applications, such as AI-based image search, recommendation, and semantic search.
+ Site SearchCSS can be used to search for website content by keyword as well as search for and recommend commodities on e-commerce sites.
+ - Real-time search: When site content is updated, you can find the updated content in your search within minutes, or even just seconds.
- Categorized statistics: You can apply search filters to sort products by category.
- Custom highlight style: You can define how the search results are highlighted.
+ Figure 1 Site search
+
+ All-Scenario Log AnalysisAnalyze the logs of Elastic Load Balance (ELB), servers, containers, and applications. In CSS, the Kafka message buffer queue is used to balance loads in peak and off-peak hours. Logstash is used for data extract, transform and load (ETL). Elasticsearch retrieves and analyzes data. The analysis results are visualized by Kibana and presented to you.
+ - High cost-effectiveness: CSS separates cold and hot storage, and decouples computing and storage resources, achieving high performance and reducing costs by over 30%.
- Ease of use: Perform queries in a GUI editor. Easily create reports using drag-and-drop components.
- Powerful processing capability: CSS can import hundreds of terabytes of data per day, and can process petabytes of data.
+ Figure 2 All-scenario log analysis
+
+ Database Query AccelerationCSS can be used to accelerate database queries. E-commerce and logistics companies have to respond to a huge number of concurrent order queries within a short period of time. Relational databases, although strong in transaction atomicity, struggle with such highly concurrent queries and can rely on CSS to enhance their OLTP and OLAP capabilities.
+ - High performance: Retrieve data from hundreds of millions of records within milliseconds. Text, time, numeric, and spatial data types are supported.
- High scalability: CSS can be scaled to have over 200 data nodes and over 1000 columns.
- Zero service interruption: The rolling restart and dual-copy mechanisms can avoid service interruption in case of specifications change or configuration update.
+
+ Vector SearchWhen you search for unstructured data, such as images, videos, and corpuses, the nearest neighbors or approximate nearest neighbors are searched based on feature vectors. This has the following advantages:
+ - Efficient and reliable: The vector search engine provides optimal search performance and distributed DR capabilities.
- Abundant indexes: Multiple indexing algorithms and similarity measurement methods are available and can meet diverse needs.
- Easy learning: CSS is fully compatible with the open-source Elasticsearch ecosystem.
+ Figure 3 Vector search
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001720964408.html b/docs/css/umn/en-us_topic_0000001720964408.html
new file mode 100644
index 00000000..658b26b0
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001720964408.html
@@ -0,0 +1,25 @@
+
+
+Querying Real-Time Traces
+ScenariosAfter you enable CTS and the management tracker is created, CTS starts recording operations on cloud resources. CTS stores operation records generated in the last seven days.
+ This section describes how to query and export operation records of the last seven days on the CTS console.
+
+
+ Viewing Real-Time Traces in the Trace List- Log in to the management console.
- Click
in the upper left corner and choose Management & Deployment > Cloud Trace Service. The CTS console is displayed. - Choose Trace List in the navigation pane on the left.
- Set filters to search for your desired traces, as shown in Figure 1. The following filters are available:
Figure 1 Filters
+- Trace Type, Trace Source, Resource Type, and Search By: Select a filter from the drop-down list.
- If you select Resource ID for Search By, specify a resource ID.
- If you select Trace name for Search By, specify a trace name.
- If you select Resource name for Search By, specify a resource name.
+ - Operator: Select a user.
- Trace Status: Select All trace statuses, Normal, Warning, or Incident.
- Time range: You can query traces generated during any time range in the last seven days.
- Click Export to export all traces in the query result as a CSV file. The file can contain up to 5000 records.
+
+ - Click Query.
- On the Trace List page, you can also export and refresh the trace list.
- Click Export to export all traces in the query result as a CSV file. The file can contain up to 5000 records.
- Click
to view the latest information about traces.
+ - Click
on the left of a trace to expand its details.
+
+
+ - Click View Trace in the Operation column. The trace details are displayed.

+ - For details about key fields in the trace structure, see section "Trace References" > "Trace Structure" and section "Trace References" > "Example Traces" in the CTS User Guide.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001814230837.html b/docs/css/umn/en-us_topic_0000001814230837.html
new file mode 100644
index 00000000..9becb43a
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001814230837.html
@@ -0,0 +1,139 @@
+
+
+Using PV_GRAPH to Search for Vector Indexes
+PV_GRAPH deeply optimizes the HNSW algorithm and supports joint vector and scalar filtering. Compared with post-filtering and Boolean queries, joint filtering can greatly improve the result filling rate and query performance.
+
+ Creating an Index- Log in to the CSS management console.
- Choose Clusters in the navigation pane. On the Clusters page, locate the target cluster and click Access Kibana in the Operation column.
- Click Dev Tools in the navigation tree on the left and run the following command to create a vector index.
Create an index named my_index that contains a vector field my_vector and two sub-fields country and category.
+PUT my_index
+{
+ "settings": {
+ "index": {
+ "vector": true
+ }
+ },
+ "mappings": {
+ "properties": {
+ "my_vector": {
+ "type": "vector",
+ "dimension": 2,
+ "indexing": true,
+ "algorithm": "PV_GRAPH",
+ "metric": "euclidean",
+ "sub_fields": ["country", "category"]
+ }
+ }
+ }
+}
+For details about the parameters for creating an index, see Table 1.
+ The metric parameter of the PV_GRAPH index algorithm can only be set to euclidean or inner_product.
+
+
+
+ Importing the Vector and Scalar DataWhen algorithm is set to PV_GRAPH and sub_fields is specified, the following data writing syntax is supported. The sub_fields parameter supports only the keyword type, and you can specify multiple values for it.
+ # Write a single data record.
+POST my_index/_doc
+{
+ "my_vector": {
+ "data": [1.0, 1.0],
+ "country": "cn",
+ "category": ["1", "2"]
+ }
+}
+
+# Write multiple data records in batches.
+POST my_index/_bulk
+{"index": {}}
+{"my_vector": {"data": [1.0, 2.0], "country": "cn", "category": "1"}}
+{"index": {}}
+{"my_vector": {"data": [2.0, 2.0], "country": "cn", "category": ["1", "2"]}}
+{"index": {}}
+{"my_vector": {"data": [2.0, 3.0], "country": "eu", "category": "2"}}
+
+ Querying a VectorBased on the existing Elasticsearch APIs, the filter parameter is added to vector to support vector and scalar joint filtering. The values of sub_fields can be used for scalar filtering. Currently, the JSON format is supported. The should, must, must_not, term, and terms queries are supported. The syntax is the same as that of Elasticsearch query. The restrictions are as follows:
+ Currently, filter conditions can be nested up to four layers deep.
+ - must_not cannot be nested or contain nested clauses.
- The first layer can contain only one query keyword (such as must).
+ The fields defined in sub_fields during index creation are the scalar fields used in the joint filtering and take effect only when the algorithm is set to PV_GRAPH. If the specified filtering field does not exist, the filtering request becomes invalid and the query is processed with no filtering conditions.
+ # Example of single-label and single-value matching query
+GET my_index/_search
+{
+ "query": {
+ "vector": {
+ "my_vector": {
+ "vector": [1.0, 1.0],
+ "topk": 10,
+ "filter": {
+ "term": { "country": "cn" }
+ }
+ }
+ }
+ }
+}
+
+# Example of single-label and multi-value matching query
+GET my_index/_search
+{
+ "query": {
+ "vector": {
+ "my_vector": {
+ "vector": [1.0, 1.0],
+ "topk": 10,
+ "filter": {
+ "terms": { "country": ["cn", "eu"] }
+ }
+ }
+ }
+ }
+}
+
+# Example of multi-label matching query
+GET my_index/_search
+{
+ "query": {
+ "vector": {
+ "my_vector": {
+ "vector": [1.0, 1.0],
+ "topk": 10,
+ "filter": {
+ "must": [
+ {
+ "term": {"country": "cn"}
+ },
+ {
+ "terms": {"category": ["1", "2"]}
+ }
+ ]
+ }
+ }
+ }
+ }
+}
+
+# Example of must_not matching query
+GET my_index/_search
+{
+ "query": {
+ "vector": {
+ "my_vector": {
+ "vector": [1.0, 1.0],
+ "topk": 10,
+ "filter": {
+ "must_not": [
+ {
+ "term": {"country": "eu"}
+ }
+ ]
+ }
+ }
+ }
+ }
+}
+ For details about vector query parameters, see Table 1.
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001815107861.html b/docs/css/umn/en-us_topic_0000001815107861.html
new file mode 100644
index 00000000..b8a00f19
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001815107861.html
@@ -0,0 +1,198 @@
+
+
+Configuring SMN Alarms
+ScenariosBy default, CSS installs the open-source alerting plugin opensearch-alerting for OpenSearch clusters so that notifications can be sent when data meets specific conditions. This plugin consists of three components: Alerts, Monitors, and Destinations. CSS integrates the SMN service into the Destinations component, so alarm messages can be sent only through an SMN destination.
+ This section describes how to configure the SMN alarm function for OpenSearch clusters on OpenSearch Dashboards.
+
+
+ Constraints and LimitationsThe open-source alerting plugin opensearch-alerting is installed by default for OpenSearch clusters of version 1.3.6.
+
+
+ Procedure- Log in to the CSS management console.
- Choose Clusters > OpenSearch, select the target cluster and click Access Kibana in the Operation column.
- On the OpenSearch Dashboards page, choose OpenSearch Plugins > Alerting in the navigation tree on the left.
- Create an SMN destination to send alert messages.
- On the Alerting page, click the Destinations tab and click Add destination to configure destination information.
+
Table 1 Destinations parametersParameter
+ |
+Description
+ |
+
+
+Name
+ |
+User-defined destination name
+ |
+
+Type
+ |
+Retain the default value SMN.
+ |
+
+Topic
+ |
+Select the SMN topic you have created for sending alarm messages.
+ |
+
+
+
+
+Figure 1 Add destination
+ - Click Create to return to the destination list. The created SMN destination is displayed in the list.
Figure 2 Destination list
+
+ - Create a monitoring task and configure the alarm triggering condition and monitoring frequency.
- Click the Monitors tab on the Alerting page and click Create monitor to configure monitoring information.
+
Table 2 Monitor parametersParameter
+ |
+Description
+ |
+
+
+Monitor details
+ |
+
+Monitor name
+ |
+User-defined monitor name
+ |
+
+Monitor type
+ |
+Monitor type. The value can be Per query monitor (common monitoring), Per bucket monitor (aggregation bucket monitoring), or Per cluster metrics monitor (cluster metric monitoring).
+ |
+
+Monitor defining method
+ |
+Monitor defining method. Extraction query editor is recommended.
+- Visual editor
- Extraction query editor
- Anomaly detector
+The options of Monitor defining method are determined by the Monitor type you selected.
+ |
+
+Detector
+ |
+If Monitor defining method is set to Anomaly detector, select an exception detection task.
+ |
+
+Frequency
+ |
+Select the monitoring frequency and set the monitoring interval. The options include:
+- By interval
- Daily
- Weekly
- Monthly
- Custom cron expression
+ |
+
+Data source
+ |
+
+Index
+ |
+When Monitor defining method is set to Visual editor or Extraction query editor, you need to specify the index to be monitored.
+ |
+
+Time field
+ |
+When Monitor defining method is set to Visual editor, you need to specify the time field to define counting parameters such as count.
+ |
+
+Query
+ |
+
+Metrics
+ |
+When Monitor defining method is set to Visual editor, you need to set the metrics range for extracting statistics.
+ |
+
+Time range for the last
+ |
+When Monitor defining method is set to Visual editor, you need to set the monitoring time range for plug-ins.
+ |
+
+Data filter
+ |
+When Monitor defining method is set to Visual editor, you need to set filters for data search.
+ |
+
+Group by
+ |
+When Monitor defining method is set to Visual editor, you need to specify a field so that each value of the field triggers an alarm.
+ |
+
+Define extraction query
+ |
+When Monitor defining method is set to Extraction query editor, you need to enter the query statement that defines the monitoring (see the example query after this table).
+ |
+
+Request type
+ |
+When Monitor type is set to Per cluster metrics monitor, you need to specify the request type to monitor cluster metrics, such as the running status and CPU usage.
+ |
+
+
+
+
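+ For reference, the following is a minimal sketch of a query that could be entered in the Extraction query editor. The index fields (@timestamp, level) and the five-minute window are assumptions used for illustration and must be adapted to your own data:
+{
+  "size": 0,
+  "query": {
+    "bool": {
+      "filter": [
+        { "range": { "@timestamp": { "gte": "now-5m" } } },
+        { "term": { "level": "ERROR" } }
+      ]
+    }
+  }
+}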
+ - Click Add trigger to add triggers and specify the alarm triggering conditions and actions to be triggered when an alarm is reported.
- On the Triggers page, set the alarm triggering sensitivity and message release on the destination end.
+
Table 3 Trigger parametersParameter
+ |
+Description
+ |
+
+
+Trigger name
+ |
+User-defined trigger name
+ |
+
+Severity level
+ |
+Sensitivity of a trigger, that is, the number of alarms that are triggered before an alarm message is sent. 1 indicates the highest sensitivity.
+ |
+
+Trigger condition
+ |
+Trigger condition. An alarm is triggered when the trigger condition is hit (see the example condition after this table).
+ |
+
+Action name
+ |
+Trigger action name
+ |
+
+Destination
+ |
+Select the SMN destination created in section 4.
+ |
+
+Message
+ |
+Alarm message body. By default, the subject and body are defined when the destination is an email.
+ |
+
+Perform action
+ |
+When Monitor type is set to Per bucket monitor, you need to set whether to send alarms in combination. The value can be:
+- Per execution: A combination alarm is sent when multiple alarm triggering conditions are hit.
- Per alert: Alarms are sent separately when multiple alarm triggering conditions are hit.
+ |
+
+Actionable alerts
+ |
+When Monitor type is set to Per bucket monitor, set this parameter to Per alert. You need to set the alarms that can be executed after alarm triggering conditions are hit.
+- De-duplicated: Alarms that have been triggered. OpenSearch retains the existing alarms to prevent the plugin from creating duplicate alarms.
- New: Newly created alarms.
- Completed: Alarms that are no longer ongoing.
+ |
+
+Throttling
+ |
+Message sending frequency. It limits the number of notification messages that can be received in a specified period.
+For example, if this parameter is set to 10 minutes, SMN sends only one alarm notification in the next 10 minutes even if the trigger condition is hit multiple times. After 10 minutes, SMN sends another alarm notification if the alarm condition is met.
+ |
+
+
+
+
+Figure 3 Setting the destination of a trigger action
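+ For reference, for a per query monitor the trigger condition is written as a Painless expression that is evaluated against the monitor's query response. The following minimal sketch (the threshold of 0 is an assumption) fires whenever the query returns at least one hit:
+ctx.results[0].hits.total.value > 0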
+ - Click Send test message. If a subscriber receives an email, as shown in Figure 5, the trigger is configured successfully.
Figure 4 Sending a test message
+Figure 5 Email notification
+ - Click Create to return to the monitor details page. The detector is successfully created.
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001832788405.html b/docs/css/umn/en-us_topic_0000001832788405.html
new file mode 100644
index 00000000..301f1c9a
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001832788405.html
@@ -0,0 +1,39 @@
+
+
+Recording Access Logs in Files
+The traffic control function can record cluster access logs and write the logs to background log files. You can back up the logs to OBS for viewing. You can run the following command to enable the function of recording access logs to files:
+PUT /_cluster/settings
+{
+ "persistent": {
+ "flowcontrol.log.file.enabled": true
+ }
+}
+
+ Table 1 ParametersParameter
+ |
+Type
+ |
+Description
+ |
+
+
+flowcontrol.log.file.enabled
+ |
+Boolean
+ |
+Indicates whether to record the log details of each request to the background log file. The value can be:
+- true
- false (default value)
+ |
+
+
+
+
+
+ - After the function of recording access logs to files is enabled, access from a client to a cluster node is recorded in the {Cluster name_access_log.log} file. You can use the log backup function to view detailed access logs.
- After the fault is located, you are advised to disable this function, as shown in the example below.
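+ A minimal sketch of the corresponding disable command, which simply switches the setting above back to its default value false:
+PUT /_cluster/settings
+{
+  "persistent": {
+    "flowcontrol.log.file.enabled": false
+  }
+}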
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001866261281.html b/docs/css/umn/en-us_topic_0000001866261281.html
new file mode 100644
index 00000000..cfdb6260
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001866261281.html
@@ -0,0 +1,21 @@
+
+
+
+ Using the Open Distro Alarm Plug-in to Configure SMN Alarms
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001876048066.html b/docs/css/umn/en-us_topic_0000001876048066.html
new file mode 100644
index 00000000..b9d00775
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001876048066.html
@@ -0,0 +1,29 @@
+
+
+
+ FAQs
+
+
+
+
+
diff --git a/docs/css/umn/en-us_topic_0000001921967557.html b/docs/css/umn/en-us_topic_0000001921967557.html
new file mode 100644
index 00000000..3279635f
--- /dev/null
+++ b/docs/css/umn/en-us_topic_0000001921967557.html
@@ -0,0 +1,31 @@
+
+
+
+ Product Overview
+
+
+
+
+