diff --git a/docs/dms/umn/.placeholder b/docs/dms/umn/.placeholder
deleted file mode 100644
index e69de29b..00000000
diff --git a/docs/dms/umn/ALL_META.TXT.json b/docs/dms/umn/ALL_META.TXT.json
index a3f5366b..116796d6 100644
--- a/docs/dms/umn/ALL_META.TXT.json
+++ b/docs/dms/umn/ALL_META.TXT.json
@@ -1,1281 +1,3010 @@
[
+ {
+ "dockw":"User Guide"
+ },
{
"uri":"kafka-ug-0723001.html",
- "product_code":"dms",
+ "node_id":"kafka-ug-0723001.xml",
+ "product_code":"kafka",
"code":"1",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Service Overview",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Service Overview",
"githuburl":""
},
{
"uri":"kafka-pd-190605001.html",
- "product_code":"dms",
+ "node_id":"kafka-pd-190605001.xml",
+ "product_code":"kafka",
"code":"2",
"des":"Apache Kafka is distributed message middleware that features high throughput, data persistence, horizontal scalability, and stream data processing. It adopts the publish-",
"doc_type":"usermanual",
"kw":"What Is DMS?,Service Overview,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"What Is DMS?",
"githuburl":""
},
{
"uri":"kafka-advantage.html",
- "product_code":"dms",
+ "node_id":"kafka-advantage.xml",
+ "product_code":"kafka",
"code":"3",
"des":"DMS provides easy-to-use message queuing based on Apache Kafka. Services can be quickly migrated to the cloud without any change, reducing maintenance and usage costs.Rap",
"doc_type":"usermanual",
"kw":"Product Advantages,Service Overview,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Product Advantages",
"githuburl":""
},
{
"uri":"kafka-scenarios.html",
- "product_code":"dms",
+ "node_id":"kafka-scenarios.xml",
+ "product_code":"kafka",
"code":"4",
"des":"Kafka is popular message-oriented middleware that features highly reliable, asynchronous message delivery. It is widely used for transmitting data between different syste",
"doc_type":"usermanual",
"kw":"Application Scenarios,Service Overview,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Application Scenarios",
"githuburl":""
},
{
"uri":"Kafka-specification.html",
- "product_code":"dms",
+ "node_id":"kafka-specification.xml",
+ "product_code":"kafka",
"code":"5",
- "des":"Kafka instances are compatible with open-source Kafka 1.1.0, 2.3.0, and 2.7. The instance specifications are classified based on bandwidth, namely, 100 MB/s, 300 MB/s, 60",
+ "des":"Kafka instances are compatible with open-source Kafka v1.1.0, v2.3.0, v2.7, and v3.x. Kafka instances are classified into cluster and single-node types. A cluster instanc",
"doc_type":"usermanual",
"kw":"Specifications,Service Overview,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Specifications",
"githuburl":""
},
{
- "uri":"kafka-pd-200720001.html",
- "product_code":"dms",
+ "uri":"kafka-pd-0052.html",
+ "node_id":"kafka-pd-0052.xml",
+ "product_code":"kafka",
"code":"6",
+ "des":"A single-node Kafka instance has only one broker. These instances do not guarantee performance or reliability and are for trial use or testing only. In the production env",
+ "doc_type":"usermanual",
+ "kw":"Comparing Single-node and Cluster Kafka Instances,Service Overview,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Comparing Single-node and Cluster Kafka Instances",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-pd-200720001.html",
+ "node_id":"kafka-pd-200720001.xml",
+ "product_code":"kafka",
+ "code":"7",
"des":"DMS is compatible with open-source Kafka and has customized and enhanced Kafka features. In addition to the advantages of open-source Kafka, DMS for Kafka provides more r",
"doc_type":"usermanual",
"kw":"Comparing DMS for Kafka and Open-Source Kafka,Service Overview,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Comparing DMS for Kafka and Open-Source Kafka",
"githuburl":""
},
{
"uri":"kafka-pd-190605003.html",
- "product_code":"dms",
- "code":"7",
+ "node_id":"kafka-pd-190605003.xml",
+ "product_code":"kafka",
+ "code":"8",
"des":"This section describes the notes and constraints on DMS.",
"doc_type":"usermanual",
"kw":"Notes and Constraints,Service Overview,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Notes and Constraints",
"githuburl":""
},
{
"uri":"kafka-pd-190605002.html",
- "product_code":"dms",
- "code":"8",
+ "node_id":"kafka-pd-190605002.xml",
+ "product_code":"kafka",
+ "code":"9",
"des":"Cloud Trace Service (CTS)CTS generates traces to provide you with a history of operations performed on cloud service resources. The traces include operation requests sent",
"doc_type":"usermanual",
"kw":"Related Services,Service Overview,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Related Services",
"githuburl":""
},
{
"uri":"glossary-kafka.html",
- "product_code":"dms",
- "code":"9",
+ "node_id":"glossary-kafka.xml",
+ "product_code":"kafka",
+ "code":"10",
"des":"DMS for Kafka of the cloud service platform uses Kafka as the message engine. This chapter presents explanations of basic concepts of Kafka.A topic is a category for mess",
"doc_type":"usermanual",
"kw":"Basic Concepts,Service Overview,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Basic Concepts",
"githuburl":""
},
{
"uri":"dms-ug-001.html",
- "product_code":"dms",
- "code":"10",
+ "node_id":"dms-ug-001.xml",
+ "product_code":"kafka",
+ "code":"11",
"des":"This section provides recommendations on configuring common parameters for Kafka producers and consumers.",
"doc_type":"usermanual",
"kw":"Best Practices,Service Overview,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Best Practices",
"githuburl":""
},
{
- "uri":"dms-ug-190128001.html",
- "product_code":"dms",
- "code":"11",
- "des":"By default, there are two types of user permissions: user management and resource management.User management refers to the management of users, user groups, and user grou",
- "doc_type":"usermanual",
- "kw":"Permissions,Service Overview,User Guide",
- "title":"Permissions",
- "githuburl":""
- },
- {
- "uri":"kafka-ug-180604012.html",
- "product_code":"dms",
+ "uri":"ProductDescPrivilegeManagement.html",
+ "node_id":"productdescprivilegemanagement.xml",
+ "product_code":"kafka",
"code":"12",
- "des":"Before creating a Kafka instance, ensure the availability of resources, including a virtual private cloud (VPC), subnet, security group, and security group rules. Each Ka",
+ "des":"You can use Identity and Access Management (IAM) to manage DMS for Kafka permissions and control access to your resources. IAM provides identity authentication, permissio",
"doc_type":"usermanual",
- "kw":"Preparing Required Resources,User Guide",
- "title":"Preparing Required Resources",
+ "kw":"Permission,Service Overview,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Permission",
"githuburl":""
},
{
- "uri":"dms-ug-180604013.html",
- "product_code":"dms",
+ "uri":"kafka-ug-00003.html",
+ "node_id":"kafka-ug-00003.xml",
+ "product_code":"kafka",
"code":"13",
- "des":"Kafka instances are physically isolated and exclusively occupied by each tenant. You can customize the computing capabilities and storage space of an instance based on se",
+ "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
- "kw":"Creating an Instance,User Guide",
- "title":"Creating an Instance",
+ "kw":"Getting Started",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Getting Started",
"githuburl":""
},
{
- "uri":"kafka-ug190605003.html",
- "product_code":"dms",
+ "uri":"kafka-qs-0409001.html",
+ "node_id":"kafka-qs-0409001.xml",
+ "product_code":"kafka",
"code":"14",
- "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "des":"This section takes the example of creating a Kafka 2.7 instance (ciphertext access and SASL_SSL) and accessing it on the client (private network, within a virtual private",
"doc_type":"usermanual",
- "kw":"Accessing a Kafka Instance",
- "title":"Accessing a Kafka Instance",
+ "kw":"Getting Started with Kafka for Message Production and Consumption,Getting Started,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Getting Started with Kafka for Message Production and Consumption",
"githuburl":""
},
{
- "uri":"kafka-ug-180604020.html",
- "product_code":"dms",
+ "uri":"kafka-ug-0069.html",
+ "node_id":"kafka-ug-0069.xml",
+ "product_code":"kafka",
"code":"15",
- "des":"This section describes how to use an open-source Kafka client to access a Kafka instance if SASL access is not enabled for the instance. There are two scenarios. For cros",
+ "des":"The following figure shows the process of using a Kafka instance to produce and consume messages.Creating a User and Granting DMS for Kafka PermissionsCreate IAM users an",
"doc_type":"usermanual",
- "kw":"Accessing a Kafka Instance Without SASL,Accessing a Kafka Instance,User Guide",
- "title":"Accessing a Kafka Instance Without SASL",
+ "kw":"Process of Using Kafka,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Process of Using Kafka",
"githuburl":""
},
{
- "uri":"kafka-ug-180801001.html",
- "product_code":"dms",
+ "uri":"UserPrivilegeManagement.html",
+ "node_id":"userprivilegemanagement.xml",
+ "product_code":"kafka",
"code":"16",
- "des":"If you enable SASL_SSL when creating an instance, data will be encrypted before transmission for enhanced security.For security purposes, TLS_ECDHE_RSA_WITH_AES_128_GCM_S",
- "doc_type":"usermanual",
- "kw":"Accessing a Kafka Instance with SASL,Accessing a Kafka Instance,User Guide",
- "title":"Accessing a Kafka Instance with SASL",
- "githuburl":""
- },
- {
- "uri":"kafka-ug-0001.html",
- "product_code":"dms",
- "code":"17",
- "des":"VPCs are logically isolated from each other. If a Kafka instance and a Kafka client are in different VPCs within a region, they cannot communicate with each other. In thi",
- "doc_type":"usermanual",
- "kw":"Cross-VPC Access to a Kafka Instance,Accessing a Kafka Instance,User Guide",
- "title":"Cross-VPC Access to a Kafka Instance",
- "githuburl":""
- },
- {
- "uri":"kafka-dnat.html",
- "product_code":"dms",
- "code":"18",
- "des":"You can use destination NAT (DNAT) to access a Kafka instance so that the instance can provide services on the public network through port mapping.You have created EIPs. ",
- "doc_type":"usermanual",
- "kw":"Using DNAT to Access a Kafka Instance,Accessing a Kafka Instance,User Guide",
- "title":"Using DNAT to Access a Kafka Instance",
- "githuburl":""
- },
- {
- "uri":"kafka-ug-180604011.html",
- "product_code":"dms",
- "code":"19",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
- "kw":"Managing Instances",
- "title":"Managing Instances",
+ "kw":"Permission Management",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Permission Management",
"githuburl":""
},
{
- "uri":"kafka-ug-180604014.html",
- "product_code":"dms",
- "code":"20",
- "des":"View detailed information about a Kafka instance on the DMS console, for example, the IP addresses and port numbers for accessing the instance.Select the region where you",
+ "uri":"CreateUserAndGrantPolicy.html",
+ "node_id":"createuserandgrantpolicy.xml",
+ "product_code":"kafka",
+ "code":"17",
+ "des":"This section describes how to use Identity and Access Management (IAM) for fine-grained permissions control for your Distributed Message Service (DMS) for Kafka resources",
"doc_type":"usermanual",
- "kw":"Viewing an Instance,Managing Instances,User Guide",
- "title":"Viewing an Instance",
+ "kw":"Creating a User and Granting DMS for Kafka Permissions,Permission Management,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Creating a User and Granting DMS for Kafka Permissions",
"githuburl":""
},
{
- "uri":"kafka-ug-180604015.html",
- "product_code":"dms",
- "code":"21",
- "des":"Restart one or more Kafka instances at a time on the DMS console.When a Kafka instance is being restarted, message retrieval and creation requests of clients will be reje",
+ "uri":"kafka-ug-180604013.html",
+ "node_id":"kafka-ug-180604013.xml",
+ "product_code":"kafka",
+ "code":"18",
+ "des":"Kafka instances are tenant-exclusive, and physically isolated in deployment. You can customize the computing capabilities and storage space of a Kafka instance as require",
"doc_type":"usermanual",
- "kw":"Restarting an Instance,Managing Instances,User Guide",
- "title":"Restarting an Instance",
- "githuburl":""
- },
- {
- "uri":"kafka-ug-180604016.html",
- "product_code":"dms",
- "code":"22",
- "des":"On the DMS console, you can delete one or more Kafka instances that have been created or failed to be created.Deleting a Kafka instance will delete the data in the instan",
- "doc_type":"usermanual",
- "kw":"Deleting an Instance,Managing Instances,User Guide",
- "title":"Deleting an Instance",
- "githuburl":""
- },
- {
- "uri":"kafka-ug-180604017.html",
- "product_code":"dms",
- "code":"23",
- "des":"After creating a Kafka instance, you can modify some parameters of the instance based on service requirements, including the instance name, description, security group, a",
- "doc_type":"usermanual",
- "kw":"Modifying the Information About an Instance,Managing Instances,User Guide",
- "title":"Modifying the Information About an Instance",
- "githuburl":""
- },
- {
- "uri":"kafka-ug-0319001.html",
- "product_code":"dms",
- "code":"24",
- "des":"To access a Kafka instance over a public network, enable public access and configure EIPs for the instance.If you no longer need public access to the instance, you can di",
- "doc_type":"usermanual",
- "kw":"Configuring Public Access,Managing Instances,User Guide",
- "title":"Configuring Public Access",
- "githuburl":""
- },
- {
- "uri":"kafka-ug-180718001.html",
- "product_code":"dms",
- "code":"25",
- "des":"You can reset the SASL_SSL password for accessing a Kafka instance by resetting Kafka password if you forget it.You can reset the Kafka password only if Kafka SASL_SSL ha",
- "doc_type":"usermanual",
- "kw":"Resetting Kafka Password,Managing Instances,User Guide",
- "title":"Resetting Kafka Password",
- "githuburl":""
- },
- {
- "uri":"TagManagement.html",
- "product_code":"dms",
- "code":"26",
- "des":"Tags facilitate Kafka instance identification and management.You can add tags to a Kafka instance when creating the instance or add tags on the Tags tab page of the creat",
- "doc_type":"usermanual",
- "kw":"Managing Instance Tags,Managing Instances,User Guide",
- "title":"Managing Instance Tags",
- "githuburl":""
- },
- {
- "uri":"kafka-ug-200119002.html",
- "product_code":"dms",
- "code":"27",
- "des":"After you initiate certain instance operations such as configuring public access and modifying the capacity threshold policy, a background task will start for each operat",
- "doc_type":"usermanual",
- "kw":"Viewing Background Tasks,Managing Instances,User Guide",
- "title":"Viewing Background Tasks",
- "githuburl":""
- },
- {
- "uri":"kafka-ug-0004.html",
- "product_code":"dms",
- "code":"28",
- "des":"On the Kafka console, you can view the disk usage of each broker.Select the region where your Kafka instance is located.You can query topics that use the most disk space ",
- "doc_type":"usermanual",
- "kw":"Viewing Disk Usage,Managing Instances,User Guide",
- "title":"Viewing Disk Usage",
+ "kw":"Creating a Kafka Instance,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Creating a Kafka Instance",
"githuburl":""
},
{
"uri":"kafka-ug-0720001.html",
- "product_code":"dms",
- "code":"29",
+ "node_id":"kafka-ug-0720001.xml",
+ "product_code":"kafka",
+ "code":"19",
+ "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "doc_type":"usermanual",
+ "kw":"Configuring Topics",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Configuring Topics",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-180604018.html",
+ "node_id":"kafka-ug-180604018.xml",
+ "product_code":"kafka",
+ "code":"20",
+ "des":"Topics store messages created by producers and subscribed by consumers. If automatic topic creation is not enabled during Kafka instance creation, you need to manually cr",
+ "doc_type":"usermanual",
+ "kw":"Creating a Kafka Topic,Configuring Topics,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Creating a Kafka Topic",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-0002.html",
+ "node_id":"kafka-ug-0002.xml",
+ "product_code":"kafka",
+ "code":"21",
+ "des":"DMS supports access control list (ACL) for topics. You can differentiate user permissions by granting users different permissions in a topic.This section describes how to",
+ "doc_type":"usermanual",
+ "kw":"Configuring Kafka Topic Permissions,Configuring Topics,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Configuring Kafka Topic Permissions",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-0058.html",
+ "node_id":"kafka-ug-0058.xml",
+ "product_code":"kafka",
+ "code":"22",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Managing Topics",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Managing Topics",
"githuburl":""
},
{
- "uri":"dms-ug-180604018.html",
- "product_code":"dms",
- "code":"30",
- "des":"A topic is a stream of messages. If automatic topic creation is not enabled during Kafka instance creation, you need to manually create topics for creating and retrieving",
+ "uri":"kafka_ug_0045.html",
+ "node_id":"kafka_ug_0045.xml",
+ "product_code":"kafka",
+ "code":"23",
+ "des":"On the console, you can view the details of a Kafka instance including subscriptions to a topic, offsets and number of messages in each partition, and producer addresses.",
"doc_type":"usermanual",
- "kw":"Creating a Topic,Managing Topics,User Guide",
- "title":"Creating a Topic",
+ "kw":"Viewing Kafka Topic Details,Managing Topics,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Viewing Kafka Topic Details",
"githuburl":""
},
{
- "uri":"kafka-ug-180604019.html",
- "product_code":"dms",
- "code":"31",
- "des":"Delete a topic using either of the following methods:By using the consoleBy using Kafka CLIA Kafka instance has been created, and a topic has been created in this instanc",
+ "uri":"kafka-ug-0038.html",
+ "node_id":"kafka-ug-0038.xml",
+ "product_code":"kafka",
+ "code":"24",
+ "des":"This section describes how to modify following configurations of a Kafka topic on the console.Modifying Synchronous Replication, Synchronous Flushing, Message Timestamp, ",
"doc_type":"usermanual",
- "kw":"Deleting a Topic,Managing Topics,User Guide",
- "title":"Deleting a Topic",
- "githuburl":""
- },
- {
- "uri":"kafka-ug-200506001.html",
- "product_code":"dms",
- "code":"32",
- "des":"Aging time is a period that messages in the topic are retained for. Consumers must retrieve messages before this period ends. Otherwise, the messages will be deleted and ",
- "doc_type":"usermanual",
- "kw":"Modifying Topic Aging Time,Managing Topics,User Guide",
- "title":"Modifying Topic Aging Time",
+ "kw":"Modifying Kafka Topic Configurations,Managing Topics,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Modifying Kafka Topic Configurations",
"githuburl":""
},
{
"uri":"kafka-ug-0006.html",
- "product_code":"dms",
- "code":"33",
- "des":"After creating a topic, you can increase the number of partitions based on service requirements.Changing the number of partitions does not affect services.Methods for cha",
+ "node_id":"kafka-ug-0006.xml",
+ "product_code":"kafka",
+ "code":"25",
+ "des":"After creating a topic, you can increase the number of partitions as required.Changing the number of partitions does not restart the instance or affect services.Methods f",
"doc_type":"usermanual",
- "kw":"Changing Partition Quantity,Managing Topics,User Guide",
- "title":"Changing Partition Quantity",
- "githuburl":""
- },
- {
- "uri":"kafka_ug_0022.html",
- "product_code":"dms",
- "code":"34",
- "des":"Synchronous replication: A message is returned to the client only after the message creation request has been received and the message has been acknowledged by all replic",
- "doc_type":"usermanual",
- "kw":"Modifying Synchronous Replication and Flushing Settings,Managing Topics,User Guide",
- "title":"Modifying Synchronous Replication and Flushing Settings",
- "githuburl":""
- },
- {
- "uri":"kafka_ug_0024.html",
- "product_code":"dms",
- "code":"35",
- "des":"On the console, view sample code for creating and retrieving messages in Java, Go, and Python.Select the region where your Kafka instance is located.View sample code for ",
- "doc_type":"usermanual",
- "kw":"Viewing Sample Code,Managing Topics,User Guide",
- "title":"Viewing Sample Code",
+ "kw":"Changing Kafka Partition Quantity,Managing Topics,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Changing Kafka Partition Quantity",
"githuburl":""
},
{
"uri":"kafka_ug_0027.html",
- "product_code":"dms",
- "code":"36",
- "des":"Export topics on the console. Batch export is supported.A topic has been created.Select the region where your Kafka instance is located.The topic list contains the follow",
+ "node_id":"kafka_ug_0027.xml",
+ "product_code":"kafka",
+ "code":"26",
+ "des":"Export the topic list on the console. Batch export is supported.A topic has been created.Select the region where your Kafka instance is located.The topic list contains th",
"doc_type":"usermanual",
- "kw":"Exporting Topics,Managing Topics,User Guide",
- "title":"Exporting Topics",
+ "kw":"Exporting the Kafka Topic List,Managing Topics,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Exporting the Kafka Topic List",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka_ug_0023.html",
+ "node_id":"kafka_ug_0023.xml",
+ "product_code":"kafka",
+ "code":"27",
+ "des":"Partition reassignment is to reassign replicas of a partition to different brokers to solve the problem of unbalanced broker load.Partition reassignment is required in th",
+ "doc_type":"usermanual",
+ "kw":"Reassigning Kafka Partitions,Managing Topics,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Reassigning Kafka Partitions",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka_ug_0043.html",
+ "node_id":"kafka_ug_0043.xml",
+ "product_code":"kafka",
+ "code":"28",
+ "des":"Automatic topic creation: A topic will be automatically created when a message is produced in or consumed from a topic that does not exist. By default, the topic has para",
+ "doc_type":"usermanual",
+ "kw":"Configuring Automatic Topic Creation,Managing Topics,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Configuring Automatic Topic Creation",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-180604019.html",
+ "node_id":"kafka-ug-180604019.xml",
+ "product_code":"kafka",
+ "code":"29",
+ "des":"Delete a topic using either of the following methods:Deleting a Kafka Topic (Console)Deleting a Kafka Topic on the ClientA Kafka instance has been created, and a topic ha",
+ "doc_type":"usermanual",
+ "kw":"Deleting a Kafka Topic,Managing Topics,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Deleting a Kafka Topic",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug190605003.html",
+ "node_id":"kafka-ug190605003.xml",
+ "product_code":"kafka",
+ "code":"30",
+ "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "doc_type":"usermanual",
+ "kw":"Connecting to an Instance",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Connecting to an Instance",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-0059.html",
+ "node_id":"kafka-ug-0059.xml",
+ "product_code":"kafka",
+ "code":"31",
+ "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "doc_type":"usermanual",
+ "kw":"Configuring Kafka Network Connections",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Configuring Kafka Network Connections",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-180604012.html",
+ "node_id":"kafka-ug-180604012.xml",
+ "product_code":"kafka",
+ "code":"32",
+ "des":"A client can connect to a Kafka instance in public or private networks. Notes before using a private network:By default, a client and a Kafka instance are interconnected ",
+ "doc_type":"usermanual",
+ "kw":"Kafka Network Connection Conditions,Configuring Kafka Network Connections,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Kafka Network Connection Conditions",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-0319001.html",
+ "node_id":"kafka-ug-0319001.xml",
+ "product_code":"kafka",
+ "code":"33",
+ "des":"To access a Kafka instance over a public network, enable public access and configure EIPs for the instance.If you no longer need public access to the instance, you can di",
+ "doc_type":"usermanual",
+ "kw":"Configuring Kafka Public Access,Configuring Kafka Network Connections,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Configuring Kafka Public Access",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-0001.html",
+ "node_id":"kafka-ug-0001.xml",
+ "product_code":"kafka",
+ "code":"34",
+ "des":"VPCs are logically isolated from each other. If a Kafka instance and a Kafka client are in different VPCs within a region, they cannot communicate with each other. In thi",
+ "doc_type":"usermanual",
+ "kw":"Accessing Kafka Using a VPC Endpoint Across VPCs,Configuring Kafka Network Connections,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Accessing Kafka Using a VPC Endpoint Across VPCs",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-dnat.html",
+ "node_id":"kafka-dnat.xml",
+ "product_code":"kafka",
+ "code":"35",
+ "des":"You can use destination NAT (DNAT) to access a Kafka instance so that the instance can provide services on the public network through port mapping.You have created EIPs. ",
+ "doc_type":"usermanual",
+ "kw":"Accessing Kafka in a Public Network Using DNAT,Configuring Kafka Network Connections,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Accessing Kafka in a Public Network Using DNAT",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-0061.html",
+ "node_id":"kafka-ug-0061.xml",
+ "product_code":"kafka",
+ "code":"36",
+ "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "doc_type":"usermanual",
+ "kw":"Configuring Kafka Access Control",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Configuring Kafka Access Control",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka_ug_0044.html",
+ "node_id":"kafka_ug_0044.xml",
+ "product_code":"kafka",
+ "code":"37",
+ "des":"You can access a Kafka instance in plaintext or ciphertext. This section describes how to change the access mode on the console.When you change the access mode for the fi",
+ "doc_type":"usermanual",
+ "kw":"Configuring Plaintext or Ciphertext Access to Kafka Instances,Configuring Kafka Access Control,User ",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Configuring Plaintext or Ciphertext Access to Kafka Instances",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-0003.html",
+ "node_id":"kafka-ug-0003.xml",
+ "product_code":"kafka",
+ "code":"38",
+ "des":"DMS supports access control list (ACL) for topics. You can differentiate user permissions by granting users different permissions in a topic.This section describes how to",
+ "doc_type":"usermanual",
+ "kw":"Configuring Kafka Users,Configuring Kafka Access Control,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Configuring Kafka Users",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-0062.html",
+ "node_id":"kafka-ug-0062.xml",
+ "product_code":"kafka",
+ "code":"39",
+ "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "doc_type":"usermanual",
+ "kw":"Configuring the Kafka Client",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Configuring the Kafka Client",
+ "githuburl":""
+ },
+ {
+ "uri":"Kafka-client-parameter.html",
+ "node_id":"kafka-client-parameter.xml",
+ "product_code":"kafka",
+ "code":"40",
+ "des":"This section provides recommendations on configuring common parameters for Kafka producers and consumers. Kafka clients in different versions may have different parameter",
+ "doc_type":"usermanual",
+ "kw":"Setting Parameters for Kafka Clients,Configuring the Kafka Client,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Setting Parameters for Kafka Clients",
+ "githuburl":""
+ },
+ {
+ "uri":"Kafka-client-best-practice.html",
+ "node_id":"kafka-client-best-practice.xml",
+ "product_code":"kafka",
+ "code":"41",
+ "des":"Ensure that the owner thread does not exit abnormally. Otherwise, the client may fail to initiate consumption requests and the consumption will be blocked.Commit messages",
+ "doc_type":"usermanual",
+ "kw":"Suggestions on Using the Kafka Client,Configuring the Kafka Client,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Suggestions on Using the Kafka Client",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-180604020.html",
+ "node_id":"kafka-ug-180604020.xml",
+ "product_code":"kafka",
+ "code":"42",
+ "des":"This section describes how to use an open-source Kafka client to access a Kafka instance in plaintext. Two scenarios: private network within a VPC and public network. To ",
+ "doc_type":"usermanual",
+ "kw":"Connecting to Kafka Using the Client (Plaintext Access),Connecting to an Instance,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Connecting to Kafka Using the Client (Plaintext Access)",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-180801001.html",
+ "node_id":"kafka-ug-180801001.xml",
+ "product_code":"kafka",
+ "code":"43",
+ "des":"If you enable ciphertext access when creating an instance, SASL authentication will be required when your client connects to a Kafka instance.For security purposes, TLS_E",
+ "doc_type":"usermanual",
+ "kw":"Connecting to Kafka Using the Client (Ciphertext Access),Connecting to an Instance,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Connecting to Kafka Using the Client (Ciphertext Access)",
"githuburl":""
},
{
"uri":"kafka-ug-0720002.html",
- "product_code":"dms",
- "code":"37",
+ "node_id":"kafka-ug-0720002.xml",
+ "product_code":"kafka",
+ "code":"44",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Managing Messages",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Managing Messages",
"githuburl":""
},
{
"uri":"kafka-ug-190904001.html",
- "product_code":"dms",
- "code":"38",
+ "node_id":"kafka-ug-190904001.xml",
+ "product_code":"kafka",
+ "code":"45",
"des":"You can view the offset of different partitions, the message size, creation time, and body of messages in topics.Select the region where your Kafka instance is located.If",
"doc_type":"usermanual",
- "kw":"Querying Messages,Managing Messages,User Guide",
- "title":"Querying Messages",
+ "kw":"Viewing Kafka Messages,Managing Messages,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Viewing Kafka Messages",
"githuburl":""
},
{
- "uri":"kafka-ug-0009.html",
- "product_code":"dms",
- "code":"39",
- "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "uri":"kafka-ug-200506001.html",
+ "node_id":"kafka-ug-200506001.xml",
+ "product_code":"kafka",
+ "code":"46",
+ "des":"Aging time is a period that messages in the topic are retained for. Consumers must retrieve messages before this period ends. Otherwise, the messages will be deleted and ",
"doc_type":"usermanual",
- "kw":"Managing Users",
- "title":"Managing Users",
+ "kw":"Changing Kafka Message Retention Period,Managing Messages,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Changing Kafka Message Retention Period",
"githuburl":""
},
{
- "uri":"kafka-ug-0003.html",
- "product_code":"dms",
- "code":"40",
- "des":"DMS supports ACL permission management for topics. You can differentiate the operations that different users are allowed to perform on a topic by granting the users diffe",
+ "uri":"kafka_ug_0046.html",
+ "node_id":"kafka_ug_0046.xml",
+ "product_code":"kafka",
+ "code":"47",
+ "des":"This section describes how to delete messages on the console.Deleted messages cannot be recovered.Before deleting a message, set the auto.offset.reset parameter in the co",
"doc_type":"usermanual",
- "kw":"Creating a SASL_SSL User,Managing Users,User Guide",
- "title":"Creating a SASL_SSL User",
- "githuburl":""
- },
- {
- "uri":"kafka-ug-0002.html",
- "product_code":"dms",
- "code":"41",
- "des":"DMS supports ACL permission management for topics. You can differentiate the operations that different users are allowed to perform on a topic by granting the users diffe",
- "doc_type":"usermanual",
- "kw":"Granting Permissions to a SASL_SSL User,Managing Users,User Guide",
- "title":"Granting Permissions to a SASL_SSL User",
- "githuburl":""
- },
- {
- "uri":"kafka_ug_0025.html",
- "product_code":"dms",
- "code":"42",
- "des":"If you forget the password of a SASL_SSL user created on the Users tab page, you can reset the password and use the new password to connect to the Kafka instance.If you f",
- "doc_type":"usermanual",
- "kw":"Resetting the SASL_SSL Password,Managing Users,User Guide",
- "title":"Resetting the SASL_SSL Password",
- "githuburl":""
- },
- {
- "uri":"kafka_ug_0026.html",
- "product_code":"dms",
- "code":"43",
- "des":"This section describes how to delete a SASL_SSL user.Select the region where your Kafka instance is located.On the Users tab page, click Delete in the row that contains t",
- "doc_type":"usermanual",
- "kw":"Deleting a SASL_SSL User,Managing Users,User Guide",
- "title":"Deleting a SASL_SSL User",
+ "kw":"Deleting Kafka Messages,Managing Messages,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Deleting Kafka Messages",
"githuburl":""
},
{
"uri":"kafka-ug-0011.html",
- "product_code":"dms",
- "code":"44",
+ "node_id":"kafka-ug-0011.xml",
+ "product_code":"kafka",
+ "code":"48",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Managing Consumer Groups",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Managing Consumer Groups",
"githuburl":""
},
+ {
+ "uri":"kafka-ug-0041.html",
+ "node_id":"kafka-ug-0041.xml",
+ "product_code":"kafka",
+ "code":"49",
+ "des":"Create a consumer group on the console.auto.create.groups.enable: a consumer group is automatically created when a consumer attempts to enter a group that does not exist.",
+ "doc_type":"usermanual",
+ "kw":"Creating a Kafka Consumer Group,Managing Consumer Groups,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Creating a Kafka Consumer Group",
+ "githuburl":""
+ },
{
"uri":"kafka_ug_0021.html",
- "product_code":"dms",
- "code":"45",
- "des":"View the consumer group list, consumer list, and consumer offsets.The consumer list can be viewed only when consumers in a consumer group are connected to the Kafka insta",
+ "node_id":"kafka_ug_0021.xml",
+ "product_code":"kafka",
+ "code":"50",
+ "des":"This section describes how to query the consumer group list.Select the region where your Kafka instance is located.The consumer group name, status, and Coordinator (ID) a",
"doc_type":"usermanual",
- "kw":"Querying Consumer Group Details,Managing Consumer Groups,User Guide",
- "title":"Querying Consumer Group Details",
- "githuburl":""
- },
- {
- "uri":"kafka-ug-0012.html",
- "product_code":"dms",
- "code":"46",
- "des":"You can delete a consumer group using either of the following methods:Method 1: Delete a consumer group on the console.Method 2: Use Kafka CLI to delete a consumer group.",
- "doc_type":"usermanual",
- "kw":"Deleting a Consumer Group,Managing Consumer Groups,User Guide",
- "title":"Deleting a Consumer Group",
- "githuburl":""
- },
- {
- "uri":"kafka-ug-0014.html",
- "product_code":"dms",
- "code":"47",
- "des":"Resetting the consumer offset is to change the retrieval position of a consumer.Messages may be retrieved more than once after the offset is reset. Exercise caution when ",
- "doc_type":"usermanual",
- "kw":"Resetting the Consumer Offset,Managing Consumer Groups,User Guide",
- "title":"Resetting the Consumer Offset",
+ "kw":"Querying the Kafka Consumer Group List,Managing Consumer Groups,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Querying the Kafka Consumer Group List",
"githuburl":""
},
{
"uri":"kafka-ug-0015.html",
- "product_code":"dms",
- "code":"48",
- "des":"View consumer connection addresses on the DMS console.The connection address of a consumer can be viewed only when the consumer is connected to a Kafka instance.Select th",
+ "node_id":"kafka-ug-0015.xml",
+ "product_code":"kafka",
+ "code":"51",
+ "des":"This section describes how to view the consumer list and consumer connection addresses.The consumer list and connection address can be viewed only when consumers in a con",
"doc_type":"usermanual",
- "kw":"Viewing Consumer Connection Addresses,Managing Consumer Groups,User Guide",
- "title":"Viewing Consumer Connection Addresses",
+ "kw":"Viewing Kafka Consumer Details,Managing Consumer Groups,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Viewing Kafka Consumer Details",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-0014.html",
+ "node_id":"kafka-ug-0014.xml",
+ "product_code":"kafka",
+ "code":"52",
+ "des":"This section describes how to view and reset consumption offsets. Resetting consumption offsets is to change the consumption position for consumers.Messages may be retrie",
+ "doc_type":"usermanual",
+ "kw":"Viewing and Resetting Kafka Consumption Offsets,Managing Consumer Groups,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Viewing and Resetting Kafka Consumption Offsets",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-0056.html",
+ "node_id":"kafka-ug-0056.xml",
+ "product_code":"kafka",
+ "code":"53",
+ "des":"Export the consumer group list from the console.Select the region where your Kafka instance is located.Select the desired consumer groups and choose Export > Export selec",
+ "doc_type":"usermanual",
+ "kw":"Exporting Kafka Consumer Groups,Managing Consumer Groups,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Exporting Kafka Consumer Groups",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-0012.html",
+ "node_id":"kafka-ug-0012.xml",
+ "product_code":"kafka",
+ "code":"54",
+ "des":"You can delete a consumer group in either of the following ways:Method 1: Delete a consumer group on the console.Method 2: Use Kafka CLI to delete a consumer group. (Ensu",
+ "doc_type":"usermanual",
+ "kw":"Deleting a Kafka Consumer Group,Managing Consumer Groups,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Deleting a Kafka Consumer Group",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-180604011.html",
+ "node_id":"kafka-ug-180604011.xml",
+ "product_code":"kafka",
+ "code":"55",
+ "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "doc_type":"usermanual",
+ "kw":"Managing Instances",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Managing Instances",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-180604014.html",
+ "node_id":"kafka-ug-180604014.xml",
+ "product_code":"kafka",
+ "code":"56",
+ "des":"This section describes how to view the details, and modify the basic information of a Kafka instance on the console.After creating a Kafka instance, you can modify some p",
+ "doc_type":"usermanual",
+ "kw":"Viewing and Modifying Basic Information of a Kafka Instance,Managing Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Viewing and Modifying Basic Information of a Kafka Instance",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-0004.html",
+ "node_id":"kafka-ug-0004.xml",
+ "product_code":"kafka",
+ "code":"57",
+ "des":"This section describes how to view the disk usage of each broker on the Kafka console.This function is unavailable for single-node instances.Select the region where your ",
+ "doc_type":"usermanual",
+ "kw":"Viewing Kafka Disk Usage,Managing Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Viewing Kafka Disk Usage",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-200119002.html",
+ "node_id":"kafka-ug-200119002.xml",
+ "product_code":"kafka",
+ "code":"58",
+ "des":"After you initiate certain instance operations listed in Table 1, a background task will start for each operation. On the console, you can view the background task status",
+ "doc_type":"usermanual",
+ "kw":"Viewing Kafka Background Tasks,Managing Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Viewing Kafka Background Tasks",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka_ug_0024.html",
+ "node_id":"kafka_ug_0024.xml",
+ "product_code":"kafka",
+ "code":"59",
+ "des":"On the console, view sample code for creating and retrieving messages in Java, Go, and Python.Select the region where your Kafka instance is located.View sample code for ",
+ "doc_type":"usermanual",
+ "kw":"Viewing Sample Code of Kafka Production and Consumption,Managing Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Viewing Sample Code of Kafka Production and Consumption",
"githuburl":""
},
{
"uri":"kafka-ug-0007.html",
- "product_code":"dms",
- "code":"49",
+ "node_id":"kafka-ug-0007.xml",
+ "product_code":"kafka",
+ "code":"60",
"des":"Your Kafka instances, topics, and consumers come with default configuration parameter settings. You can modify common parameters on the DMS console. For details about par",
"doc_type":"usermanual",
- "kw":"Modifying Kafka Parameters,User Guide",
- "title":"Modifying Kafka Parameters",
+ "kw":"Modifying Kafka Instance Configuration Parameters,Managing Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Modifying Kafka Instance Configuration Parameters",
+ "githuburl":""
+ },
+ {
+ "uri":"TagManagement.html",
+ "node_id":"tagmanagement.xml",
+ "product_code":"kafka",
+ "code":"61",
+ "des":"Tags facilitate Kafka instance identification and management.You can add tags to a Kafka instance when creating the instance or add tags on the Tags tab page of the creat",
+ "doc_type":"usermanual",
+ "kw":"Configuring Kafka Instance Tags,Managing Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Configuring Kafka Instance Tags",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-0053.html",
+ "node_id":"kafka-ug-0053.xml",
+ "product_code":"kafka",
+ "code":"62",
+ "des":"This section describes how to export the Kafka instance list from the console.Select the region where your Kafka instance is located.Select the desired instances and choo",
+ "doc_type":"usermanual",
+ "kw":"Exporting the Kafka Instance List,Managing Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Exporting the Kafka Instance List",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-180604015.html",
+ "node_id":"kafka-ug-180604015.xml",
+ "product_code":"kafka",
+ "code":"63",
+ "des":"Restart one or more Kafka instances at a time on the DMS console.When a Kafka instance is being restarted, message retrieval and creation requests of clients will be reje",
+ "doc_type":"usermanual",
+ "kw":"Restarting a Kafka Instance,Managing Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Restarting a Kafka Instance",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-180604016.html",
+ "node_id":"kafka-ug-180604016.xml",
+ "product_code":"kafka",
+ "code":"64",
+ "des":"Delete one or more Kafka instances at a time on the DMS console.Deleting a Kafka instance will delete the data in the instance without any backup. Exercise caution when p",
+ "doc_type":"usermanual",
+ "kw":"Deleting Kafka Instances,Managing Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Deleting Kafka Instances",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-ug-181221001.html",
+ "node_id":"kafka-ug-181221001.xml",
+ "product_code":"kafka",
+ "code":"65",
+ "des":"After creating a Kafka instance, you can increase its specifications. Table 1 lists available modification options. Only one object can be modified per operation: broker ",
+ "doc_type":"usermanual",
+ "kw":"Modifying Kafka Instance Specifications,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Modifying Kafka Instance Specifications",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka_ug_0016.html",
+ "node_id":"kafka_ug_0016.xml",
+ "product_code":"kafka",
+ "code":"66",
+ "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "doc_type":"usermanual",
+ "kw":"Migrating Data",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Migrating Data",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-bp-migration.html",
+ "node_id":"kafka-bp-migration.xml",
+ "product_code":"kafka",
+ "code":"67",
+ "des":"You can migrate Kafka services to connect message producers and consumers to a new Kafka instance and can even migrate persisted message data to the new Kafka instance. K",
+ "doc_type":"usermanual",
+ "kw":"Kafka Data Migration Overview,Migrating Data,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Kafka Data Migration Overview",
"githuburl":""
},
{
"uri":"kafka-ug-180413001.html",
- "product_code":"dms",
- "code":"50",
+ "node_id":"kafka-ug-180413001.xml",
+ "product_code":"kafka",
+ "code":"68",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
- "kw":"Monitoring",
- "title":"Monitoring",
+ "kw":"Monitoring and Alarms",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Monitoring and Alarms",
"githuburl":""
},
{
"uri":"kafka-ug-190605001.html",
- "product_code":"dms",
- "code":"51",
+ "node_id":"kafka-ug-190605001.xml",
+ "product_code":"kafka",
+ "code":"69",
"des":"Cloud Eye monitors Kafka instance metrics in real time. You can view these metrics on the Cloud Eye console.At least one Kafka instance has been created. The instance has",
"doc_type":"usermanual",
- "kw":"Viewing Metrics,Monitoring,User Guide",
- "title":"Viewing Metrics",
+ "kw":"Viewing Kafka Monitoring Metrics,Monitoring and Alarms,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Viewing Kafka Monitoring Metrics",
"githuburl":""
},
{
- "uri":"dms-ug-180413002.html",
- "product_code":"dms",
- "code":"52",
- "des":"This section describes DMS metrics reported to Cloud Eye as well as their namespace and dimensions. You can use the Cloud Eye console to query the Kafka metrics and alarm",
+ "uri":"kafka-ug-180413002.html",
+ "node_id":"kafka-ug-180413002.xml",
+ "product_code":"kafka",
+ "code":"70",
+ "des":"This section describes metrics reported by DMS to Cloud Eye as well as their namespaces and dimensions. You can use the Cloud Eye console or APIs to query the Kafka metri",
"doc_type":"usermanual",
- "kw":"Kafka Metrics,Monitoring,User Guide",
+ "kw":"Kafka Metrics,Monitoring and Alarms,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Kafka Metrics",
"githuburl":""
},
{
- "uri":"kafka-ug-180418001.html",
- "product_code":"dms",
- "code":"53",
- "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "uri":"kafka-ug-180524001.html",
+ "node_id":"kafka-ug-180524001.xml",
+ "product_code":"kafka",
+ "code":"71",
+ "des":"This section describes the alarm rules of some metrics and how to configure them. In actual services, you are advised to configure alarm rules for metrics based on the fo",
"doc_type":"usermanual",
- "kw":"Auditing",
- "title":"Auditing",
+ "kw":"Configuring a Kafka Alarm Rule,Monitoring and Alarms,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Configuring a Kafka Alarm Rule",
"githuburl":""
},
{
"uri":"kafka-ug-180418002.html",
- "product_code":"dms",
- "code":"54",
- "des":"With Cloud Trace Service (CTS), you can record operations associated with DMS for later query, audit, and backtrack operations.",
+ "node_id":"kafka-ug-180418002.xml",
+ "product_code":"kafka",
+ "code":"72",
+ "des":"With Cloud Trace Service (CTS), you can record operations associated with DMS for later query, audit, and backtrack operations.CTS has been enabled.See Querying Real-Time",
"doc_type":"usermanual",
- "kw":"Operations Logged by CTS,Auditing,User Guide",
- "title":"Operations Logged by CTS",
- "githuburl":""
- },
- {
- "uri":"kafka-ug-180418003.html",
- "product_code":"dms",
- "code":"55",
- "des":"This section describes how to view operation records of the last 7 days on the CTS console.Select the region where your Kafka instance is located.Trace Source: Select DMS",
- "doc_type":"usermanual",
- "kw":"Viewing Audit Logs,Auditing,User Guide",
- "title":"Viewing Audit Logs",
+ "kw":"Viewing Kafka Audit Logs,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "opensource":"true",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Viewing Kafka Audit Logs",
"githuburl":""
},
{
"uri":"kafka-ug-0723004.html",
- "product_code":"dms",
- "code":"56",
+ "node_id":"kafka-ug-0723004.xml",
+ "product_code":"kafka",
+ "code":"73",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"FAQs",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"FAQs",
"githuburl":""
},
{
"uri":"kafka-faq-191030002.html",
- "product_code":"dms",
- "code":"57",
+ "node_id":"kafka-faq-191030002.xml",
+ "product_code":"kafka",
+ "code":"74",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Instances",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Instances",
"githuburl":""
},
{
"uri":"kafka-faq-200426002.html",
- "product_code":"dms",
- "code":"58",
+ "node_id":"kafka-faq-200426002.xml",
+ "product_code":"kafka",
+ "code":"75",
"des":"To improve the reliability of a Kafka instance, you are advised to select three AZs or more when creating the instance. You cannot select two AZs.Each Kafka instance cont",
"doc_type":"usermanual",
"kw":"Why Can't I Select Two AZs?,Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Why Can't I Select Two AZs?",
"githuburl":""
},
{
"uri":"kafka-faq-200426003.html",
- "product_code":"dms",
- "code":"59",
- "des":"This may be because you do not have the Server Administrator and VPC Administrator permissions. For details about how to add permissions to a user group, see \"User and Us",
+ "node_id":"kafka-faq-200426003.xml",
+ "product_code":"kafka",
+ "code":"76",
+ "des":"This may be because you do not have the Server Administrator and VPC Administrator permissions. For details about how to add permissions to a user group, see Viewing and ",
"doc_type":"usermanual",
"kw":"Why Can't I View the Subnet and Security Group Information When Creating a DMS Instance?,Instances,U",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Why Can't I View the Subnet and Security Group Information When Creating a DMS Instance?",
"githuburl":""
},
{
"uri":"kafka-faq-200426005.html",
- "product_code":"dms",
- "code":"60",
- "des":"The storage space is the space for storing messages (including messages in replicas), logs and metadata. When specifying storage space, specify the disk type and disk siz",
+ "node_id":"kafka-faq-200426005.xml",
+ "product_code":"kafka",
+ "code":"77",
+ "des":"The storage space is the space for storing messages (including messages in replicas), logs and metadata. To select a storage space, specify the disk type and disk size. F",
"doc_type":"usermanual",
"kw":"How Do I Select Storage Space for a Kafka Instance?,Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"How Do I Select Storage Space for a Kafka Instance?",
"githuburl":""
},
{
"uri":"kafka-faq-200426006.html",
- "product_code":"dms",
- "code":"61",
- "des":"High I/O: The average latency is 6 to 10 ms, and the maximum bandwidth is 120 MB/s (read + write).Ultra-high I/O: The average latency is 1 to 3 ms, and the maximum bandwi",
+ "node_id":"kafka-faq-200426006.xml",
+ "product_code":"kafka",
+ "code":"78",
+ "des":"High I/O: The average latency is 6 to 10 ms, and the maximum bandwidth is 150 MB/s (read + write).Ultra-high I/O: The average latency is 1 to 3 ms, and the maximum bandwi",
"doc_type":"usermanual",
"kw":"How Do I Choose Between High I/O and Ultra-high I/O?,Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"How Do I Choose Between High I/O and Ultra-high I/O?",
"githuburl":""
},
{
"uri":"kafka-faq-200426007.html",
- "product_code":"dms",
- "code":"62",
+ "node_id":"kafka-faq-200426007.xml",
+ "product_code":"kafka",
+ "code":"79",
"des":"The following policies are supported:Stop productionWhen the memory usage reaches the disk capacity threshold (95%), new messages will no longer be created, but existing ",
"doc_type":"usermanual",
"kw":"Which Capacity Threshold Policy Should I Use?,Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Which Capacity Threshold Policy Should I Use?",
"githuburl":""
},
{
"uri":"kafka-faq-200426008.html",
- "product_code":"dms",
- "code":"63",
- "des":"Kafka v1.1.0, v2.3.0, and v2.7.",
+ "node_id":"kafka-faq-200426008.xml",
+ "product_code":"kafka",
+ "code":"80",
+ "des":"Kafka v2.3.0, v2.7, and v3.x.For details about how to create a Kafka instance, see Creating a Kafka Instance.",
"doc_type":"usermanual",
"kw":"Which Kafka Versions Are Supported?,Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Which Kafka Versions Are Supported?",
"githuburl":""
},
{
"uri":"kafka-faq-200426009.html",
- "product_code":"dms",
- "code":"64",
+ "node_id":"kafka-faq-200426009.xml",
+ "product_code":"kafka",
+ "code":"81",
"des":"Kafka instances are managed using ZooKeeper. Opening ZooKeeper may cause misoperations and service losses. ZooKeeper is used only within Kafka clusters and does not provi",
"doc_type":"usermanual",
"kw":"What Is the ZooKeeper Address of a Kafka Instance?,Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"What Is the ZooKeeper Address of a Kafka Instance?",
"githuburl":""
},
{
"uri":"kafka-faq-200426010.html",
- "product_code":"dms",
- "code":"65",
- "des":"Yes. A Kafka instance is a cluster that consists of three or more brokers.",
+ "node_id":"kafka-faq-200426010.xml",
+ "product_code":"kafka",
+ "code":"82",
+ "des":"Kafka instances are classified into single-node and cluster types. A single-node instance has only one broker in single-node mode. A cluster instance consists of three or",
"doc_type":"usermanual",
"kw":"Are Kafka Instances in Cluster Mode?,Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Are Kafka Instances in Cluster Mode?",
"githuburl":""
},
{
"uri":"kafka-faq-200426011.html",
- "product_code":"dms",
- "code":"66",
+ "node_id":"kafka-faq-200426011.xml",
+ "product_code":"kafka",
+ "code":"83",
"des":"No. You must access a Kafka instance through one of the following ports:Accessing a Kafka instance without SASL:The port varies with the access mode:Intra-VPC access: por",
"doc_type":"usermanual",
- "kw":"Can I Modify the Connection Address for Accessing a Kafka Instance?,Instances,User Guide",
- "title":"Can I Modify the Connection Address for Accessing a Kafka Instance?",
+ "kw":"Can I Modify the Port for Accessing a Kafka Instance?,Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Can I Modify the Port for Accessing a Kafka Instance?",
"githuburl":""
},
{
"uri":"kafka-faq-200426012.html",
- "product_code":"dms",
- "code":"67",
+ "node_id":"kafka-faq-200426012.xml",
+ "product_code":"kafka",
+ "code":"84",
"des":"The certificates are valid for more than 15 years. You do not need to worry about certificate expiration. The certificates are used for one-way authentication when enabli",
"doc_type":"usermanual",
"kw":"How Long Are Kafka SSL Certificates Valid for?,Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"How Long Are Kafka SSL Certificates Valid for?",
"githuburl":""
},
{
"uri":"kafka-faq-200426013.html",
- "product_code":"dms",
- "code":"68",
+ "node_id":"kafka-faq-200426013.xml",
+ "product_code":"kafka",
+ "code":"85",
"des":"Unfortunately, you cannot synchronize two Kafka instances in real time. To migrate services from one instance to another, create messages to both instances. After all mes",
"doc_type":"usermanual",
"kw":"How Do I Synchronize Data from One Kafka Instance to Another?,Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"How Do I Synchronize Data from One Kafka Instance to Another?",
"githuburl":""
},
{
"uri":"kafka-faq-200426014.html",
- "product_code":"dms",
- "code":"69",
- "des":"The SASL_SSL setting cannot be changed once the instance has been created. Be careful when configuring this setting during instance creation. If you need to change the se",
+ "node_id":"kafka-faq-200426014.xml",
+ "product_code":"kafka",
+ "code":"86",
+ "des":"The SASL_SSL setting can be changed for cluster instances (see Configuring Plaintext or Ciphertext Access to Kafka Instances), but cannot be changed for single-node insta",
"doc_type":"usermanual",
"kw":"How Do I Change the SASL_SSL Setting of a Kafka Instance?,Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"How Do I Change the SASL_SSL Setting of a Kafka Instance?",
"githuburl":""
},
+ {
+ "uri":"kafka_faq_0052.html",
+ "node_id":"kafka_faq_0052.xml",
+ "product_code":"kafka",
+ "code":"87",
+ "des":"After an instance is created, its SASL mechanism cannot be modified. If you want to change it, create an instance again.",
+ "doc_type":"usermanual",
+ "kw":"How Do I Modify the SASL Mechanism?,Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"How Do I Modify the SASL Mechanism?",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka_faq_0062.html",
+ "node_id":"kafka_faq_0062.xml",
+ "product_code":"kafka",
+ "code":"88",
+ "des":"The security protocol can be changed on the console. In the Connection area on the Kafka instance details page, disable Ciphertext Access and then configure security prot",
+ "doc_type":"usermanual",
+ "kw":"How Do I Change the Security Protocol?,Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"How Do I Change the Security Protocol?",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-faq-0008.html",
+ "node_id":"kafka-faq-0008.xml",
+ "product_code":"kafka",
+ "code":"89",
+ "des":"No. A Kafka instance will not be restarted if you modify its enterprise project.",
+ "doc_type":"usermanual",
+ "kw":"Will a Kafka Instance Be Restarted After Its Enterprise Project Is Modified?,Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Will a Kafka Instance Be Restarted After Its Enterprise Project Is Modified?",
+ "githuburl":""
+ },
{
"uri":"kafka-faq-0015.html",
- "product_code":"dms",
- "code":"70",
+ "node_id":"kafka-faq-0015.xml",
+ "product_code":"kafka",
+ "code":"90",
"des":"Kafka brokers and ZooKeeper are deployed on the same VM.",
"doc_type":"usermanual",
"kw":"Are Kafka Brokers and ZooKeeper Deployed on the Same VM or on Different VMs?,Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Are Kafka Brokers and ZooKeeper Deployed on the Same VM or on Different VMs?",
"githuburl":""
},
{
"uri":"kafka-faq-0020.html",
- "product_code":"dms",
- "code":"71",
+ "node_id":"kafka-faq-0020.xml",
+ "product_code":"kafka",
+ "code":"91",
"des":"For security purposes, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 is supported.",
"doc_type":"usermanual",
"kw":"Which Cipher Suites Are Supported by Kafka?,Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Which Cipher Suites Are Supported by Kafka?",
"githuburl":""
},
{
"uri":"kafka-faq-0023.html",
- "product_code":"dms",
- "code":"72",
- "des":"No. The AZ configuration cannot be changed once the instance is created. To use multiple AZs, create another instance.",
+ "node_id":"kafka-faq-0023.xml",
+ "product_code":"kafka",
+ "code":"92",
+ "des":"No. The AZ cannot be changed once the instance is created. To use multiple AZs, create another instance.",
"doc_type":"usermanual",
- "kw":"Can I Change an Instance from Single-AZ Deployment to Multi-AZ Deployment?,Instances,User Guide",
- "title":"Can I Change an Instance from Single-AZ Deployment to Multi-AZ Deployment?",
+ "kw":"Can I Change Single-AZ Deployment to Multi-AZ Deployment for an Instance?,Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Can I Change Single-AZ Deployment to Multi-AZ Deployment for an Instance?",
"githuburl":""
},
{
"uri":"kafka-faq-0025.html",
- "product_code":"dms",
- "code":"73",
+ "node_id":"kafka-faq-0025.xml",
+ "product_code":"kafka",
+ "code":"93",
"des":"DMS for Kafka supports cross-AZ disaster recovery. If you select multiple AZs when creating an instance, cross-AZ disaster recovery will be available.You can view the AZs",
"doc_type":"usermanual",
- "kw":"Does DMS for Kafka Support Cross-AZ Disaster Recovery? Where Can I View the AZs Configured for an Ex",
- "title":"Does DMS for Kafka Support Cross-AZ Disaster Recovery? Where Can I View the AZs Configured for an Existing Instance?",
+ "kw":"Does DMS for Kafka Support Cross-AZ Disaster Recovery? Where Can I Check Whether an Existing Instanc",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Does DMS for Kafka Support Cross-AZ Disaster Recovery? Where Can I Check Whether an Existing Instance is Across-AZs?",
"githuburl":""
},
{
"uri":"kafka-faq-0030.html",
- "product_code":"dms",
- "code":"74",
+ "node_id":"kafka-faq-0030.xml",
+ "product_code":"kafka",
+ "code":"94",
"des":"Yes.",
"doc_type":"usermanual",
"kw":"Do Kafka Instances Support Disk Encryption?,Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Do Kafka Instances Support Disk Encryption?",
"githuburl":""
},
{
"uri":"kafka-faq-0036.html",
- "product_code":"dms",
- "code":"75",
+ "node_id":"kafka-faq-0036.xml",
+ "product_code":"kafka",
+ "code":"95",
"des":"No. Once an instance is created, its VPC and subnet cannot be changed.",
"doc_type":"usermanual",
"kw":"Can I Change the VPC and Subnet After a Kafka Instance Is Created?,Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Can I Change the VPC and Subnet After a Kafka Instance Is Created?",
"githuburl":""
},
{
"uri":"kafka-faq-0037.html",
- "product_code":"dms",
- "code":"76",
+ "node_id":"kafka-faq-0037.xml",
+ "product_code":"kafka",
+ "code":"96",
"des":"You can find Kafka Streams use cases on the official Kafka website.",
"doc_type":"usermanual",
"kw":"Where Can I Find Kafka Streams Use Cases?,Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Where Can I Find Kafka Streams Use Cases?",
"githuburl":""
},
{
"uri":"kafka-faq-0040.html",
- "product_code":"dms",
- "code":"77",
+ "node_id":"kafka-faq-0040.xml",
+ "product_code":"kafka",
+ "code":"97",
"des":"No. Kafka instances cannot be upgraded once they are created. To use a higher Kafka version, create another Kafka instance.",
"doc_type":"usermanual",
"kw":"Can I Upgrade Kafka Instances?,Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Can I Upgrade Kafka Instances?",
"githuburl":""
},
{
"uri":"kafka_faq_0046.html",
- "product_code":"dms",
- "code":"78",
+ "node_id":"kafka_faq_0046.xml",
+ "product_code":"kafka",
+ "code":"98",
"des":"On the DMS console, click the name of the target Kafka instance. Disable Public Access in the Connection section on the Basic Information tab page, and then enable it aga",
"doc_type":"usermanual",
"kw":"How Do I Bind an EIP Again?,Instances,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"How Do I Bind an EIP Again?",
"githuburl":""
},
+ {
+ "uri":"kafka_faq_0053.html",
+ "node_id":"kafka_faq_0053.xml",
+ "product_code":"kafka",
+ "code":"99",
+ "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "doc_type":"usermanual",
+ "kw":"Specification Modification",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Specification Modification",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-faq-0035.html",
+ "node_id":"kafka-faq-0035.xml",
+ "product_code":"kafka",
+ "code":"100",
+ "des":"Table 1 describes the impact of increasing specifications. It takes 5 to 10 minutes to modify specifications on one broker. The more brokers, the longer time the modifica",
+ "doc_type":"usermanual",
+ "kw":"Does Specification Modification Affect Services?,Specification Modification,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Does Specification Modification Affect Services?",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka_faq_0054.html",
+ "node_id":"kafka_faq_0054.xml",
+ "product_code":"kafka",
+ "code":"101",
+ "des":"No. Data will not be migrated when you increase specifications.",
+ "doc_type":"usermanual",
+ "kw":"Will Data Migration Be Involved When I Increase Specifications?,Specification Modification,User Guid",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Will Data Migration Be Involved When I Increase Specifications?",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka_faq_0056.html",
+ "node_id":"kafka_faq_0056.xml",
+ "product_code":"kafka",
+ "code":"102",
+ "des":"Possible cause: When you increase the broker flavor, a rolling restart is performed on brokers. During the restart, partition leaders are changed. The producer has cached",
+ "doc_type":"usermanual",
+ "kw":"Why Does Message Production Fail During Scaling?,Specification Modification,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Why Does Message Production Fail During Scaling?",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka_faq_0057.html",
+ "node_id":"kafka_faq_0057.xml",
+ "product_code":"kafka",
+ "code":"103",
+ "des":"Symptom: Specifications fail to be increased, and a message is displayed indicating that the underlying ECS/EVS resources are insufficient. However, the required ECSs can",
+ "doc_type":"usermanual",
+ "kw":"What Can I Do When I Fail to Increase Specifications Due to Insufficient Resources?,Specification Mo",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"What Can I Do When I Fail to Increase Specifications Due to Insufficient Resources?",
+ "githuburl":""
+ },
{
"uri":"kafka-faq-191030001.html",
- "product_code":"dms",
- "code":"79",
+ "node_id":"kafka-faq-191030001.xml",
+ "product_code":"kafka",
+ "code":"104",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Connections",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Connections",
"githuburl":""
},
- {
- "uri":"kafka-faq-0604001.html",
- "product_code":"dms",
- "code":"80",
- "des":"This section describes how to troubleshoot Kafka connection problems.If the connection to a Kafka instance is abnormal, perform the following operations to troubleshoot t",
- "doc_type":"usermanual",
- "kw":"Troubleshooting Kafka Connection Exceptions,Connections,User Guide",
- "title":"Troubleshooting Kafka Connection Exceptions",
- "githuburl":""
- },
{
"uri":"kafka-faq-180604024.html",
- "product_code":"dms",
- "code":"81",
+ "node_id":"kafka-faq-180604024.xml",
+ "product_code":"kafka",
+ "code":"105",
"des":"Kafka instances can be accessed within a VPC, across VPCs, through DNAT, or over public networks. Before accessing a Kafka instance, configure a security group.If they us",
"doc_type":"usermanual",
"kw":"How Do I Select and Configure a Security Group?,Connections,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"How Do I Select and Configure a Security Group?",
"githuburl":""
},
{
"uri":"kafka-faq-200426015.html",
- "product_code":"dms",
- "code":"82",
+ "node_id":"kafka-faq-200426015.xml",
+ "product_code":"kafka",
+ "code":"106",
"des":"Yes. For details, see the instance access instructions.",
"doc_type":"usermanual",
"kw":"Can I Access a Kafka Instance Over a Public Network?,Connections,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Can I Access a Kafka Instance Over a Public Network?",
"githuburl":""
},
{
"uri":"kafka-faq-200426016.html",
- "product_code":"dms",
- "code":"83",
- "des":"The number of connection addresses of a Kafka instance is the same as the number of brokers of the instance.",
+ "node_id":"kafka-faq-200426016.xml",
+ "product_code":"kafka",
+ "code":"107",
+ "des":"The number of connection addresses of a Kafka instance is the same as the number of brokers of the instance. The following table lists the number of brokers corresponding",
"doc_type":"usermanual",
"kw":"How Many Connection Addresses Does a Kafka Instance Have by Default?,Connections,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"How Many Connection Addresses Does a Kafka Instance Have by Default?",
"githuburl":""
},
{
"uri":"kafka-faq-200426017.html",
- "product_code":"dms",
- "code":"84",
+ "node_id":"kafka-faq-200426017.xml",
+ "product_code":"kafka",
+ "code":"108",
"des":"Yes. You can access a Kafka instance across regions over a public network or by using direct connections.",
"doc_type":"usermanual",
"kw":"Do Kafka Instances Support Cross-Region Access?,Connections,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Do Kafka Instances Support Cross-Region Access?",
"githuburl":""
},
{
"uri":"kafka-faq-200426019.html",
- "product_code":"dms",
- "code":"85",
+ "node_id":"kafka-faq-200426019.xml",
+ "product_code":"kafka",
+ "code":"109",
"des":"Yes. You can use one of the following methods to access a Kafka instance across VPCs:Establish a VPC peering connection to allow two VPCs to communicate with each other. ",
"doc_type":"usermanual",
"kw":"Do Kafka Instances Support Cross-VPC Access?,Connections,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Do Kafka Instances Support Cross-VPC Access?",
"githuburl":""
},
{
"uri":"kafka-faq-200426020.html",
- "product_code":"dms",
- "code":"86",
- "des":"Yes.If the client and the instance are in the same VPC, cross-subnet access is supported.",
+ "node_id":"kafka-faq-200426020.xml",
+ "product_code":"kafka",
+ "code":"110",
+ "des":"Yes.If the client and the instance are in the same VPC, cross-subnet access is supported. By default, subnets in the same VPC can communicate with each other.",
"doc_type":"usermanual",
"kw":"Do Kafka Instances Support Cross-Subnet Access?,Connections,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Do Kafka Instances Support Cross-Subnet Access?",
"githuburl":""
},
{
"uri":"kafka-faq-200426023.html",
- "product_code":"dms",
- "code":"87",
+ "node_id":"kafka-faq-200426023.xml",
+ "product_code":"kafka",
+ "code":"111",
"des":"No, Kerberos authentication is not supported. Kafka supports client authentication with SASL and API calling authentication using tokens and AK/SK.To access an instance i",
"doc_type":"usermanual",
"kw":"Does DMS for Kafka Support Authentication with Kerberos?,Connections,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Does DMS for Kafka Support Authentication with Kerberos?",
"githuburl":""
},
{
"uri":"kafka-faq-200708002.html",
- "product_code":"dms",
- "code":"88",
- "des":"Yes. No password is required for accessing a Kafka instance with SASL disabled. For details, see Accessing a Kafka Instance Without SASL.",
+ "node_id":"kafka-faq-200708002.xml",
+ "product_code":"kafka",
+ "code":"112",
+ "des":"Yes. No password is required when accessing a Kafka instance in plaintext. For details, see Connecting to Kafka Using the Client (Plaintext Access).",
"doc_type":"usermanual",
"kw":"Does DMS for Kafka Support Password-Free Access?,Connections,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Does DMS for Kafka Support Password-Free Access?",
"githuburl":""
},
- {
- "uri":"kafka-connect-other.html",
- "product_code":"dms",
- "code":"89",
- "des":"Kafka instances are fully compatible with open-source clients. You can obtain clients in other programming languages and access your instance as instructed by the officia",
- "doc_type":"usermanual",
- "kw":"Obtaining Kafka Clients,Connections,User Guide",
- "title":"Obtaining Kafka Clients",
- "githuburl":""
- },
{
"uri":"kafka-faq-0001.html",
- "product_code":"dms",
- "code":"90",
+ "node_id":"kafka-faq-0001.xml",
+ "product_code":"kafka",
+ "code":"113",
"des":"Click the name of your Kafka instance. In the Connection section on the Basic Information tab page, view Instance Address (Public Network).For details about how to connec",
"doc_type":"usermanual",
"kw":"How Do I Obtain the Public Access Address After Public Access Is Enabled?,Connections,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"How Do I Obtain the Public Access Address After Public Access Is Enabled?",
"githuburl":""
},
{
"uri":"kafka-faq-0026.html",
- "product_code":"dms",
- "code":"91",
+ "node_id":"kafka-faq-0026.xml",
+ "product_code":"kafka",
+ "code":"114",
"des":"No.",
"doc_type":"usermanual",
"kw":"Does DMS for Kafka Support Authentication on Clients by the Server?,Connections,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Does DMS for Kafka Support Authentication on Clients by the Server?",
"githuburl":""
},
{
"uri":"kafka-faq-0027.html",
- "product_code":"dms",
- "code":"92",
+ "node_id":"kafka-faq-0027.xml",
+ "product_code":"kafka",
+ "code":"115",
"des":"No. You can only use JKS certificates for connecting to instances in Java.",
"doc_type":"usermanual",
"kw":"Can I Use PEM SSL Truststore When Connecting to a Kafka Instance with SASL_SSL Enabled?,Connections,",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Can I Use PEM SSL Truststore When Connecting to a Kafka Instance with SASL_SSL Enabled?",
"githuburl":""
},
{
"uri":"kafka-faq-0028.html",
- "product_code":"dms",
- "code":"93",
+ "node_id":"kafka-faq-0028.xml",
+ "product_code":"kafka",
+ "code":"116",
"des":"JKS certificates are used for connecting to instances in Java and CRT certificates are used for connecting to instances in Python.",
"doc_type":"usermanual",
"kw":"What Are the Differences Between JKS and CRT Certificates?,Connections,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"What Are the Differences Between JKS and CRT Certificates?",
"githuburl":""
},
{
"uri":"kafka-faq-0029.html",
- "product_code":"dms",
- "code":"94",
+ "node_id":"kafka-faq-0029.xml",
+ "product_code":"kafka",
+ "code":"117",
"des":"TLS 1.2.",
"doc_type":"usermanual",
"kw":"Which TLS Version Does DMS for Kafka Support?,Connections,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Which TLS Version Does DMS for Kafka Support?",
"githuburl":""
},
{
"uri":"kafka-faq-0033.html",
- "product_code":"dms",
- "code":"95",
- "des":"Yes. The maximum allowed number of client connections varies by instance specifications.If the bandwidth is 100 MB/s, a maximum of 3000 client connections are allowed.If ",
+ "node_id":"kafka-faq-0033.xml",
+ "product_code":"kafka",
+ "code":"118",
+ "des":"Yes. The maximum allowed number of client connections varies by instance specifications.",
"doc_type":"usermanual",
"kw":"Is There a Limit on the Number of Client Connections to a Kafka Instance?,Connections,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Is There a Limit on the Number of Client Connections to a Kafka Instance?",
"githuburl":""
},
{
"uri":"kafka-faq-0034.html",
- "product_code":"dms",
- "code":"96",
+ "node_id":"kafka-faq-0034.xml",
+ "product_code":"kafka",
+ "code":"119",
"des":"Each Kafka broker allows a maximum of 1000 connections from each IP address by default. Excess connections will be rejected. You can change the limit by referring to Modi",
"doc_type":"usermanual",
"kw":"How Many Connections Are Allowed from Each IP Address?,Connections,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"How Many Connections Are Allowed from Each IP Address?",
"githuburl":""
},
+ {
+ "uri":"kafka_faq_0048.html",
+ "node_id":"kafka_faq_0048.xml",
+ "product_code":"kafka",
+ "code":"120",
+ "des":"No, and you cannot specify the IP addresses.",
+ "doc_type":"usermanual",
+ "kw":"Can I Change the Private Network Addresses of a Kafka Instance?,Connections,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Can I Change the Private Network Addresses of a Kafka Instance?",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka_faq_0051.html",
+ "node_id":"kafka_faq_0051.xml",
+ "product_code":"kafka",
+ "code":"121",
+ "des":"Yes. All Kafka instances and users use the same SSL certificate.To obtain the SSL certificate, perform the following steps:",
+ "doc_type":"usermanual",
+ "kw":"Is the Same SSL Certificate Used for Different Instances?,Connections,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Is the Same SSL Certificate Used for Different Instances?",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka_faq_0061.html",
+ "node_id":"kafka_faq_0061.xml",
+ "product_code":"kafka",
+ "code":"122",
+ "des":"If a Sarama client is used to send and receive messages, the following issues may occur:Sarama cannot detect partition changes. Adding topic partitions requires client re",
+ "doc_type":"usermanual",
+ "kw":"Why Is It Not Recommended to Use a Sarama Client for Messaging?,Connections,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Why Is It Not Recommended to Use a Sarama Client for Messaging?",
+ "githuburl":""
+ },
{
"uri":"kafka-faq-191030003.html",
- "product_code":"dms",
- "code":"97",
+ "node_id":"kafka-faq-191030003.xml",
+ "product_code":"kafka",
+ "code":"123",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Topics and Partitions",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Topics and Partitions",
"githuburl":""
},
{
"uri":"kafka-faq-200426024.html",
- "product_code":"dms",
- "code":"98",
+ "node_id":"kafka-faq-200426024.xml",
+ "product_code":"kafka",
+ "code":"124",
"des":"The number of topics is related to the total number of topic partitions and the number of partitions in each topic. There is an upper limit on the aggregate number of par",
"doc_type":"usermanual",
"kw":"Is There a Limit on the Number of Topics in a Kafka Instance?,Topics and Partitions,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Is There a Limit on the Number of Topics in a Kafka Instance?",
"githuburl":""
},
{
"uri":"kafka-faq-200426025.html",
- "product_code":"dms",
- "code":"99",
+ "node_id":"kafka-faq-200426025.xml",
+ "product_code":"kafka",
+ "code":"125",
"des":"Kafka manages messages by partition. If there are too many partitions, message creation, storage, and retrieval will be fragmented, affecting the performance and stabilit",
"doc_type":"usermanual",
"kw":"Why Is Partition Quantity Limited?,Topics and Partitions,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Why Is Partition Quantity Limited?",
"githuburl":""
},
{
"uri":"kafka-faq-200426101.html",
- "product_code":"dms",
- "code":"100",
+ "node_id":"kafka-faq-200426101.xml",
+ "product_code":"kafka",
+ "code":"126",
"des":"No. If you want to use fewer partitions, delete the corresponding topic, create another one, and specify the desired number of partitions.",
"doc_type":"usermanual",
- "kw":"Can I Change the Partition Quantity?,Topics and Partitions,User Guide",
- "title":"Can I Change the Partition Quantity?",
+ "kw":"Can I Reduce the Partition Quantity?,Topics and Partitions,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Can I Reduce the Partition Quantity?",
"githuburl":""
},
{
"uri":"kafka-faq-200426026.html",
- "product_code":"dms",
- "code":"101",
- "des":"Possible cause: The aggregate number of partitions of created topics has reached the upper limit. The maximum number of partitions varies with instance specifications. Fo",
+ "node_id":"kafka-faq-200426026.xml",
+ "product_code":"kafka",
+ "code":"127",
+ "des":"Possible cause: The aggregate number of partitions of created topics has reached the upper limit. The upper limit on partitions varies by instance specifications. For det",
"doc_type":"usermanual",
"kw":"Why Do I Fail to Create Topics?,Topics and Partitions,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Why Do I Fail to Create Topics?",
"githuburl":""
},
{
"uri":"kafka-faq-200426027.html",
- "product_code":"dms",
- "code":"102",
+ "node_id":"kafka-faq-200426027.xml",
+ "product_code":"kafka",
+ "code":"128",
"des":"Automatic topic creation is supported, but batch topic import is not supported. You can only export topics in batches.Enable automatic topic creation using one of the fol",
"doc_type":"usermanual",
"kw":"Do Kafka Instances Support Batch Importing Topics or Automatic Topic Creation?,Topics and Partitions",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Do Kafka Instances Support Batch Importing Topics or Automatic Topic Creation?",
"githuburl":""
},
{
"uri":"kafka-faq-200426028.html",
- "product_code":"dms",
- "code":"103",
- "des":"This may be because automatic topic creation has been enabled and a consumer is connecting to the topic. If no existing topics are available for message creation, new top",
+ "node_id":"kafka-faq-200426028.xml",
+ "product_code":"kafka",
+ "code":"129",
+ "des":"Possible cause: Automatic topic creation has been enabled and a consumer is connecting to the topic. If no existing topics are available for message creation, new topics ",
"doc_type":"usermanual",
"kw":"Why Do Deleted Topics Still Exist?,Topics and Partitions,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Why Do Deleted Topics Still Exist?",
"githuburl":""
},
{
"uri":"kafka-faq-200426030.html",
- "product_code":"dms",
- "code":"104",
- "des":"Yes. Use either of the following methods to check the disk space used by a topic:Click next to the Kafka instance name to go to the Cloud Eye console. On the Queues tab ",
+ "node_id":"kafka-faq-200426030.xml",
+ "product_code":"kafka",
+ "code":"130",
+ "des":"Yes. Use either of the following methods to check the disk space used by a topic:In the row containing the desired Kafka instance, click View Metric to go to the Cloud Ey",
"doc_type":"usermanual",
"kw":"Can I View the Disk Space Used by a Topic?,Topics and Partitions,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Can I View the Disk Space Used by a Topic?",
"githuburl":""
},
{
"uri":"kafka-faq-200426032.html",
- "product_code":"dms",
- "code":"105",
+ "node_id":"kafka-faq-200426032.xml",
+ "product_code":"kafka",
+ "code":"131",
"des":"If you have enabled SASL_SSL for your Kafka instance, you can configure ACL permissions for your topics. On the Topics tab page of the Kafka console, click Grant User Per",
"doc_type":"usermanual",
"kw":"Can I Add ACL Permissions for Topics?,Topics and Partitions,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Can I Add ACL Permissions for Topics?",
"githuburl":""
},
{
"uri":"kafka-faq-0003.html",
- "product_code":"dms",
- "code":"106",
- "des":"Messages are not deleted immediately after being retrieved. They are deleted only when the aging time expires.You can shorten the aging time.",
+ "node_id":"kafka-faq-0003.xml",
+ "product_code":"kafka",
+ "code":"132",
+ "des":"Messages are not deleted immediately after being retrieved. They are deleted only when the aging time expires.You can shorten the aging time or expand the storage space.",
"doc_type":"usermanual",
"kw":"What Should I Do If Kafka Storage Space Is Used Up Because Retrieved Messages Are Not Deleted?,Topic",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"What Should I Do If Kafka Storage Space Is Used Up Because Retrieved Messages Are Not Deleted?",
"githuburl":""
},
+ {
+ "uri":"kafka-faq-0005.html",
+ "node_id":"kafka-faq-0005.xml",
+ "product_code":"kafka",
+ "code":"133",
+ "des":"You can increase the partition quantity by adding brokers.To do so, go to the Kafka console, locate the row that contains the desired instance, and choose More > Modify S",
+ "doc_type":"usermanual",
+ "kw":"How Do I Increase the Partition Quantity?,Topics and Partitions,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"How Do I Increase the Partition Quantity?",
+ "githuburl":""
+ },
{
"uri":"kafka-faq-0010.html",
- "product_code":"dms",
- "code":"107",
- "des":"Yes. A Kafka instance will be restarted if you enable or disable automatic topic creation for it.",
+ "node_id":"kafka-faq-0010.xml",
+ "product_code":"kafka",
+ "code":"134",
+ "des":"Enabling or disabling automatic topic creation may cause instance restarts. For details, see the information displayed on the Kafka console.",
"doc_type":"usermanual",
"kw":"Will a Kafka Instance Be Restarted After Its Automatic Topic Creation Setting Is Modified?,Topics an",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Will a Kafka Instance Be Restarted After Its Automatic Topic Creation Setting Is Modified?",
"githuburl":""
},
- {
- "uri":"kafka-faq-0014.html",
- "product_code":"dms",
- "code":"108",
- "des":"On the Kafka console, click the name of your instance.In the Instance Information section of the Basic Information tab page, click next to Automatic Topic Creation to di",
- "doc_type":"usermanual",
- "kw":"How Do I Disable Automatic Topic Creation?,Topics and Partitions,User Guide",
- "title":"How Do I Disable Automatic Topic Creation?",
- "githuburl":""
- },
{
"uri":"kafka-faq-0031.html",
- "product_code":"dms",
- "code":"109",
- "des":"Yes, just simply unsubscribe from it on the Kafka client.",
+ "node_id":"kafka-faq-0031.xml",
+ "product_code":"kafka",
+ "code":"135",
+ "des":"Just simply unsubscribe from them on the Kafka client.",
"doc_type":"usermanual",
"kw":"Can I Delete Unnecessary Topics in a Consumer Group?,Topics and Partitions,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Can I Delete Unnecessary Topics in a Consumer Group?",
"githuburl":""
},
{
"uri":"kafka-faq-0038.html",
- "product_code":"dms",
- "code":"110",
+ "node_id":"kafka-faq-0038.xml",
+ "product_code":"kafka",
+ "code":"136",
"des":"Symptom: Different consumers in a consumer group have different topic permissions. When a consumer attempts to retrieve messages from a topic, the error message \"Not auth",
"doc_type":"usermanual",
"kw":"What Can I Do If a Consumer Fails to Retrieve Messages from a Topic Due to Insufficient Permissions?",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"What Can I Do If a Consumer Fails to Retrieve Messages from a Topic Due to Insufficient Permissions?",
"githuburl":""
},
{
"uri":"kafka-faq-200423001.html",
- "product_code":"dms",
- "code":"111",
+ "node_id":"kafka-faq-200423001.xml",
+ "product_code":"kafka",
+ "code":"137",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Consumer Groups",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Consumer Groups",
"githuburl":""
},
{
"uri":"kafka-faq-200426033.html",
- "product_code":"dms",
- "code":"112",
- "des":"No. They are generated automatically when you use the instance.For details about creating and retrieving messages after connecting to a Kafka instance, see Accessing a Ka",
+ "node_id":"kafka-faq-200426033.xml",
+ "product_code":"kafka",
+ "code":"138",
+ "des":"When parameter auto.create.groups.enable is set to true, you do not need to create a consumer group, producer, or consumer because they are generated automatically when y",
"doc_type":"usermanual",
"kw":"Do I Need to Create Consumer Groups, Producers, and Consumers for Kafka Instances?,Consumer Groups,U",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Do I Need to Create Consumer Groups, Producers, and Consumers for Kafka Instances?",
"githuburl":""
},
- {
- "uri":"kafka-faq-0032.html",
- "product_code":"dms",
- "code":"113",
- "des":"No. You can directly delete the consumer group.",
- "doc_type":"usermanual",
- "kw":"Do I Need to Unsubscribe from a Topic Before Deleting a Consumer Group?,Consumer Groups,User Guide",
- "title":"Do I Need to Unsubscribe from a Topic Before Deleting a Consumer Group?",
- "githuburl":""
- },
{
"uri":"kafka-faq-0043.html",
- "product_code":"dms",
- "code":"114",
- "des":"Yes.Kafka uses the offsets.retention.minutes parameter to control how long to keep offsets for a consumer group. If offsets are not committed within this period, they wil",
+ "node_id":"kafka-faq-0043.xml",
+ "product_code":"kafka",
+ "code":"139",
+ "des":"This depends on the offsets.retention.minutes and auto.create.groups.enable parameters.For instances created much earlier, auto.create.groups.enable is set to true by def",
"doc_type":"usermanual",
"kw":"Will a Consumer Group Without Active Consumers Be Automatically Deleted in 14 Days?,Consumer Groups,",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Will a Consumer Group Without Active Consumers Be Automatically Deleted in 14 Days?",
"githuburl":""
},
+ {
+ "uri":"kafka_faq_0059.html",
+ "node_id":"kafka_faq_0059.xml",
+ "product_code":"kafka",
+ "code":"140",
+ "des":"Possible cause: Automatic consumer group creation has been enabled and your service is connected to the consumer group and consuming messages. Therefore, the consumer gro",
+ "doc_type":"usermanual",
+ "kw":"Why Does a Deleted Consumer Group Still Exist?,Consumer Groups,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Why Does a Deleted Consumer Group Still Exist?",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka_faq_0060.html",
+ "node_id":"kafka_faq_0060.xml",
+ "product_code":"kafka",
+ "code":"141",
+ "des":"Check whether Flink is used for consumption. Flink uses the assign mode and the client assigns specific partitions to be consumed, so you cannot see any consumer on the K",
+ "doc_type":"usermanual",
+ "kw":"Why Can't I View Consumers When Instance Consumption Is Normal?,Consumer Groups,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Why Can't I View Consumers When Instance Consumption Is Normal?",
+ "githuburl":""
+ },
{
"uri":"kafka-faq-190416001.html",
- "product_code":"dms",
- "code":"115",
+ "node_id":"kafka-faq-190416001.xml",
+ "product_code":"kafka",
+ "code":"142",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Messages",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Messages",
"githuburl":""
},
{
"uri":"kafka-faq-200426035.html",
- "product_code":"dms",
- "code":"116",
+ "node_id":"kafka-faq-200426035.xml",
+ "product_code":"kafka",
+ "code":"143",
"des":"10 MB.",
"doc_type":"usermanual",
"kw":"What Is the Maximum Size of a Message that Can be Created?,Messages,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"What Is the Maximum Size of a Message that Can be Created?",
"githuburl":""
},
{
"uri":"kafka-faq-200426036.html",
- "product_code":"dms",
- "code":"117",
+ "node_id":"kafka-faq-200426036.xml",
+ "product_code":"kafka",
+ "code":"144",
"des":"Rebalancing is a process where partitions of topics are re-allocated for a consumer group.In normal cases, rebalancing occurs inevitably when a consumer is added to or re",
"doc_type":"usermanual",
"kw":"Why Does Message Poll Often Fail During Rebalancing?,Messages,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Why Does Message Poll Often Fail During Rebalancing?",
"githuburl":""
},
{
"uri":"kafka-faq-200426037.html",
- "product_code":"dms",
- "code":"118",
+ "node_id":"kafka-faq-200426037.xml",
+ "product_code":"kafka",
+ "code":"145",
"des":"Possible cause 1: The message has been aged.Solution: Change the aging time.Solution: Change the aging time.Possible cause 2: The createTime timestamp of the message is i",
"doc_type":"usermanual",
"kw":"Why Can't I Query Messages on the Console?,Messages,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Why Can't I Query Messages on the Console?",
"githuburl":""
},
+ {
+ "uri":"kafka-faq-200426100.html",
+ "node_id":"kafka-faq-200426100.xml",
+ "product_code":"kafka",
+ "code":"146",
+ "des":"Symptom: An alarm is generated for the Accumulated Messages metric.Solution:Log in to the Kafka console and click the instance for which the alarm is generated. The insta",
+ "doc_type":"usermanual",
+ "kw":"What Can I Do If Kafka Messages Are Accumulated?,Messages,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"What Can I Do If Kafka Messages Are Accumulated?",
+ "githuburl":""
+ },
{
"uri":"kafka-faq-200708001.html",
- "product_code":"dms",
- "code":"119",
+ "node_id":"kafka-faq-200708001.xml",
+ "product_code":"kafka",
+ "code":"147",
"des":"If the aging time has been set for a topic, the value of the log.retention.hours parameter does not take effect for the topic. The value of the log.retention.hours parame",
"doc_type":"usermanual",
"kw":"Why Do Messages Still Exist After the Retention Period Elapses?,Messages,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Why Do Messages Still Exist After the Retention Period Elapses?",
"githuburl":""
},
{
"uri":"kafka-faq-0018.html",
- "product_code":"dms",
- "code":"120",
+ "node_id":"kafka-faq-0018.xml",
+ "product_code":"kafka",
+ "code":"148",
"des":"No.",
"doc_type":"usermanual",
"kw":"Do Kafka Instances Support Delayed Message Delivery?,Messages,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Do Kafka Instances Support Delayed Message Delivery?",
"githuburl":""
},
{
"uri":"kafka-faq-0041.html",
- "product_code":"dms",
- "code":"121",
+ "node_id":"kafka-faq-0041.xml",
+ "product_code":"kafka",
+ "code":"149",
"des":"View the number of accumulated messages using any of the following methods:On the Consumer Groups page of an instance, click the name of the consumer group whose accumula",
"doc_type":"usermanual",
"kw":"How Do I View the Number of Accumulated Messages?,Messages,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"How Do I View the Number of Accumulated Messages?",
"githuburl":""
},
{
"uri":"kafka-faq-0045.html",
- "product_code":"dms",
- "code":"122",
+ "node_id":"kafka-faq-0045.xml",
+ "product_code":"kafka",
+ "code":"150",
"des":"The message creation time is specified by CreateTime when a producer creates messages. If this parameter is not set during message creation, the message creation time is ",
"doc_type":"usermanual",
"kw":"Why Is the Message Creation Time Displayed as Year 1970?,Messages,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Why Is the Message Creation Time Displayed as Year 1970?",
"githuburl":""
},
+ {
+ "uri":"kafka_faq_0058.html",
+ "node_id":"kafka_faq_0058.xml",
+ "product_code":"kafka",
+ "code":"151",
+ "des":"message.max.bytes can be modified on the Parameters page on the console. For details, see Modifying Kafka Instance Configuration Parameters.The maximum value of message.m",
+ "doc_type":"usermanual",
+ "kw":"How Do I Modify message.max.bytes?,Messages,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"How Do I Modify message.max.bytes?",
+ "githuburl":""
+ },
{
"uri":"kafka-faq-191030004.html",
- "product_code":"dms",
- "code":"123",
+ "node_id":"kafka-faq-191030004.xml",
+ "product_code":"kafka",
+ "code":"152",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Monitoring & Alarm",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Monitoring & Alarm",
"githuburl":""
},
{
"uri":"kafka-faq-200426041.html",
- "product_code":"dms",
- "code":"124",
- "des":"The possible causes are as follows:The topic name starts with a special character, such as an underscore (_) or a number sign (#).The consumer group name starts with a sp",
+ "node_id":"kafka-faq-200426041.xml",
+ "product_code":"kafka",
+ "code":"153",
+ "des":"If topic monitoring data is not displayed, the possible causes are as follows:The topic name starts with a special character, such as an underscore (_) or a number sign (",
"doc_type":"usermanual",
"kw":"Why Can't I View the Monitoring Data?,Monitoring & Alarm,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Why Can't I View the Monitoring Data?",
"githuburl":""
},
{
"uri":"kafka-faq-0007.html",
- "product_code":"dms",
- "code":"125",
+ "node_id":"kafka-faq-0007.xml",
+ "product_code":"kafka",
+ "code":"154",
"des":"Symptom: The monitoring data shows that there are 810 million accumulated messages. However, the Kafka console shows that there are 100 million messages in all six topics",
"doc_type":"usermanual",
"kw":"Why Is the Monitored Number of Accumulated Messages Inconsistent with the Message Quantity Displayed",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Why Is the Monitored Number of Accumulated Messages Inconsistent with the Message Quantity Displayed on the Kafka Console?",
"githuburl":""
},
{
"uri":"kafka-faq-0022.html",
- "product_code":"dms",
- "code":"126",
+ "node_id":"kafka-faq-0022.xml",
+ "product_code":"kafka",
+ "code":"155",
"des":"The monitoring data is reported every minute. The reported data will be displayed on the monitoring page after being sorted. This process takes less than 20 minutes. Afte",
"doc_type":"usermanual",
"kw":"Why Is a Consumer Group Still on the Monitoring Page After Being Deleted?,Monitoring & Alarm,User Gu",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Why Is a Consumer Group Still on the Monitoring Page After Being Deleted?",
"githuburl":""
},
+ {
+ "uri":"kafka-ug-0723006.html",
+ "node_id":"kafka-ug-0723006.xml",
+ "product_code":"kafka",
+ "code":"156",
+ "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "doc_type":"usermanual",
+ "kw":"Troubleshooting",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Troubleshooting",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-faq-0604001.html",
+ "node_id":"kafka-faq-0604001.xml",
+ "product_code":"kafka",
+ "code":"157",
+ "des":"This section describes how to troubleshoot Kafka connection problems.If the connection to a Kafka instance is abnormal, perform the following operations to troubleshoot t",
+ "doc_type":"usermanual",
+ "kw":"Troubleshooting Kafka Connection Exceptions,Troubleshooting,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Troubleshooting Kafka Connection Exceptions",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-trouble-0709001.html",
+ "node_id":"kafka-trouble-0709001.xml",
+ "product_code":"kafka",
+ "code":"158",
+ "des":"The duration from message creation to retrieval occasionally reaches 6 minutes, which is not tolerable to services.Service requests are stacked and cannot be processed in",
+ "doc_type":"usermanual",
+ "kw":"Troubleshooting 6-Min Latency Between Message Creation and Retrieval,Troubleshooting,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Troubleshooting 6-Min Latency Between Message Creation and Retrieval",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-trouble-0001.html",
+ "node_id":"kafka-trouble-0001.xml",
+ "product_code":"kafka",
+ "code":"159",
+ "des":"The system displays the error message \"Disk error when trying to access log file on the disk\".The disk usage of the broker is too high.Expand the disk space by referring ",
+ "doc_type":"usermanual",
+ "kw":"Troubleshooting Message Creation Failures,Troubleshooting,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Troubleshooting Message Creation Failures",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-trouble-0002.html",
+ "node_id":"kafka-trouble-0002.xml",
+ "product_code":"kafka",
+ "code":"160",
+ "des":"A deleted topic still exists.Automatic topic creation has been enabled for the instance, and a consumer is connecting to the topic. If services are not stopped, message c",
+ "doc_type":"usermanual",
+ "kw":"Troubleshooting Topic Deletion Failures,Troubleshooting,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Troubleshooting Topic Deletion Failures",
+ "githuburl":""
+ },
+ {
+ "uri":"kafka-trouble-0004.html",
+ "node_id":"kafka-trouble-0004.xml",
+ "product_code":"kafka",
+ "code":"161",
+ "des":"For a Kafka instance deployed in multiple AZs, if one of the AZs is faulty, error message \"Topic {{topic_name}} not present in metadata after 60000 ms\" may be reported on",
+ "doc_type":"usermanual",
+ "kw":"Troubleshooting Error \"Topic {{topic_name}} not present in metadata after 60000 ms\" During Message P",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
+ "title":"Troubleshooting Error \"Topic {{topic_name}} not present in metadata after 60000 ms\" During Message Production or Consumption",
+ "githuburl":""
+ },
{
"uri":"kafka-ug-00001.html",
- "product_code":"dms",
- "code":"127",
+ "node_id":"kafka-ug-00001.xml",
+ "product_code":"kafka",
+ "code":"162",
"des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"doc_type":"usermanual",
"kw":"Change History,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "prodname":"kafka",
+ "documenttype":"usermanual"
+ }
+ ],
"title":"Change History",
"githuburl":""
},
{
- "uri":"dms-ug-0312114.html",
+ "uri":"kafka-ug-00002.html",
+ "node_id":"kafka-ug-00002.xml",
"product_code":"dms",
- "code":"128",
+ "code":"163",
+ "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "doc_type":"usermanual",
+ "kw":"Acronyms and Abbreviations,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "documenttype":"usermanual",
+ "prodname":"dms"
+ }
+ ],
+ "title":"Acronyms and Abbreviations",
+ "githuburl":""
+ },
+ {
+ "uri":"dms-ug-0312114.html",
+ "node_id":"dms-ug-0312114.xml",
+ "product_code":"dms",
+ "code":"164",
"des":"See Glossary.",
"doc_type":"usermanual",
"kw":"Glossary,User Guide",
+ "search_title":"",
+ "metedata":[
+ {
+ "documenttype":"usermanual",
+ "prodname":"dms"
+ }
+ ],
"title":"Glossary",
"githuburl":""
}
diff --git a/docs/dms/umn/CLASS.TXT.json b/docs/dms/umn/CLASS.TXT.json
index bf94028e..9b4143ba 100644
--- a/docs/dms/umn/CLASS.TXT.json
+++ b/docs/dms/umn/CLASS.TXT.json
@@ -36,7 +36,7 @@
"code":"4"
},
{
- "desc":"Kafka instances are compatible with open-source Kafka 1.1.0, 2.3.0, and 2.7. The instance specifications are classified based on bandwidth, namely, 100 MB/s, 300 MB/s, 60",
+ "desc":"Kafka instances are compatible with open-source Kafka v1.1.0, v2.3.0, v2.7, and v3.x. Kafka instances are classified into cluster and single-node types. A cluster instanc",
"product_code":"dms",
"title":"Specifications",
"uri":"Kafka-specification.html",
@@ -44,6 +44,15 @@
"p_code":"1",
"code":"5"
},
+ {
+ "desc":"A single-node Kafka instance has only one broker. These instances do not guarantee performance or reliability and are for trial use or testing only. In the production env",
+ "product_code":"dms",
+ "title":"Comparing Single-node and Cluster Kafka Instances",
+ "uri":"kafka-pd-0052.html",
+ "doc_type":"usermanual",
+ "p_code":"1",
+ "code":"6"
+ },
{
"desc":"DMS is compatible with open-source Kafka and has customized and enhanced Kafka features. In addition to the advantages of open-source Kafka, DMS for Kafka provides more r",
"product_code":"dms",
@@ -51,7 +60,7 @@
"uri":"kafka-pd-200720001.html",
"doc_type":"usermanual",
"p_code":"1",
- "code":"6"
+ "code":"7"
},
{
"desc":"This section describes the notes and constraints on DMS.",
@@ -60,7 +69,7 @@
"uri":"kafka-pd-190605003.html",
"doc_type":"usermanual",
"p_code":"1",
- "code":"7"
+ "code":"8"
},
{
"desc":"Cloud Trace Service (CTS)CTS generates traces to provide you with a history of operations performed on cloud service resources. The traces include operation requests sent",
@@ -69,7 +78,7 @@
"uri":"kafka-pd-190605002.html",
"doc_type":"usermanual",
"p_code":"1",
- "code":"8"
+ "code":"9"
},
{
"desc":"DMS for Kafka of the cloud service platform uses Kafka as the message engine. This chapter presents explanations of basic concepts of Kafka.A topic is a category for mess",
@@ -78,7 +87,7 @@
"uri":"glossary-kafka.html",
"doc_type":"usermanual",
"p_code":"1",
- "code":"9"
+ "code":"10"
},
{
"desc":"This section provides recommendations on configuring common parameters for Kafka producers and consumers.",
@@ -87,242 +96,296 @@
"uri":"dms-ug-001.html",
"doc_type":"usermanual",
"p_code":"1",
- "code":"10"
- },
- {
- "desc":"By default, there are two types of user permissions: user management and resource management.User management refers to the management of users, user groups, and user grou",
- "product_code":"dms",
- "title":"Permissions",
- "uri":"dms-ug-190128001.html",
- "doc_type":"usermanual",
- "p_code":"1",
"code":"11"
},
{
- "desc":"Before creating a Kafka instance, ensure the availability of resources, including a virtual private cloud (VPC), subnet, security group, and security group rules. Each Ka",
+ "desc":"You can use Identity and Access Management (IAM) to manage DMS for Kafka permissions and control access to your resources. IAM provides identity authentication, permissio",
"product_code":"dms",
- "title":"Preparing Required Resources",
- "uri":"kafka-ug-180604012.html",
+ "title":"Permission",
+ "uri":"ProductDescPrivilegeManagement.html",
"doc_type":"usermanual",
- "p_code":"",
+ "p_code":"1",
"code":"12"
},
{
- "desc":"Kafka instances are physically isolated and exclusively occupied by each tenant. You can customize the computing capabilities and storage space of an instance based on se",
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"dms",
- "title":"Creating an Instance",
- "uri":"dms-ug-180604013.html",
+ "title":"Getting Started",
+ "uri":"kafka-ug-00003.html",
"doc_type":"usermanual",
"p_code":"",
"code":"13"
},
{
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "desc":"This section takes the example of creating a Kafka 2.7 instance (ciphertext access and SASL_SSL) and accessing it on the client (private network, within a virtual private",
"product_code":"dms",
- "title":"Accessing a Kafka Instance",
- "uri":"kafka-ug190605003.html",
+ "title":"Getting Started with Kafka for Message Production and Consumption",
+ "uri":"kafka-qs-0409001.html",
"doc_type":"usermanual",
- "p_code":"",
+ "p_code":"13",
"code":"14"
},
{
- "desc":"This section describes how to use an open-source Kafka client to access a Kafka instance if SASL access is not enabled for the instance. There are two scenarios. For cros",
+ "desc":"The following figure shows the process of using a Kafka instance to produce and consume messages.Creating a User and Granting DMS for Kafka PermissionsCreate IAM users an",
"product_code":"dms",
- "title":"Accessing a Kafka Instance Without SASL",
- "uri":"kafka-ug-180604020.html",
+ "title":"Process of Using Kafka",
+ "uri":"kafka-ug-0069.html",
"doc_type":"usermanual",
- "p_code":"14",
+ "p_code":"",
"code":"15"
},
{
- "desc":"If you enable SASL_SSL when creating an instance, data will be encrypted before transmission for enhanced security.For security purposes, TLS_ECDHE_RSA_WITH_AES_128_GCM_S",
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"dms",
- "title":"Accessing a Kafka Instance with SASL",
- "uri":"kafka-ug-180801001.html",
+ "title":"Permission Management",
+ "uri":"UserPrivilegeManagement.html",
"doc_type":"usermanual",
- "p_code":"14",
+ "p_code":"",
"code":"16"
},
{
- "desc":"VPCs are logically isolated from each other. If a Kafka instance and a Kafka client are in different VPCs within a region, they cannot communicate with each other. In thi",
+ "desc":"This section describes how to use Identity and Access Management (IAM) for fine-grained permissions control for your Distributed Message Service (DMS) for Kafka resources",
"product_code":"dms",
- "title":"Cross-VPC Access to a Kafka Instance",
- "uri":"kafka-ug-0001.html",
+ "title":"Creating a User and Granting DMS for Kafka Permissions",
+ "uri":"CreateUserAndGrantPolicy.html",
"doc_type":"usermanual",
- "p_code":"14",
+ "p_code":"16",
"code":"17"
},
{
- "desc":"You can use destination NAT (DNAT) to access a Kafka instance so that the instance can provide services on the public network through port mapping.You have created EIPs. ",
+ "desc":"Kafka instances are tenant-exclusive, and physically isolated in deployment. You can customize the computing capabilities and storage space of a Kafka instance as require",
"product_code":"dms",
- "title":"Using DNAT to Access a Kafka Instance",
- "uri":"kafka-dnat.html",
+ "title":"Creating a Kafka Instance",
+ "uri":"kafka-ug-180604013.html",
"doc_type":"usermanual",
- "p_code":"14",
+ "p_code":"",
"code":"18"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"dms",
- "title":"Managing Instances",
- "uri":"kafka-ug-180604011.html",
+ "title":"Configuring Topics",
+ "uri":"kafka-ug-0720001.html",
"doc_type":"usermanual",
"p_code":"",
"code":"19"
},
{
- "desc":"View detailed information about a Kafka instance on the DMS console, for example, the IP addresses and port numbers for accessing the instance.Select the region where you",
+ "desc":"Topics store messages created by producers and subscribed by consumers. If automatic topic creation is not enabled during Kafka instance creation, you need to manually cr",
"product_code":"dms",
- "title":"Viewing an Instance",
- "uri":"kafka-ug-180604014.html",
+ "title":"Creating a Kafka Topic",
+ "uri":"kafka-ug-180604018.html",
"doc_type":"usermanual",
"p_code":"19",
"code":"20"
},
{
- "desc":"Restart one or more Kafka instances at a time on the DMS console.When a Kafka instance is being restarted, message retrieval and creation requests of clients will be reje",
+ "desc":"DMS supports access control list (ACL) for topics. You can differentiate user permissions by granting users different permissions in a topic.This section describes how to",
"product_code":"dms",
- "title":"Restarting an Instance",
- "uri":"kafka-ug-180604015.html",
+ "title":"Configuring Kafka Topic Permissions",
+ "uri":"kafka-ug-0002.html",
"doc_type":"usermanual",
"p_code":"19",
"code":"21"
},
{
- "desc":"On the DMS console, you can delete one or more Kafka instances that have been created or failed to be created.Deleting a Kafka instance will delete the data in the instan",
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"dms",
- "title":"Deleting an Instance",
- "uri":"kafka-ug-180604016.html",
+ "title":"Managing Topics",
+ "uri":"kafka-ug-0058.html",
"doc_type":"usermanual",
"p_code":"19",
"code":"22"
},
{
- "desc":"After creating a Kafka instance, you can modify some parameters of the instance based on service requirements, including the instance name, description, security group, a",
+ "desc":"On the console, you can view the details of a Kafka instance including subscriptions to a topic, offsets and number of messages in each partition, and producer addresses.",
"product_code":"dms",
- "title":"Modifying the Information About an Instance",
- "uri":"kafka-ug-180604017.html",
+ "title":"Viewing Kafka Topic Details",
+ "uri":"kafka_ug_0045.html",
"doc_type":"usermanual",
- "p_code":"19",
+ "p_code":"22",
"code":"23"
},
{
- "desc":"To access a Kafka instance over a public network, enable public access and configure EIPs for the instance.If you no longer need public access to the instance, you can di",
+ "desc":"This section describes how to modify following configurations of a Kafka topic on the console.Modifying Synchronous Replication, Synchronous Flushing, Message Timestamp, ",
"product_code":"dms",
- "title":"Configuring Public Access",
- "uri":"kafka-ug-0319001.html",
+ "title":"Modifying Kafka Topic Configurations",
+ "uri":"kafka-ug-0038.html",
"doc_type":"usermanual",
- "p_code":"19",
+ "p_code":"22",
"code":"24"
},
{
- "desc":"You can reset the SASL_SSL password for accessing a Kafka instance by resetting Kafka password if you forget it.You can reset the Kafka password only if Kafka SASL_SSL ha",
+ "desc":"After creating a topic, you can increase the number of partitions as required.Changing the number of partitions does not restart the instance or affect services.Methods f",
"product_code":"dms",
- "title":"Resetting Kafka Password",
- "uri":"kafka-ug-180718001.html",
+ "title":"Changing Kafka Partition Quantity",
+ "uri":"kafka-ug-0006.html",
"doc_type":"usermanual",
- "p_code":"19",
+ "p_code":"22",
"code":"25"
},
{
- "desc":"Tags facilitate Kafka instance identification and management.You can add tags to a Kafka instance when creating the instance or add tags on the Tags tab page of the creat",
+ "desc":"Export the topic list on the console. Batch export is supported.A topic has been created.Select the region where your Kafka instance is located.The topic list contains th",
"product_code":"dms",
- "title":"Managing Instance Tags",
- "uri":"TagManagement.html",
+ "title":"Exporting the Kafka Topic List",
+ "uri":"kafka_ug_0027.html",
"doc_type":"usermanual",
- "p_code":"19",
+ "p_code":"22",
"code":"26"
},
{
- "desc":"After you initiate certain instance operations such as configuring public access and modifying the capacity threshold policy, a background task will start for each operat",
+ "desc":"Partition reassignment is to reassign replicas of a partition to different brokers to solve the problem of unbalanced broker load.Partition reassignment is required in th",
"product_code":"dms",
- "title":"Viewing Background Tasks",
- "uri":"kafka-ug-200119002.html",
+ "title":"Reassigning Kafka Partitions",
+ "uri":"kafka_ug_0023.html",
"doc_type":"usermanual",
- "p_code":"19",
+ "p_code":"22",
"code":"27"
},
{
- "desc":"On the Kafka console, you can view the disk usage of each broker.Select the region where your Kafka instance is located.You can query topics that use the most disk space ",
+ "desc":"Automatic topic creation: A topic will be automatically created when a message is produced in or consumed from a topic that does not exist. By default, the topic has para",
"product_code":"dms",
- "title":"Viewing Disk Usage",
- "uri":"kafka-ug-0004.html",
+ "title":"Configuring Automatic Topic Creation",
+ "uri":"kafka_ug_0043.html",
"doc_type":"usermanual",
- "p_code":"19",
+ "p_code":"22",
"code":"28"
},
+ {
+ "desc":"Delete a topic using either of the following methods:Deleting a Kafka Topic (Console)Deleting a Kafka Topic on the ClientA Kafka instance has been created, and a topic ha",
+ "product_code":"dms",
+ "title":"Deleting a Kafka Topic",
+ "uri":"kafka-ug-180604019.html",
+ "doc_type":"usermanual",
+ "p_code":"22",
+ "code":"29"
+ },
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"dms",
- "title":"Managing Topics",
- "uri":"kafka-ug-0720001.html",
+ "title":"Connecting to an Instance",
+ "uri":"kafka-ug190605003.html",
"doc_type":"usermanual",
"p_code":"",
- "code":"29"
- },
- {
- "desc":"A topic is a stream of messages. If automatic topic creation is not enabled during Kafka instance creation, you need to manually create topics for creating and retrieving",
- "product_code":"dms",
- "title":"Creating a Topic",
- "uri":"dms-ug-180604018.html",
- "doc_type":"usermanual",
- "p_code":"29",
"code":"30"
},
{
- "desc":"Delete a topic using either of the following methods:By using the consoleBy using Kafka CLIA Kafka instance has been created, and a topic has been created in this instanc",
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"dms",
- "title":"Deleting a Topic",
- "uri":"kafka-ug-180604019.html",
+ "title":"Configuring Kafka Network Connections",
+ "uri":"kafka-ug-0059.html",
"doc_type":"usermanual",
- "p_code":"29",
+ "p_code":"30",
"code":"31"
},
{
- "desc":"Aging time is a period that messages in the topic are retained for. Consumers must retrieve messages before this period ends. Otherwise, the messages will be deleted and ",
+ "desc":"A client can connect to a Kafka instance in public or private networks. Notes before using a private network:By default, a client and a Kafka instance are interconnected ",
"product_code":"dms",
- "title":"Modifying Topic Aging Time",
- "uri":"kafka-ug-200506001.html",
+ "title":"Kafka Network Connection Conditions",
+ "uri":"kafka-ug-180604012.html",
"doc_type":"usermanual",
- "p_code":"29",
+ "p_code":"31",
"code":"32"
},
{
- "desc":"After creating a topic, you can increase the number of partitions based on service requirements.Changing the number of partitions does not affect services.Methods for cha",
+ "desc":"To access a Kafka instance over a public network, enable public access and configure EIPs for the instance.If you no longer need public access to the instance, you can di",
"product_code":"dms",
- "title":"Changing Partition Quantity",
- "uri":"kafka-ug-0006.html",
+ "title":"Configuring Kafka Public Access",
+ "uri":"kafka-ug-0319001.html",
"doc_type":"usermanual",
- "p_code":"29",
+ "p_code":"31",
"code":"33"
},
{
- "desc":"Synchronous replication: A message is returned to the client only after the message creation request has been received and the message has been acknowledged by all replic",
+ "desc":"VPCs are logically isolated from each other. If a Kafka instance and a Kafka client are in different VPCs within a region, they cannot communicate with each other. In thi",
"product_code":"dms",
- "title":"Modifying Synchronous Replication and Flushing Settings",
- "uri":"kafka_ug_0022.html",
+ "title":"Accessing Kafka Using a VPC Endpoint Across VPCs",
+ "uri":"kafka-ug-0001.html",
"doc_type":"usermanual",
- "p_code":"29",
+ "p_code":"31",
"code":"34"
},
{
- "desc":"On the console, view sample code for creating and retrieving messages in Java, Go, and Python.Select the region where your Kafka instance is located.View sample code for ",
+ "desc":"You can use destination NAT (DNAT) to access a Kafka instance so that the instance can provide services on the public network through port mapping.You have created EIPs. ",
"product_code":"dms",
- "title":"Viewing Sample Code",
- "uri":"kafka_ug_0024.html",
+ "title":"Accessing Kafka in a Public Network Using DNAT",
+ "uri":"kafka-dnat.html",
"doc_type":"usermanual",
- "p_code":"29",
+ "p_code":"31",
"code":"35"
},
{
- "desc":"Export topics on the console. Batch export is supported.A topic has been created.Select the region where your Kafka instance is located.The topic list contains the follow",
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"dms",
- "title":"Exporting Topics",
- "uri":"kafka_ug_0027.html",
+ "title":"Configuring Kafka Access Control",
+ "uri":"kafka-ug-0061.html",
"doc_type":"usermanual",
- "p_code":"29",
+ "p_code":"30",
"code":"36"
},
+ {
+ "desc":"You can access a Kafka instance in plaintext or ciphertext. This section describes how to change the access mode on the console.When you change the access mode for the fi",
+ "product_code":"dms",
+ "title":"Configuring Plaintext or Ciphertext Access to Kafka Instances",
+ "uri":"kafka_ug_0044.html",
+ "doc_type":"usermanual",
+ "p_code":"36",
+ "code":"37"
+ },
+ {
+ "desc":"DMS supports access control list (ACL) for topics. You can differentiate user permissions by granting users different permissions in a topic.This section describes how to",
+ "product_code":"dms",
+ "title":"Configuring Kafka Users",
+ "uri":"kafka-ug-0003.html",
+ "doc_type":"usermanual",
+ "p_code":"36",
+ "code":"38"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"dms",
+ "title":"Configuring the Kafka Client",
+ "uri":"kafka-ug-0062.html",
+ "doc_type":"usermanual",
+ "p_code":"30",
+ "code":"39"
+ },
+ {
+ "desc":"This section provides recommendations on configuring common parameters for Kafka producers and consumers. Kafka clients in different versions may have different parameter",
+ "product_code":"dms",
+ "title":"Setting Parameters for Kafka Clients",
+ "uri":"Kafka-client-parameter.html",
+ "doc_type":"usermanual",
+ "p_code":"39",
+ "code":"40"
+ },
+ {
+ "desc":"Ensure that the owner thread does not exit abnormally. Otherwise, the client may fail to initiate consumption requests and the consumption will be blocked.Commit messages",
+ "product_code":"dms",
+ "title":"Suggestions on Using the Kafka Client",
+ "uri":"Kafka-client-best-practice.html",
+ "doc_type":"usermanual",
+ "p_code":"39",
+ "code":"41"
+ },
+ {
+ "desc":"This section describes how to use an open-source Kafka client to access a Kafka instance in plaintext. Two scenarios: private network within a VPC and public network. To ",
+ "product_code":"dms",
+ "title":"Connecting to Kafka Using the Client (Plaintext Access)",
+ "uri":"kafka-ug-180604020.html",
+ "doc_type":"usermanual",
+ "p_code":"30",
+ "code":"42"
+ },
+ {
+ "desc":"If you enable ciphertext access when creating an instance, SASL authentication will be required when your client connects to a Kafka instance.For security purposes, TLS_E",
+ "product_code":"dms",
+ "title":"Connecting to Kafka Using the Client (Ciphertext Access)",
+ "uri":"kafka-ug-180801001.html",
+ "doc_type":"usermanual",
+ "p_code":"30",
+ "code":"43"
+ },
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"dms",
@@ -330,61 +393,34 @@
"uri":"kafka-ug-0720002.html",
"doc_type":"usermanual",
"p_code":"",
- "code":"37"
+ "code":"44"
},
{
"desc":"You can view the offset of different partitions, the message size, creation time, and body of messages in topics.Select the region where your Kafka instance is located.If",
"product_code":"dms",
- "title":"Querying Messages",
+ "title":"Viewing Kafka Messages",
"uri":"kafka-ug-190904001.html",
"doc_type":"usermanual",
- "p_code":"37",
- "code":"38"
+ "p_code":"44",
+ "code":"45"
},
{
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "desc":"Aging time is a period that messages in the topic are retained for. Consumers must retrieve messages before this period ends. Otherwise, the messages will be deleted and ",
"product_code":"dms",
- "title":"Managing Users",
- "uri":"kafka-ug-0009.html",
+ "title":"Changing Kafka Message Retention Period",
+ "uri":"kafka-ug-200506001.html",
"doc_type":"usermanual",
- "p_code":"",
- "code":"39"
+ "p_code":"44",
+ "code":"46"
},
{
- "desc":"DMS supports ACL permission management for topics. You can differentiate the operations that different users are allowed to perform on a topic by granting the users diffe",
+ "desc":"This section describes how to delete messages on the console.Deleted messages cannot be recovered.Before deleting a message, set the auto.offset.reset parameter in the co",
"product_code":"dms",
- "title":"Creating a SASL_SSL User",
- "uri":"kafka-ug-0003.html",
+ "title":"Deleting Kafka Messages",
+ "uri":"kafka_ug_0046.html",
"doc_type":"usermanual",
- "p_code":"39",
- "code":"40"
- },
- {
- "desc":"DMS supports ACL permission management for topics. You can differentiate the operations that different users are allowed to perform on a topic by granting the users diffe",
- "product_code":"dms",
- "title":"Granting Permissions to a SASL_SSL User",
- "uri":"kafka-ug-0002.html",
- "doc_type":"usermanual",
- "p_code":"39",
- "code":"41"
- },
- {
- "desc":"If you forget the password of a SASL_SSL user created on the Users tab page, you can reset the password and use the new password to connect to the Kafka instance.If you f",
- "product_code":"dms",
- "title":"Resetting the SASL_SSL Password",
- "uri":"kafka_ug_0025.html",
- "doc_type":"usermanual",
- "p_code":"39",
- "code":"42"
- },
- {
- "desc":"This section describes how to delete a SASL_SSL user.Select the region where your Kafka instance is located.On the Users tab page, click Delete in the row that contains t",
- "product_code":"dms",
- "title":"Deleting a SASL_SSL User",
- "uri":"kafka_ug_0026.html",
- "doc_type":"usermanual",
- "p_code":"39",
- "code":"43"
+ "p_code":"44",
+ "code":"47"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
@@ -393,106 +429,223 @@
"uri":"kafka-ug-0011.html",
"doc_type":"usermanual",
"p_code":"",
- "code":"44"
+ "code":"48"
},
{
- "desc":"View the consumer group list, consumer list, and consumer offsets.The consumer list can be viewed only when consumers in a consumer group are connected to the Kafka insta",
+ "desc":"Create a consumer group on the console.auto.create.groups.enable: a consumer group is automatically created when a consumer attempts to enter a group that does not exist.",
"product_code":"dms",
- "title":"Querying Consumer Group Details",
+ "title":"Creating a Kafka Consumer Group",
+ "uri":"kafka-ug-0041.html",
+ "doc_type":"usermanual",
+ "p_code":"48",
+ "code":"49"
+ },
+ {
+ "desc":"This section describes how to query the consumer group list.Select the region where your Kafka instance is located.The consumer group name, status, and Coordinator (ID) a",
+ "product_code":"dms",
+ "title":"Querying the Kafka Consumer Group List",
"uri":"kafka_ug_0021.html",
"doc_type":"usermanual",
- "p_code":"44",
- "code":"45"
+ "p_code":"48",
+ "code":"50"
},
{
- "desc":"You can delete a consumer group using either of the following methods:Method 1: Delete a consumer group on the console.Method 2: Use Kafka CLI to delete a consumer group.",
+ "desc":"This section describes how to view the consumer list and consumer connection addresses.The consumer list and connection address can be viewed only when consumers in a con",
"product_code":"dms",
- "title":"Deleting a Consumer Group",
- "uri":"kafka-ug-0012.html",
- "doc_type":"usermanual",
- "p_code":"44",
- "code":"46"
- },
- {
- "desc":"Resetting the consumer offset is to change the retrieval position of a consumer.Messages may be retrieved more than once after the offset is reset. Exercise caution when ",
- "product_code":"dms",
- "title":"Resetting the Consumer Offset",
- "uri":"kafka-ug-0014.html",
- "doc_type":"usermanual",
- "p_code":"44",
- "code":"47"
- },
- {
- "desc":"View consumer connection addresses on the DMS console.The connection address of a consumer can be viewed only when the consumer is connected to a Kafka instance.Select th",
- "product_code":"dms",
- "title":"Viewing Consumer Connection Addresses",
+ "title":"Viewing Kafka Consumer Details",
"uri":"kafka-ug-0015.html",
"doc_type":"usermanual",
- "p_code":"44",
- "code":"48"
+ "p_code":"48",
+ "code":"51"
+ },
+ {
+ "desc":"This section describes how to view and reset consumption offsets. Resetting consumption offsets is to change the consumption position for consumers.Messages may be retrie",
+ "product_code":"dms",
+ "title":"Viewing and Resetting Kafka Consumption Offsets",
+ "uri":"kafka-ug-0014.html",
+ "doc_type":"usermanual",
+ "p_code":"48",
+ "code":"52"
+ },
+ {
+ "desc":"Export the consumer group list from the console.Select the region where your Kafka instance is located.Select the desired consumer groups and choose Export > Export selec",
+ "product_code":"dms",
+ "title":"Exporting Kafka Consumer Groups",
+ "uri":"kafka-ug-0056.html",
+ "doc_type":"usermanual",
+ "p_code":"48",
+ "code":"53"
+ },
+ {
+ "desc":"You can delete a consumer group in either of the following ways:Method 1: Delete a consumer group on the console.Method 2: Use Kafka CLI to delete a consumer group. (Ensu",
+ "product_code":"dms",
+ "title":"Deleting a Kafka Consumer Group",
+ "uri":"kafka-ug-0012.html",
+ "doc_type":"usermanual",
+ "p_code":"48",
+ "code":"54"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"dms",
+ "title":"Managing Instances",
+ "uri":"kafka-ug-180604011.html",
+ "doc_type":"usermanual",
+ "p_code":"",
+ "code":"55"
+ },
+ {
+ "desc":"This section describes how to view the details, and modify the basic information of a Kafka instance on the console.After creating a Kafka instance, you can modify some p",
+ "product_code":"dms",
+ "title":"Viewing and Modifying Basic Information of a Kafka Instance",
+ "uri":"kafka-ug-180604014.html",
+ "doc_type":"usermanual",
+ "p_code":"55",
+ "code":"56"
+ },
+ {
+ "desc":"This section describes how to view the disk usage of each broker on the Kafka console.This function is unavailable for single-node instances.Select the region where your ",
+ "product_code":"dms",
+ "title":"Viewing Kafka Disk Usage",
+ "uri":"kafka-ug-0004.html",
+ "doc_type":"usermanual",
+ "p_code":"55",
+ "code":"57"
+ },
+ {
+ "desc":"After you initiate certain instance operations listed in Table 1, a background task will start for each operation. On the console, you can view the background task status",
+ "product_code":"dms",
+ "title":"Viewing Kafka Background Tasks",
+ "uri":"kafka-ug-200119002.html",
+ "doc_type":"usermanual",
+ "p_code":"55",
+ "code":"58"
+ },
+ {
+ "desc":"On the console, view sample code for creating and retrieving messages in Java, Go, and Python.Select the region where your Kafka instance is located.View sample code for ",
+ "product_code":"dms",
+ "title":"Viewing Sample Code of Kafka Production and Consumption",
+ "uri":"kafka_ug_0024.html",
+ "doc_type":"usermanual",
+ "p_code":"55",
+ "code":"59"
},
{
"desc":"Your Kafka instances, topics, and consumers come with default configuration parameter settings. You can modify common parameters on the DMS console. For details about par",
"product_code":"dms",
- "title":"Modifying Kafka Parameters",
+ "title":"Modifying Kafka Instance Configuration Parameters",
"uri":"kafka-ug-0007.html",
"doc_type":"usermanual",
+ "p_code":"55",
+ "code":"60"
+ },
+ {
+ "desc":"Tags facilitate Kafka instance identification and management.You can add tags to a Kafka instance when creating the instance or add tags on the Tags tab page of the creat",
+ "product_code":"dms",
+ "title":"Configuring Kafka Instance Tags",
+ "uri":"TagManagement.html",
+ "doc_type":"usermanual",
+ "p_code":"55",
+ "code":"61"
+ },
+ {
+ "desc":"This section describes how to export the Kafka instance list from the console.Select the region where your Kafka instance is located.Select the desired instances and choo",
+ "product_code":"dms",
+ "title":"Exporting the Kafka Instance List",
+ "uri":"kafka-ug-0053.html",
+ "doc_type":"usermanual",
+ "p_code":"55",
+ "code":"62"
+ },
+ {
+ "desc":"Restart one or more Kafka instances at a time on the DMS console.When a Kafka instance is being restarted, message retrieval and creation requests of clients will be reje",
+ "product_code":"dms",
+ "title":"Restarting a Kafka Instance",
+ "uri":"kafka-ug-180604015.html",
+ "doc_type":"usermanual",
+ "p_code":"55",
+ "code":"63"
+ },
+ {
+ "desc":"Delete one or more Kafka instances at a time on the DMS console.Deleting a Kafka instance will delete the data in the instance without any backup. Exercise caution when p",
+ "product_code":"dms",
+ "title":"Deleting Kafka Instances",
+ "uri":"kafka-ug-180604016.html",
+ "doc_type":"usermanual",
+ "p_code":"55",
+ "code":"64"
+ },
+ {
+ "desc":"After creating a Kafka instance, you can increase its specifications. Table 1 lists available modification options. Only one object can be modified per operation: broker ",
+ "product_code":"dms",
+ "title":"Modifying Kafka Instance Specifications",
+ "uri":"kafka-ug-181221001.html",
+ "doc_type":"usermanual",
"p_code":"",
- "code":"49"
+ "code":"65"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"dms",
- "title":"Monitoring",
+ "title":"Migrating Data",
+ "uri":"kafka_ug_0016.html",
+ "doc_type":"usermanual",
+ "p_code":"",
+ "code":"66"
+ },
+ {
+ "desc":"You can migrate Kafka services to connect message producers and consumers to a new Kafka instance and can even migrate persisted message data to the new Kafka instance. K",
+ "product_code":"dms",
+ "title":"Kafka Data Migration Overview",
+ "uri":"kafka-bp-migration.html",
+ "doc_type":"usermanual",
+ "p_code":"66",
+ "code":"67"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"dms",
+ "title":"Monitoring and Alarms",
"uri":"kafka-ug-180413001.html",
"doc_type":"usermanual",
"p_code":"",
- "code":"50"
+ "code":"68"
},
{
"desc":"Cloud Eye monitors Kafka instance metrics in real time. You can view these metrics on the Cloud Eye console.At least one Kafka instance has been created. The instance has",
"product_code":"dms",
- "title":"Viewing Metrics",
+ "title":"Viewing Kafka Monitoring Metrics",
"uri":"kafka-ug-190605001.html",
"doc_type":"usermanual",
- "p_code":"50",
- "code":"51"
+ "p_code":"68",
+ "code":"69"
},
{
- "desc":"This section describes DMS metrics reported to Cloud Eye as well as their namespace and dimensions. You can use the Cloud Eye console to query the Kafka metrics and alarm",
+ "desc":"This section describes metrics reported by DMS to Cloud Eye as well as their namespaces and dimensions. You can use the Cloud Eye console or APIs to query the Kafka metri",
"product_code":"dms",
"title":"Kafka Metrics",
- "uri":"dms-ug-180413002.html",
+ "uri":"kafka-ug-180413002.html",
"doc_type":"usermanual",
- "p_code":"50",
- "code":"52"
+ "p_code":"68",
+ "code":"70"
},
{
- "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "desc":"This section describes the alarm rules of some metrics and how to configure them. In actual services, you are advised to configure alarm rules for metrics based on the fo",
"product_code":"dms",
- "title":"Auditing",
- "uri":"kafka-ug-180418001.html",
+ "title":"Configuring a Kafka Alarm Rule",
+ "uri":"kafka-ug-180524001.html",
"doc_type":"usermanual",
- "p_code":"",
- "code":"53"
+ "p_code":"68",
+ "code":"71"
},
{
- "desc":"With Cloud Trace Service (CTS), you can record operations associated with DMS for later query, audit, and backtrack operations.",
+ "desc":"With Cloud Trace Service (CTS), you can record operations associated with DMS for later query, audit, and backtrack operations.CTS has been enabled.See Querying Real-Time",
"product_code":"dms",
- "title":"Operations Logged by CTS",
+ "title":"Viewing Kafka Audit Logs",
"uri":"kafka-ug-180418002.html",
"doc_type":"usermanual",
- "p_code":"53",
- "code":"54"
- },
- {
- "desc":"This section describes how to view operation records of the last 7 days on the CTS console.Select the region where your Kafka instance is located.Trace Source: Select DMS",
- "product_code":"dms",
- "title":"Viewing Audit Logs",
- "uri":"kafka-ug-180418003.html",
- "doc_type":"usermanual",
- "p_code":"53",
- "code":"55"
+ "p_code":"",
+ "code":"72"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
@@ -501,7 +654,7 @@
"uri":"kafka-ug-0723004.html",
"doc_type":"usermanual",
"p_code":"",
- "code":"56"
+ "code":"73"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
@@ -509,8 +662,8 @@
"title":"Instances",
"uri":"kafka-faq-191030002.html",
"doc_type":"usermanual",
- "p_code":"56",
- "code":"57"
+ "p_code":"73",
+ "code":"74"
},
{
"desc":"To improve the reliability of a Kafka instance, you are advised to select three AZs or more when creating the instance. You cannot select two AZs.Each Kafka instance cont",
@@ -518,35 +671,35 @@
"title":"Why Can't I Select Two AZs?",
"uri":"kafka-faq-200426002.html",
"doc_type":"usermanual",
- "p_code":"57",
- "code":"58"
+ "p_code":"74",
+ "code":"75"
},
{
- "desc":"This may be because you do not have the Server Administrator and VPC Administrator permissions. For details about how to add permissions to a user group, see \"User and Us",
+ "desc":"This may be because you do not have the Server Administrator and VPC Administrator permissions. For details about how to add permissions to a user group, see Viewing and ",
"product_code":"dms",
"title":"Why Can't I View the Subnet and Security Group Information When Creating a DMS Instance?",
"uri":"kafka-faq-200426003.html",
"doc_type":"usermanual",
- "p_code":"57",
- "code":"59"
+ "p_code":"74",
+ "code":"76"
},
{
- "desc":"The storage space is the space for storing messages (including messages in replicas), logs and metadata. When specifying storage space, specify the disk type and disk siz",
+ "desc":"The storage space is the space for storing messages (including messages in replicas), logs and metadata. To select a storage space, specify the disk type and disk size. F",
"product_code":"dms",
"title":"How Do I Select Storage Space for a Kafka Instance?",
"uri":"kafka-faq-200426005.html",
"doc_type":"usermanual",
- "p_code":"57",
- "code":"60"
+ "p_code":"74",
+ "code":"77"
},
{
- "desc":"High I/O: The average latency is 6 to 10 ms, and the maximum bandwidth is 120 MB/s (read + write).Ultra-high I/O: The average latency is 1 to 3 ms, and the maximum bandwi",
+ "desc":"High I/O: The average latency is 6 to 10 ms, and the maximum bandwidth is 150 MB/s (read + write).Ultra-high I/O: The average latency is 1 to 3 ms, and the maximum bandwi",
"product_code":"dms",
"title":"How Do I Choose Between High I/O and Ultra-high I/O?",
"uri":"kafka-faq-200426006.html",
"doc_type":"usermanual",
- "p_code":"57",
- "code":"61"
+ "p_code":"74",
+ "code":"78"
},
{
"desc":"The following policies are supported:Stop productionWhen the memory usage reaches the disk capacity threshold (95%), new messages will no longer be created, but existing ",
@@ -554,17 +707,17 @@
"title":"Which Capacity Threshold Policy Should I Use?",
"uri":"kafka-faq-200426007.html",
"doc_type":"usermanual",
- "p_code":"57",
- "code":"62"
+ "p_code":"74",
+ "code":"79"
},
{
- "desc":"Kafka v1.1.0, v2.3.0, and v2.7.",
+ "desc":"Kafka v2.3.0, v2.7, and v3.x.For details about how to create a Kafka instance, see Creating a Kafka Instance.",
"product_code":"dms",
"title":"Which Kafka Versions Are Supported?",
"uri":"kafka-faq-200426008.html",
"doc_type":"usermanual",
- "p_code":"57",
- "code":"63"
+ "p_code":"74",
+ "code":"80"
},
{
"desc":"Kafka instances are managed using ZooKeeper. Opening ZooKeeper may cause misoperations and service losses. ZooKeeper is used only within Kafka clusters and does not provi",
@@ -572,26 +725,26 @@
"title":"What Is the ZooKeeper Address of a Kafka Instance?",
"uri":"kafka-faq-200426009.html",
"doc_type":"usermanual",
- "p_code":"57",
- "code":"64"
+ "p_code":"74",
+ "code":"81"
},
{
- "desc":"Yes. A Kafka instance is a cluster that consists of three or more brokers.",
+ "desc":"Kafka instances are classified into single-node and cluster types. A single-node instance has only one broker in single-node mode. A cluster instance consists of three or",
"product_code":"dms",
"title":"Are Kafka Instances in Cluster Mode?",
"uri":"kafka-faq-200426010.html",
"doc_type":"usermanual",
- "p_code":"57",
- "code":"65"
+ "p_code":"74",
+ "code":"82"
},
{
"desc":"No. You must access a Kafka instance through one of the following ports:Accessing a Kafka instance without SASL:The port varies with the access mode:Intra-VPC access: por",
"product_code":"dms",
- "title":"Can I Modify the Connection Address for Accessing a Kafka Instance?",
+ "title":"Can I Modify the Port for Accessing a Kafka Instance?",
"uri":"kafka-faq-200426011.html",
"doc_type":"usermanual",
- "p_code":"57",
- "code":"66"
+ "p_code":"74",
+ "code":"83"
},
{
"desc":"The certificates are valid for more than 15 years. You do not need to worry about certificate expiration. The certificates are used for one-way authentication when enabli",
@@ -599,8 +752,8 @@
"title":"How Long Are Kafka SSL Certificates Valid for?",
"uri":"kafka-faq-200426012.html",
"doc_type":"usermanual",
- "p_code":"57",
- "code":"67"
+ "p_code":"74",
+ "code":"84"
},
{
"desc":"Unfortunately, you cannot synchronize two Kafka instances in real time. To migrate services from one instance to another, create messages to both instances. After all mes",
@@ -608,17 +761,44 @@
"title":"How Do I Synchronize Data from One Kafka Instance to Another?",
"uri":"kafka-faq-200426013.html",
"doc_type":"usermanual",
- "p_code":"57",
- "code":"68"
+ "p_code":"74",
+ "code":"85"
},
{
- "desc":"The SASL_SSL setting cannot be changed once the instance has been created. Be careful when configuring this setting during instance creation. If you need to change the se",
+ "desc":"The SASL_SSL setting can be changed for cluster instances (see Configuring Plaintext or Ciphertext Access to Kafka Instances), but cannot be changed for single-node insta",
"product_code":"dms",
"title":"How Do I Change the SASL_SSL Setting of a Kafka Instance?",
"uri":"kafka-faq-200426014.html",
"doc_type":"usermanual",
- "p_code":"57",
- "code":"69"
+ "p_code":"74",
+ "code":"86"
+ },
+ {
+ "desc":"After an instance is created, its SASL mechanism cannot be modified. If you want to change it, create an instance again.",
+ "product_code":"dms",
+ "title":"How Do I Modify the SASL Mechanism?",
+ "uri":"kafka_faq_0052.html",
+ "doc_type":"usermanual",
+ "p_code":"74",
+ "code":"87"
+ },
+ {
+ "desc":"The security protocol can be changed on the console. In the Connection area on the Kafka instance details page, disable Ciphertext Access and then configure security prot",
+ "product_code":"dms",
+ "title":"How Do I Change the Security Protocol?",
+ "uri":"kafka_faq_0062.html",
+ "doc_type":"usermanual",
+ "p_code":"74",
+ "code":"88"
+ },
+ {
+ "desc":"No. A Kafka instance will not be restarted if you modify its enterprise project.",
+ "product_code":"dms",
+ "title":"Will a Kafka Instance Be Restarted After Its Enterprise Project Is Modified?",
+ "uri":"kafka-faq-0008.html",
+ "doc_type":"usermanual",
+ "p_code":"74",
+ "code":"89"
},
{
"desc":"Kafka brokers and ZooKeeper are deployed on the same VM.",
@@ -626,8 +806,8 @@
"title":"Are Kafka Brokers and ZooKeeper Deployed on the Same VM or on Different VMs?",
"uri":"kafka-faq-0015.html",
"doc_type":"usermanual",
- "p_code":"57",
- "code":"70"
+ "p_code":"74",
+ "code":"90"
},
{
"desc":"For security purposes, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 is supported.",
@@ -635,26 +815,26 @@
"title":"Which Cipher Suites Are Supported by Kafka?",
"uri":"kafka-faq-0020.html",
"doc_type":"usermanual",
- "p_code":"57",
- "code":"71"
+ "p_code":"74",
+ "code":"91"
},
{
- "desc":"No. The AZ configuration cannot be changed once the instance is created. To use multiple AZs, create another instance.",
+ "desc":"No. The AZ cannot be changed once the instance is created. To use multiple AZs, create another instance.",
"product_code":"dms",
- "title":"Can I Change an Instance from Single-AZ Deployment to Multi-AZ Deployment?",
+ "title":"Can I Change Single-AZ Deployment to Multi-AZ Deployment for an Instance?",
"uri":"kafka-faq-0023.html",
"doc_type":"usermanual",
- "p_code":"57",
- "code":"72"
+ "p_code":"74",
+ "code":"92"
},
{
"desc":"DMS for Kafka supports cross-AZ disaster recovery. If you select multiple AZs when creating an instance, cross-AZ disaster recovery will be available.You can view the AZs",
"product_code":"dms",
- "title":"Does DMS for Kafka Support Cross-AZ Disaster Recovery? Where Can I View the AZs Configured for an Existing Instance?",
+ "title":"Does DMS for Kafka Support Cross-AZ Disaster Recovery? Where Can I Check Whether an Existing Instance is Across-AZs?",
"uri":"kafka-faq-0025.html",
"doc_type":"usermanual",
- "p_code":"57",
- "code":"73"
+ "p_code":"74",
+ "code":"93"
},
{
"desc":"Yes.",
@@ -662,8 +842,8 @@
"title":"Do Kafka Instances Support Disk Encryption?",
"uri":"kafka-faq-0030.html",
"doc_type":"usermanual",
- "p_code":"57",
- "code":"74"
+ "p_code":"74",
+ "code":"94"
},
{
"desc":"No. Once an instance is created, its VPC and subnet cannot be changed.",
@@ -671,8 +851,8 @@
"title":"Can I Change the VPC and Subnet After a Kafka Instance Is Created?",
"uri":"kafka-faq-0036.html",
"doc_type":"usermanual",
- "p_code":"57",
- "code":"75"
+ "p_code":"74",
+ "code":"95"
},
{
"desc":"You can find Kafka Streams use cases on the official Kafka website.",
@@ -680,8 +860,8 @@
"title":"Where Can I Find Kafka Streams Use Cases?",
"uri":"kafka-faq-0037.html",
"doc_type":"usermanual",
- "p_code":"57",
- "code":"76"
+ "p_code":"74",
+ "code":"96"
},
{
"desc":"No. Kafka instances cannot be upgraded once they are created. To use a higher Kafka version, create another Kafka instance.",
@@ -689,8 +869,8 @@
"title":"Can I Upgrade Kafka Instances?",
"uri":"kafka-faq-0040.html",
"doc_type":"usermanual",
- "p_code":"57",
- "code":"77"
+ "p_code":"74",
+ "code":"97"
},
{
"desc":"On the DMS console, click the name of the target Kafka instance. Disable Public Access in the Connection section on the Basic Information tab page, and then enable it aga",
@@ -698,8 +878,53 @@
"title":"How Do I Bind an EIP Again?",
"uri":"kafka_faq_0046.html",
"doc_type":"usermanual",
- "p_code":"57",
- "code":"78"
+ "p_code":"74",
+ "code":"98"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"dms",
+ "title":"Specification Modification",
+ "uri":"kafka_faq_0053.html",
+ "doc_type":"usermanual",
+ "p_code":"73",
+ "code":"99"
+ },
+ {
+ "desc":"Table 1 describes the impact of increasing specifications. It takes 5 to 10 minutes to modify specifications on one broker. The more brokers, the longer time the modifica",
+ "product_code":"dms",
+ "title":"Does Specification Modification Affect Services?",
+ "uri":"kafka-faq-0035.html",
+ "doc_type":"usermanual",
+ "p_code":"99",
+ "code":"100"
+ },
+ {
+ "desc":"No. Data will not be migrated when you increase specifications.",
+ "product_code":"dms",
+ "title":"Will Data Migration Be Involved When I Increase Specifications?",
+ "uri":"kafka_faq_0054.html",
+ "doc_type":"usermanual",
+ "p_code":"99",
+ "code":"101"
+ },
+ {
+ "desc":"Possible cause: When you increase the broker flavor, a rolling restart is performed on brokers. During the restart, partition leaders are changed. The producer has cached",
+ "product_code":"dms",
+ "title":"Why Does Message Production Fail During Scaling?",
+ "uri":"kafka_faq_0056.html",
+ "doc_type":"usermanual",
+ "p_code":"99",
+ "code":"102"
+ },
+ {
+ "desc":"Symptom: Specifications fail to be increased, and a message is displayed indicating that the underlying ECS/EVS resources are insufficient. However, the required ECSs can",
+ "product_code":"dms",
+ "title":"What Can I Do When I Fail to Increase Specifications Due to Insufficient Resources?",
+ "uri":"kafka_faq_0057.html",
+ "doc_type":"usermanual",
+ "p_code":"99",
+ "code":"103"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
@@ -707,17 +932,8 @@
"title":"Connections",
"uri":"kafka-faq-191030001.html",
"doc_type":"usermanual",
- "p_code":"56",
- "code":"79"
- },
- {
- "desc":"This section describes how to troubleshoot Kafka connection problems.If the connection to a Kafka instance is abnormal, perform the following operations to troubleshoot t",
- "product_code":"dms",
- "title":"Troubleshooting Kafka Connection Exceptions",
- "uri":"kafka-faq-0604001.html",
- "doc_type":"usermanual",
- "p_code":"79",
- "code":"80"
+ "p_code":"73",
+ "code":"104"
},
{
"desc":"Kafka instances can be accessed within a VPC, across VPCs, through DNAT, or over public networks. Before accessing a Kafka instance, configure a security group.If they us",
@@ -725,8 +941,8 @@
"title":"How Do I Select and Configure a Security Group?",
"uri":"kafka-faq-180604024.html",
"doc_type":"usermanual",
- "p_code":"79",
- "code":"81"
+ "p_code":"104",
+ "code":"105"
},
{
"desc":"Yes. For details, see the instance access instructions.",
@@ -734,17 +950,17 @@
"title":"Can I Access a Kafka Instance Over a Public Network?",
"uri":"kafka-faq-200426015.html",
"doc_type":"usermanual",
- "p_code":"79",
- "code":"82"
+ "p_code":"104",
+ "code":"106"
},
{
- "desc":"The number of connection addresses of a Kafka instance is the same as the number of brokers of the instance.",
+ "desc":"The number of connection addresses of a Kafka instance is the same as the number of brokers of the instance. The following table lists the number of brokers corresponding",
"product_code":"dms",
"title":"How Many Connection Addresses Does a Kafka Instance Have by Default?",
"uri":"kafka-faq-200426016.html",
"doc_type":"usermanual",
- "p_code":"79",
- "code":"83"
+ "p_code":"104",
+ "code":"107"
},
{
"desc":"Yes. You can access a Kafka instance across regions over a public network or by using direct connections.",
@@ -752,8 +968,8 @@
"title":"Do Kafka Instances Support Cross-Region Access?",
"uri":"kafka-faq-200426017.html",
"doc_type":"usermanual",
- "p_code":"79",
- "code":"84"
+ "p_code":"104",
+ "code":"108"
},
{
"desc":"Yes. You can use one of the following methods to access a Kafka instance across VPCs:Establish a VPC peering connection to allow two VPCs to communicate with each other. ",
@@ -761,17 +977,17 @@
"title":"Do Kafka Instances Support Cross-VPC Access?",
"uri":"kafka-faq-200426019.html",
"doc_type":"usermanual",
- "p_code":"79",
- "code":"85"
+ "p_code":"104",
+ "code":"109"
},
{
- "desc":"Yes.If the client and the instance are in the same VPC, cross-subnet access is supported.",
+ "desc":"Yes.If the client and the instance are in the same VPC, cross-subnet access is supported. By default, subnets in the same VPC can communicate with each other.",
"product_code":"dms",
"title":"Do Kafka Instances Support Cross-Subnet Access?",
"uri":"kafka-faq-200426020.html",
"doc_type":"usermanual",
- "p_code":"79",
- "code":"86"
+ "p_code":"104",
+ "code":"110"
},
{
"desc":"No, Kerberos authentication is not supported. Kafka supports client authentication with SASL and API calling authentication using tokens and AK/SK.To access an instance i",
@@ -779,26 +995,17 @@
"title":"Does DMS for Kafka Support Authentication with Kerberos?",
"uri":"kafka-faq-200426023.html",
"doc_type":"usermanual",
- "p_code":"79",
- "code":"87"
+ "p_code":"104",
+ "code":"111"
},
{
- "desc":"Yes. No password is required for accessing a Kafka instance with SASL disabled. For details, see Accessing a Kafka Instance Without SASL.",
+ "desc":"Yes. No password is required when accessing a Kafka instance in plaintext. For details, see Connecting to Kafka Using the Client (Plaintext Access).",
"product_code":"dms",
"title":"Does DMS for Kafka Support Password-Free Access?",
"uri":"kafka-faq-200708002.html",
"doc_type":"usermanual",
- "p_code":"79",
- "code":"88"
- },
- {
- "desc":"Kafka instances are fully compatible with open-source clients. You can obtain clients in other programming languages and access your instance as instructed by the officia",
- "product_code":"dms",
- "title":"Obtaining Kafka Clients",
- "uri":"kafka-connect-other.html",
- "doc_type":"usermanual",
- "p_code":"79",
- "code":"89"
+ "p_code":"104",
+ "code":"112"
},
{
"desc":"Click the name of your Kafka instance. In the Connection section on the Basic Information tab page, view Instance Address (Public Network).For details about how to connec",
@@ -806,8 +1013,8 @@
"title":"How Do I Obtain the Public Access Address After Public Access Is Enabled?",
"uri":"kafka-faq-0001.html",
"doc_type":"usermanual",
- "p_code":"79",
- "code":"90"
+ "p_code":"104",
+ "code":"113"
},
{
"desc":"No.",
@@ -815,8 +1022,8 @@
"title":"Does DMS for Kafka Support Authentication on Clients by the Server?",
"uri":"kafka-faq-0026.html",
"doc_type":"usermanual",
- "p_code":"79",
- "code":"91"
+ "p_code":"104",
+ "code":"114"
},
{
"desc":"No. You can only use JKS certificates for connecting to instances in Java.",
@@ -824,8 +1031,8 @@
"title":"Can I Use PEM SSL Truststore When Connecting to a Kafka Instance with SASL_SSL Enabled?",
"uri":"kafka-faq-0027.html",
"doc_type":"usermanual",
- "p_code":"79",
- "code":"92"
+ "p_code":"104",
+ "code":"115"
},
{
"desc":"JKS certificates are used for connecting to instances in Java and CRT certificates are used for connecting to instances in Python.",
@@ -833,8 +1040,8 @@
"title":"What Are the Differences Between JKS and CRT Certificates?",
"uri":"kafka-faq-0028.html",
"doc_type":"usermanual",
- "p_code":"79",
- "code":"93"
+ "p_code":"104",
+ "code":"116"
},
{
"desc":"TLS 1.2.",
@@ -842,17 +1049,17 @@
"title":"Which TLS Version Does DMS for Kafka Support?",
"uri":"kafka-faq-0029.html",
"doc_type":"usermanual",
- "p_code":"79",
- "code":"94"
+ "p_code":"104",
+ "code":"117"
},
{
- "desc":"Yes. The maximum allowed number of client connections varies by instance specifications.If the bandwidth is 100 MB/s, a maximum of 3000 client connections are allowed.If ",
+ "desc":"Yes. The maximum allowed number of client connections varies by instance specifications.",
"product_code":"dms",
"title":"Is There a Limit on the Number of Client Connections to a Kafka Instance?",
"uri":"kafka-faq-0033.html",
"doc_type":"usermanual",
- "p_code":"79",
- "code":"95"
+ "p_code":"104",
+ "code":"118"
},
{
"desc":"Each Kafka broker allows a maximum of 1000 connections from each IP address by default. Excess connections will be rejected. You can change the limit by referring to Modi",
@@ -860,8 +1067,35 @@
"title":"How Many Connections Are Allowed from Each IP Address?",
"uri":"kafka-faq-0034.html",
"doc_type":"usermanual",
- "p_code":"79",
- "code":"96"
+ "p_code":"104",
+ "code":"119"
+ },
+ {
+ "desc":"No, and you cannot specify the IP addresses.",
+ "product_code":"dms",
+ "title":"Can I Change the Private Network Addresses of a Kafka Instance?",
+ "uri":"kafka_faq_0048.html",
+ "doc_type":"usermanual",
+ "p_code":"104",
+ "code":"120"
+ },
+ {
+ "desc":"Yes. All Kafka instances and users use the same SSL certificate.To obtain the SSL certificate, perform the following steps:",
+ "product_code":"dms",
+ "title":"Is the Same SSL Certificate Used for Different Instances?",
+ "uri":"kafka_faq_0051.html",
+ "doc_type":"usermanual",
+ "p_code":"104",
+ "code":"121"
+ },
+ {
+ "desc":"If a Sarama client is used to send and receive messages, the following issues may occur:Sarama cannot detect partition changes. Adding topic partitions requires client re",
+ "product_code":"dms",
+ "title":"Why Is It Not Recommended to Use a Sarama Client for Messaging?",
+ "uri":"kafka_faq_0061.html",
+ "doc_type":"usermanual",
+ "p_code":"104",
+ "code":"122"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
@@ -869,8 +1103,8 @@
"title":"Topics and Partitions",
"uri":"kafka-faq-191030003.html",
"doc_type":"usermanual",
- "p_code":"56",
- "code":"97"
+ "p_code":"73",
+ "code":"123"
},
{
"desc":"The number of topics is related to the total number of topic partitions and the number of partitions in each topic. There is an upper limit on the aggregate number of par",
@@ -878,8 +1112,8 @@
"title":"Is There a Limit on the Number of Topics in a Kafka Instance?",
"uri":"kafka-faq-200426024.html",
"doc_type":"usermanual",
- "p_code":"97",
- "code":"98"
+ "p_code":"123",
+ "code":"124"
},
{
"desc":"Kafka manages messages by partition. If there are too many partitions, message creation, storage, and retrieval will be fragmented, affecting the performance and stabilit",
@@ -887,26 +1121,26 @@
"title":"Why Is Partition Quantity Limited?",
"uri":"kafka-faq-200426025.html",
"doc_type":"usermanual",
- "p_code":"97",
- "code":"99"
+ "p_code":"123",
+ "code":"125"
},
{
"desc":"No. If you want to use fewer partitions, delete the corresponding topic, create another one, and specify the desired number of partitions.",
"product_code":"dms",
- "title":"Can I Change the Partition Quantity?",
+ "title":"Can I Reduce the Partition Quantity?",
"uri":"kafka-faq-200426101.html",
"doc_type":"usermanual",
- "p_code":"97",
- "code":"100"
+ "p_code":"123",
+ "code":"126"
},
{
- "desc":"Possible cause: The aggregate number of partitions of created topics has reached the upper limit. The maximum number of partitions varies with instance specifications. Fo",
+ "desc":"Possible cause: The aggregate number of partitions of created topics has reached the upper limit. The upper limit on partitions varies by instance specifications. For det",
"product_code":"dms",
"title":"Why Do I Fail to Create Topics?",
"uri":"kafka-faq-200426026.html",
"doc_type":"usermanual",
- "p_code":"97",
- "code":"101"
+ "p_code":"123",
+ "code":"127"
},
{
"desc":"Automatic topic creation is supported, but batch topic import is not supported. You can only export topics in batches.Enable automatic topic creation using one of the fol",
@@ -914,26 +1148,26 @@
"title":"Do Kafka Instances Support Batch Importing Topics or Automatic Topic Creation?",
"uri":"kafka-faq-200426027.html",
"doc_type":"usermanual",
- "p_code":"97",
- "code":"102"
+ "p_code":"123",
+ "code":"128"
},
{
- "desc":"This may be because automatic topic creation has been enabled and a consumer is connecting to the topic. If no existing topics are available for message creation, new top",
+ "desc":"Possible cause: Automatic topic creation has been enabled and a consumer is connecting to the topic. If no existing topics are available for message creation, new topics ",
"product_code":"dms",
"title":"Why Do Deleted Topics Still Exist?",
"uri":"kafka-faq-200426028.html",
"doc_type":"usermanual",
- "p_code":"97",
- "code":"103"
+ "p_code":"123",
+ "code":"129"
},
{
- "desc":"Yes. Use either of the following methods to check the disk space used by a topic:Click next to the Kafka instance name to go to the Cloud Eye console. On the Queues tab ",
+ "desc":"Yes. Use either of the following methods to check the disk space used by a topic:In the row containing the desired Kafka instance, click View Metric to go to the Cloud Ey",
"product_code":"dms",
"title":"Can I View the Disk Space Used by a Topic?",
"uri":"kafka-faq-200426030.html",
"doc_type":"usermanual",
- "p_code":"97",
- "code":"104"
+ "p_code":"123",
+ "code":"130"
},
{
"desc":"If you have enabled SASL_SSL for your Kafka instance, you can configure ACL permissions for your topics. On the Topics tab page of the Kafka console, click Grant User Per",
@@ -941,44 +1175,44 @@
"title":"Can I Add ACL Permissions for Topics?",
"uri":"kafka-faq-200426032.html",
"doc_type":"usermanual",
- "p_code":"97",
- "code":"105"
+ "p_code":"123",
+ "code":"131"
},
{
- "desc":"Messages are not deleted immediately after being retrieved. They are deleted only when the aging time expires.You can shorten the aging time.",
+ "desc":"Messages are not deleted immediately after being retrieved. They are deleted only when the aging time expires.You can shorten the aging time or expand the storage space.",
"product_code":"dms",
"title":"What Should I Do If Kafka Storage Space Is Used Up Because Retrieved Messages Are Not Deleted?",
"uri":"kafka-faq-0003.html",
"doc_type":"usermanual",
- "p_code":"97",
- "code":"106"
+ "p_code":"123",
+ "code":"132"
},
{
- "desc":"Yes. A Kafka instance will be restarted if you enable or disable automatic topic creation for it.",
+ "desc":"You can increase the partition quantity by adding brokers.To do so, go to the Kafka console, locate the row that contains the desired instance, and choose More > Modify S",
+ "product_code":"dms",
+ "title":"How Do I Increase the Partition Quantity?",
+ "uri":"kafka-faq-0005.html",
+ "doc_type":"usermanual",
+ "p_code":"123",
+ "code":"133"
+ },
+ {
+ "desc":"Enabling or disabling automatic topic creation may cause instance restarts. For details, see the information displayed on the Kafka console.",
"product_code":"dms",
"title":"Will a Kafka Instance Be Restarted After Its Automatic Topic Creation Setting Is Modified?",
"uri":"kafka-faq-0010.html",
"doc_type":"usermanual",
- "p_code":"97",
- "code":"107"
+ "p_code":"123",
+ "code":"134"
},
{
- "desc":"On the Kafka console, click the name of your instance.In the Instance Information section of the Basic Information tab page, click next to Automatic Topic Creation to di",
- "product_code":"dms",
- "title":"How Do I Disable Automatic Topic Creation?",
- "uri":"kafka-faq-0014.html",
- "doc_type":"usermanual",
- "p_code":"97",
- "code":"108"
- },
- {
- "desc":"Yes, just simply unsubscribe from it on the Kafka client.",
+ "desc":"Just simply unsubscribe from them on the Kafka client.",
"product_code":"dms",
"title":"Can I Delete Unnecessary Topics in a Consumer Group?",
"uri":"kafka-faq-0031.html",
"doc_type":"usermanual",
- "p_code":"97",
- "code":"109"
+ "p_code":"123",
+ "code":"135"
},
{
"desc":"Symptom: Different consumers in a consumer group have different topic permissions. When a consumer attempts to retrieve messages from a topic, the error message \"Not auth",
@@ -986,8 +1220,8 @@
"title":"What Can I Do If a Consumer Fails to Retrieve Messages from a Topic Due to Insufficient Permissions?",
"uri":"kafka-faq-0038.html",
"doc_type":"usermanual",
- "p_code":"97",
- "code":"110"
+ "p_code":"123",
+ "code":"136"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
@@ -995,35 +1229,44 @@
"title":"Consumer Groups",
"uri":"kafka-faq-200423001.html",
"doc_type":"usermanual",
- "p_code":"56",
- "code":"111"
+ "p_code":"73",
+ "code":"137"
},
{
- "desc":"No. They are generated automatically when you use the instance.For details about creating and retrieving messages after connecting to a Kafka instance, see Accessing a Ka",
+ "desc":"When parameter auto.create.groups.enable is set to true, you do not need to create a consumer group, producer, or consumer because they are generated automatically when y",
"product_code":"dms",
"title":"Do I Need to Create Consumer Groups, Producers, and Consumers for Kafka Instances?",
"uri":"kafka-faq-200426033.html",
"doc_type":"usermanual",
- "p_code":"111",
- "code":"112"
+ "p_code":"137",
+ "code":"138"
},
{
- "desc":"No. You can directly delete the consumer group.",
- "product_code":"dms",
- "title":"Do I Need to Unsubscribe from a Topic Before Deleting a Consumer Group?",
- "uri":"kafka-faq-0032.html",
- "doc_type":"usermanual",
- "p_code":"111",
- "code":"113"
- },
- {
- "desc":"Yes.Kafka uses the offsets.retention.minutes parameter to control how long to keep offsets for a consumer group. If offsets are not committed within this period, they wil",
+ "desc":"This depends on the offsets.retention.minutes and auto.create.groups.enable parameters.For instances created much earlier, auto.create.groups.enable is set to true by def",
"product_code":"dms",
"title":"Will a Consumer Group Without Active Consumers Be Automatically Deleted in 14 Days?",
"uri":"kafka-faq-0043.html",
"doc_type":"usermanual",
- "p_code":"111",
- "code":"114"
+ "p_code":"137",
+ "code":"139"
+ },
+ {
+ "desc":"Possible cause: Automatic consumer group creation has been enabled and your service is connected to the consumer group and consuming messages. Therefore, the consumer gro",
+ "product_code":"dms",
+ "title":"Why Does a Deleted Consumer Group Still Exist?",
+ "uri":"kafka_faq_0059.html",
+ "doc_type":"usermanual",
+ "p_code":"137",
+ "code":"140"
+ },
+ {
+ "desc":"Check whether Flink is used for consumption. Flink uses the assign mode and the client assigns specific partitions to be consumed, so you cannot see any consumer on the K",
+ "product_code":"dms",
+ "title":"Why Can't I View Consumers When Instance Consumption Is Normal?",
+ "uri":"kafka_faq_0060.html",
+ "doc_type":"usermanual",
+ "p_code":"137",
+ "code":"141"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
@@ -1031,8 +1274,8 @@
"title":"Messages",
"uri":"kafka-faq-190416001.html",
"doc_type":"usermanual",
- "p_code":"56",
- "code":"115"
+ "p_code":"73",
+ "code":"142"
},
{
"desc":"10 MB.",
@@ -1040,8 +1283,8 @@
"title":"What Is the Maximum Size of a Message that Can be Created?",
"uri":"kafka-faq-200426035.html",
"doc_type":"usermanual",
- "p_code":"115",
- "code":"116"
+ "p_code":"142",
+ "code":"143"
},
{
"desc":"Rebalancing is a process where partitions of topics are re-allocated for a consumer group.In normal cases, rebalancing occurs inevitably when a consumer is added to or re",
@@ -1049,8 +1292,8 @@
"title":"Why Does Message Poll Often Fail During Rebalancing?",
"uri":"kafka-faq-200426036.html",
"doc_type":"usermanual",
- "p_code":"115",
- "code":"117"
+ "p_code":"142",
+ "code":"144"
},
{
"desc":"Possible cause 1: The message has been aged.Solution: Change the aging time.Solution: Change the aging time.Possible cause 2: The createTime timestamp of the message is i",
@@ -1058,8 +1301,17 @@
"title":"Why Can't I Query Messages on the Console?",
"uri":"kafka-faq-200426037.html",
"doc_type":"usermanual",
- "p_code":"115",
- "code":"118"
+ "p_code":"142",
+ "code":"145"
+ },
+ {
+ "desc":"Symptom: An alarm is generated for the Accumulated Messages metric.Solution:Log in to the Kafka console and click the instance for which the alarm is generated. The insta",
+ "product_code":"dms",
+ "title":"What Can I Do If Kafka Messages Are Accumulated?",
+ "uri":"kafka-faq-200426100.html",
+ "doc_type":"usermanual",
+ "p_code":"142",
+ "code":"146"
},
{
"desc":"If the aging time has been set for a topic, the value of the log.retention.hours parameter does not take effect for the topic. The value of the log.retention.hours parame",
@@ -1067,8 +1319,8 @@
"title":"Why Do Messages Still Exist After the Retention Period Elapses?",
"uri":"kafka-faq-200708001.html",
"doc_type":"usermanual",
- "p_code":"115",
- "code":"119"
+ "p_code":"142",
+ "code":"147"
},
{
"desc":"No.",
@@ -1076,8 +1328,8 @@
"title":"Do Kafka Instances Support Delayed Message Delivery?",
"uri":"kafka-faq-0018.html",
"doc_type":"usermanual",
- "p_code":"115",
- "code":"120"
+ "p_code":"142",
+ "code":"148"
},
{
"desc":"View the number of accumulated messages using any of the following methods:On the Consumer Groups page of an instance, click the name of the consumer group whose accumula",
@@ -1085,8 +1337,8 @@
"title":"How Do I View the Number of Accumulated Messages?",
"uri":"kafka-faq-0041.html",
"doc_type":"usermanual",
- "p_code":"115",
- "code":"121"
+ "p_code":"142",
+ "code":"149"
},
{
"desc":"The message creation time is specified by CreateTime when a producer creates messages. If this parameter is not set during message creation, the message creation time is ",
@@ -1094,8 +1346,17 @@
"title":"Why Is the Message Creation Time Displayed as Year 1970?",
"uri":"kafka-faq-0045.html",
"doc_type":"usermanual",
- "p_code":"115",
- "code":"122"
+ "p_code":"142",
+ "code":"150"
+ },
+ {
+ "desc":"message.max.bytes can be modified on the Parameters page on the console. For details, see Modifying Kafka Instance Configuration Parameters.The maximum value of message.m",
+ "product_code":"dms",
+ "title":"How Do I Modify message.max.bytes?",
+ "uri":"kafka_faq_0058.html",
+ "doc_type":"usermanual",
+ "p_code":"142",
+ "code":"151"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
@@ -1103,17 +1364,17 @@
"title":"Monitoring & Alarm",
"uri":"kafka-faq-191030004.html",
"doc_type":"usermanual",
- "p_code":"56",
- "code":"123"
+ "p_code":"73",
+ "code":"152"
},
{
- "desc":"The possible causes are as follows:The topic name starts with a special character, such as an underscore (_) or a number sign (#).The consumer group name starts with a sp",
+ "desc":"If topic monitoring data is not displayed, the possible causes are as follows:The topic name starts with a special character, such as an underscore (_) or a number sign (",
"product_code":"dms",
"title":"Why Can't I View the Monitoring Data?",
"uri":"kafka-faq-200426041.html",
"doc_type":"usermanual",
- "p_code":"123",
- "code":"124"
+ "p_code":"152",
+ "code":"153"
},
{
"desc":"Symptom: The monitoring data shows that there are 810 million accumulated messages. However, the Kafka console shows that there are 100 million messages in all six topics",
@@ -1121,8 +1382,8 @@
"title":"Why Is the Monitored Number of Accumulated Messages Inconsistent with the Message Quantity Displayed on the Kafka Console?",
"uri":"kafka-faq-0007.html",
"doc_type":"usermanual",
- "p_code":"123",
- "code":"125"
+ "p_code":"152",
+ "code":"154"
},
{
"desc":"The monitoring data is reported every minute. The reported data will be displayed on the monitoring page after being sorted. This process takes less than 20 minutes. Afte",
@@ -1130,8 +1391,62 @@
"title":"Why Is a Consumer Group Still on the Monitoring Page After Being Deleted?",
"uri":"kafka-faq-0022.html",
"doc_type":"usermanual",
- "p_code":"123",
- "code":"126"
+ "p_code":"152",
+ "code":"155"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"dms",
+ "title":"Troubleshooting",
+ "uri":"kafka-ug-0723006.html",
+ "doc_type":"usermanual",
+ "p_code":"",
+ "code":"156"
+ },
+ {
+ "desc":"This section describes how to troubleshoot Kafka connection problems.If the connection to a Kafka instance is abnormal, perform the following operations to troubleshoot t",
+ "product_code":"dms",
+ "title":"Troubleshooting Kafka Connection Exceptions",
+ "uri":"kafka-faq-0604001.html",
+ "doc_type":"usermanual",
+ "p_code":"156",
+ "code":"157"
+ },
+ {
+ "desc":"The duration from message creation to retrieval occasionally reaches 6 minutes, which is not tolerable to services.Service requests are stacked and cannot be processed in",
+ "product_code":"dms",
+ "title":"Troubleshooting 6-Min Latency Between Message Creation and Retrieval",
+ "uri":"kafka-trouble-0709001.html",
+ "doc_type":"usermanual",
+ "p_code":"156",
+ "code":"158"
+ },
+ {
+ "desc":"The system displays the error message \"Disk error when trying to access log file on the disk\".The disk usage of the broker is too high.Expand the disk space by referring ",
+ "product_code":"dms",
+ "title":"Troubleshooting Message Creation Failures",
+ "uri":"kafka-trouble-0001.html",
+ "doc_type":"usermanual",
+ "p_code":"156",
+ "code":"159"
+ },
+ {
+ "desc":"A deleted topic still exists.Automatic topic creation has been enabled for the instance, and a consumer is connecting to the topic. If services are not stopped, message c",
+ "product_code":"dms",
+ "title":"Troubleshooting Topic Deletion Failures",
+ "uri":"kafka-trouble-0002.html",
+ "doc_type":"usermanual",
+ "p_code":"156",
+ "code":"160"
+ },
+ {
+ "desc":"For a Kafka instance deployed in multiple AZs, if one of the AZs is faulty, error message \"Topic {{topic_name}} not present in metadata after 60000 ms\" may be reported on",
+ "product_code":"dms",
+ "title":"Troubleshooting Error \"Topic {{topic_name}} not present in metadata after 60000 ms\" During Message Production or Consumption",
+ "uri":"kafka-trouble-0004.html",
+ "doc_type":"usermanual",
+ "p_code":"156",
+ "code":"161"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
@@ -1140,7 +1455,16 @@
"uri":"kafka-ug-00001.html",
"doc_type":"usermanual",
"p_code":"",
- "code":"127"
+ "code":"162"
+ },
+ {
+ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
+ "product_code":"dms",
+ "title":"Acronyms and Abbreviations",
+ "uri":"kafka-ug-00002.html",
+ "doc_type":"usermanual",
+ "p_code":"",
+ "code":"163"
},
{
"desc":"See Glossary.",
@@ -1149,6 +1473,6 @@
"uri":"dms-ug-0312114.html",
"doc_type":"usermanual",
"p_code":"",
- "code":"128"
+ "code":"164"
}
]
\ No newline at end of file
diff --git a/docs/dms/umn/CreateUserAndGrantPolicy.html b/docs/dms/umn/CreateUserAndGrantPolicy.html
new file mode 100644
index 00000000..245c3ca1
--- /dev/null
+++ b/docs/dms/umn/CreateUserAndGrantPolicy.html
@@ -0,0 +1,113 @@
+
+
+
Creating a User and Granting DMS for Kafka Permissions
+This section describes how to use Identity and Access Management (IAM) for fine-grained permissions control for your Distributed Message Service (DMS) for Kafka resources. With IAM, you can:
+
+
- Create IAM users for personnel based on your enterprise's organizational structure. Each IAM user has their own identity credentials for accessing DMS for Kafka resources.
- Grant users only the permissions required to perform a given task based on their job responsibilities.
- Entrust another account or cloud service to perform efficient O&M on your DMS for Kafka resources.
+
If your account meets your permissions requirements, you can skip this section.
+
This section describes the procedure for granting permissions (see Figure 1).
+
Prerequisites
Learn about the permissions (see System-defined roles and policies supported by DMS for Kafka) supported by DMS for Kafka and choose policies according to your requirements. For the permissions of other services, see Permissions.
+
+
Process Flow
Figure 1 Process for granting DMS for Kafka permissions
+
+
For the following example, create a user group on the IAM console and assign the DMS ReadOnlyAccess policy to the group.
+- Create an IAM user and add it to the created user group.
- Log in as the IAM user and verify permissions.
In the authorized region, perform the following operations:
+- Choose Service List > Distributed Message Service. Then click Create Instance on the console of DMS for Kafka. If a message appears indicating that you cannot perform the operation, the DMS ReadOnlyAccess policy is in effect.
- Choose Service List > Elastic Volume Service. If a message appears indicating that you have insufficient permissions, the DMS ReadOnlyAccess policy is in effect.
- Choose Service List > Distributed Message Service. If the Kafka instance list can be displayed, the DMS ReadOnlyAccess policy is in effect.
+
+
+
Example Custom Policies
You can create custom policies to supplement the system-defined policies of DMS for Kafka. For details about actions supported in custom policies, see "Permissions Policies and Supported Actions" in Distributed Message Service API Reference.
+
To create a custom policy, choose either visual editor or JSON.
+
- Visual editor: Select cloud services, actions, resources, and request conditions. This does not require knowledge of policy syntax.
- JSON: Create a JSON policy or edit an existing one.
+
For details, see Creating a Custom Policy. The following lists examples of common DMS for Kafka custom policies.
+
- DMS for Kafka permissions policies are based on DMS. Therefore, when assigning permissions, select DMS permissions policies.
- Due to data caching, a policy involving Object Storage Service (OBS) actions will take effect five minutes after it is attached to a user, user group, or project.
+
+
- Example 1: Grant permission to delete and restart instances.
{
+ "Version": "1.1",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "dms:instance:modifyStatus",
+ "dms:instance:delete"
+ ]
+ }
+ ]
+}
+ - Example 2: Grant permission to deny instance deletion.
A policy with only "Deny" permissions must be used together with other policies. If the permissions granted to an IAM user contain both "Allow" and "Deny", the "Deny" permissions take precedence over the "Allow" permissions.
+Assume that you want to grant the permissions of the DMS FullAccess policy to a user but want to prevent them from deleting instances. You can create a custom policy for denying instance deletion, and attach this policy together with the DMS FullAccess policy to the user. As an explicit deny in any policy overrides any allows, the user can perform all operations on DMS for Kafka except deleting instances.
+Example policy denying instance deletion:
+{
+ "Version": "1.1",
+ "Statement": [
+ {
+ "Effect": "Deny",
+ "Action": [
+ "dms:instance:delete"
+ ]
+ }
+ ]
+}
+
+
+
DMS for Kafka Resources
A resource is an object that exists within a service. DMS for Kafka resources include kafka. To select these resources, specify their paths.
+
+
Table 1 DMS for Kafka resources and their pathsResource
+ |
+Resource Name
+ |
+Path
+ |
+
+
+kafka
+ |
+Instance
+ |
+[Format]
+DMS:*:*:kafka:instance ID
+[Notes]
+For instance resources, IAM automatically generates the prefix (DMS:*:*:kafka:) of the resource path.
+For the path of a specific resource, add the instance ID to the end. You can also use an asterisk * to indicate any resource. For example:
+DMS:*:*:kafka:* indicates any Kafka instance.
+ |
+
+
+
+
+
+
DMS for Kafka Request Conditions
Request conditions are useful in determining when a custom policy is in effect. A request condition consists of condition keys and operators. Condition keys are either global or service-level and are used in the Condition element of a policy statement. Global condition keys (starting with g:) are available for operations of all services, while service-specific condition keys (starting with a service name such as dms:) are available only for operations of specific services. An operator must be used together with a condition key to form a complete condition statement.
+
DMS for Kafka has a group of predefined condition keys that can be used in IAM. For example, to define an "Allow" permission, use the condition dms:ssl to filter instances by SASL configurations. The following table lists the DMS for Kafka predefined condition keys.
+
+
Table 2 Predefined condition keys of DMS for KafkaCondition Key
+ |
+Operator
+ |
+Description
+ |
+
+
+dms:publicIP
+ |
+Bool
+ |
+Whether public access is enabled
+ |
+
+dms:ssl
+ |
+Bool
+ |
+Whether SSL is enabled
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/dms/umn/Kafka-client-best-practice.html b/docs/dms/umn/Kafka-client-best-practice.html
new file mode 100644
index 00000000..9abf69c4
--- /dev/null
+++ b/docs/dms/umn/Kafka-client-best-practice.html
@@ -0,0 +1,23 @@
+
+
+Suggestions on Using the Kafka Client
+Consumers
- Ensure that the owner thread does not exit abnormally. Otherwise, the client may fail to initiate consumption requests and the consumption will be blocked.
- Commit messages only after they have been processed. Otherwise, the messages may fail to be processed and cannot be polled again.
- Generally, do not commit every message. Otherwise, there will be many OFFSET_COMMIT requests, causing high CPU usage. For example, if a consumption request pulls 1000 messages and commits every one of them, TPS of the commit requests is 1000 times that of consumption. The smaller the message size, the larger the ratio. You can commit a specific number of messages in batches or enable enable.auto.commit. However, if the client is faulty, some cached consumption offset may be lost, resulting in repeated consumption. Therefore, you are advised to commit messages in batches based on service requirements.
- A consumer cannot frequently join or leave a group. Otherwise, the consumer will frequently perform rebalancing, which blocks consumption.
- The number of consumers cannot be greater than the number of partitions in the topic. Otherwise, some consumers may fail to poll for messages.
- Ensure that the consumer polls at regular intervals to keep sending heartbeats to the server. If the consumer stops sending heartbeats for long enough, the consumer session will time out and the consumer will be considered to have stopped. This will also block consumption.
- Ensure that there is a limitation on the size of messages buffered locally to avoid an out-of-memory (OOM) situation.
- Set the timeout for the consumer session to 30 seconds: session.timeout.ms=30000.
- Kafka supports exactly-once delivery. Therefore, ensure the idempotency of processing messages for services.
- Always close the consumer before exiting. Otherwise, consumers in the same group may be blocked within the timeout set by session.timeout.ms.
- Do not start a consumer group name with a special character, such as a number sign (#). Otherwise, monitoring data of the consumer group cannot be displayed.
+
+
Producers
- Synchronous replication: Set acks to all.
- Retry message sending: Set retries to 3.
- Optimize message sending: For latency-sensitive messages, set linger.ms to 0. For latency-insensitive messages, set linger.ms to a value ranging from 100 to 1000.
- Ensure that the producer has sufficient JVM memory to avoid blockages.
- Set the timestamp to the local time. Messages will fail to age if the timestamp is a future time.
+
+
Topics
Recommended topic configurations: Use 3 replicas, enable synchronous replication, and set the minimum number of in-sync replicas to 2. The number of in-sync replicas cannot be the same as the number of replicas of the topic. Otherwise, if one replica is unavailable, messages cannot be produced.
+
You can enable or disable automatic topic creation. If automatic topic creation is enabled, the system automatically creates a topic when a message is created in or retrieved from a topic that does not exist. This topic has the following default settings: 3 partitions, 3 replicas, aging time 72 hours, synchronous replication and flushing disabled, CreateTime message timestamp, and maximum 10,485,760 bytes message size.
+
+
Others
Maximum number of connections: 3000
+
Maximum size of a message: 10 MB
+
Access Kafka using SASL_SSL. Ensure that your DNS service is capable of resolving an IP address to a domain name. Alternatively, map all Kafka broker IP addresses to host names in the hosts file. Prevent Kafka clients from performing reverse resolution. Otherwise, connections may fail to be established.
+
Apply for a disk space size that is more than twice the size of service data multiplied by the number of replicas. In other words, keep 50% of the disk space idle.
+
Avoid frequent full GC in JVM. Otherwise, message production and consumption will be blocked.
+
+
+
+
diff --git a/docs/dms/umn/Kafka-client-parameter.html b/docs/dms/umn/Kafka-client-parameter.html
new file mode 100644
index 00000000..4a581831
--- /dev/null
+++ b/docs/dms/umn/Kafka-client-parameter.html
@@ -0,0 +1,135 @@
+
+
+Setting Parameters for Kafka Clients
+This section provides recommendations on configuring common parameters for Kafka producers and consumers. Kafka clients in different versions may have different parameter names. The following parameters are supported in v1.1.0 and later. For details about other parameters and versions, see Kafka Configuration.
+
+
Table 1 Producer parametersParameter
+ |
+Default Value
+ |
+Recommended Value
+ |
+Description
+ |
+
+
+acks
+ |
+1
+ |
+all or –1 (if high reliability mode is selected)
+1 (if high throughput mode is selected)
+ |
+Indicates the number of acknowledgments the producer requires the server to return before considering a request complete. This controls the durability of records that are sent. The value of this parameter can be any of the following:
+0: The producer will not wait for any acknowledgment from the server at all. The record will be immediately added to the socket buffer and considered sent. No guarantee can be made that the server has received the record, and the retries configuration will not take effect (as the client generally does not know of any failures). The offset given back for each record will always be set to –1.
+1: The leader will write the record to its local log but will respond without waiting until receiving full acknowledgement from all followers. If the leader fails immediately after acknowledging the record but before the followers have replicated it, the record will be lost.
+all or -1: The leader needs to wait until all backups in the ISR are written into logs. As long as any backup survives, data will not be lost. min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.
+ |
+
+retries
+ |
+0
+ |
+/
+ |
+Number of times that the client resends a message. Setting this parameter to a value greater than zero will cause the client to resend any record that failed to be sent.
+Note that this retry is no different than if the client re-sent the record upon receiving the error. Allowing retries will potentially change the ordering of records because if two batches are sent to the same partition, and the first fails and is retried but the second succeeds, then the records in the second batch may appear first.
+You are advised to configure producers so that they are able to retry in case of network disconnections. Set retries to 3 and the retry interval retry.backoff.ms to 1000.
+ |
+
+request.timeout.ms
+ |
+30000
+ |
+/
+ |
+Maximum amount of time (in ms) the client will wait for the response of a request. If the response is not received before the timeout elapses, the client will throw a timeout exception.
+Setting this parameter to a large value, for example, 127000 (127s), can prevent records from failing to be sent in high-concurrency scenarios.
+ |
+
+block.on.buffer.full
+ |
+TRUE
+ |
+TRUE
+ |
+Setting this parameter to TRUE indicates that when buffer memory is exhausted, the producer must stop receiving new message records or throw an exception.
+By default, this parameter is set to TRUE. However, in some cases, non-blocking usage is desired and it is better to throw an exception immediately. Setting this parameter to FALSE will cause the producer to instead throw "BufferExhaustedException" when buffer memory is exhausted.
+ |
+
+batch.size
+ |
+16384
+ |
+262144
+ |
+Default maximum number of bytes of messages that can be processed at a time. The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. This helps improve performance of both the client and the server. No attempt will be made to batch records larger than this size.
+Requests sent to brokers will contain multiple batches, one for each partition with data available to be sent.
+A smaller batch size will make batching less common and may reduce throughput (a batch size of zero will disable batching entirely). A larger batch size may use more memory as a buffer of the specified batch size will always be allocated in anticipation of additional records.
+ |
+
+buffer.memory
+ |
+33554432
+ |
+67108864
+ |
+Total bytes of memory the producer can use to buffer records waiting to be sent to the server. If records are sent faster than they can be delivered to the broker, the producer will stop sending records or throw a "block.on.buffer.full" exception.
+This setting should correspond roughly to the total memory the producer will use, but is not a rigid bound since not all memory the producer uses is used for buffering. Some additional memory will be used for compression (if compression is enabled) as well as for maintaining in-flight requests.
+ |
+
+
+
+
+
+
Table 2 Consumer parametersParameter
+ |
+Default Value
+ |
+Recommended Value
+ |
+Description
+ |
+
+
+auto.commit.enable
+ |
+TRUE
+ |
+FALSE
+ |
+If this parameter is set to TRUE, the offset of messages already fetched by the consumer will be periodically committed to ZooKeeper. This committed offset will be used when the process fails as the position from which the new consumer will begin.
+Constraints: If this parameter is set to FALSE, to avoid message loss, an offset must be committed to ZooKeeper after the messages are successfully consumed.
+ |
+
+auto.offset.reset
+ |
+latest
+ |
+earliest
+ |
+Indicates what to do when there is no initial offset in ZooKeeper or if the current offset has been deleted. Options:
+- earliest: Automatically reset to the smallest offset.
- latest: Automatically reset to the largest offset.
- none: The system throws an exception to the consumer if no offset is available.
- anything else: The system throws an exception to the consumer.
+ NOTE: If this parameter is set to latest, the producer may start to send messages to new partitions (if any) before the consumer resets to the initial offset. As a result, some messages will be lost.
+
+ |
+
+connections.max.idle.ms
+ |
+600000
+ |
+30000
+ |
+Timeout interval (in ms) for an idle connection. The server closes the idle connection after this period of time ends. Setting this parameter to 30000 can reduce the server response failures when the network condition is poor.
+ |
+
+
+
+
+
+
+
diff --git a/docs/dms/umn/Kafka-specification.html b/docs/dms/umn/Kafka-specification.html
index 2338555b..abe80dcb 100644
--- a/docs/dms/umn/Kafka-specification.html
+++ b/docs/dms/umn/Kafka-specification.html
@@ -1,102 +1,336 @@
Specifications
-Kafka Instance Specifications
Kafka instances are compatible with open-source Kafka 1.1.0, 2.3.0, and 2.7. The instance specifications are classified based on bandwidth, namely, 100 MB/s, 300 MB/s, 600 MB/s, and 1200 MB/s.
+
Kafka Instance Specifications
Kafka instances are compatible with open-source Kafka v1.1.0, v2.3.0, v2.7, and v3.x. Kafka instances are classified into cluster and single-node types. A cluster instance consists of three or more brokers and a single-node one has one broker.
+
Kafka instances are classified based on instance ECS flavors as follows:
+
- Cluster
- kafka.2u4g.cluster.small
- kafka.2u4g.cluster
- kafka.4u8g.cluster
- kafka.8u16g.cluster
- kafka.12u24g.cluster
- kafka.16u32g.cluster
- kafka.2u4g.cluster.beta
- kafka.4u8g.cluster.beta
+ - Single-node
- kafka.2u4g.single.small
- kafka.2u4g.single
+
+
For Kafka instances, the number of transactions per second (TPS) is the maximum number of messages that can be written per second. In the following table, transactions per second (TPS) are calculated assuming that the size of a message is 1 KB. The test scenario is private access in plaintext. The disk type is ultra-high I/O.
+
Cluster Kafka instances support v1.1.0, v2.3.0, v2.7, and v3.x. Single-node Kafka instances support v2.7.
+
-
Table 1 TPS and the maximum number of partitions supported by different instance specifications and I/O typesBandwidth
+Table 1 Kafka instance specifications (v1.1.0/v2.3.0/v2.7 cluster instances)Flavor
|
-I/O Type
+ | Brokers
|
-TPS (High-Throughput)
+ | Maximum TPS per Broker
|
-TPS (Synchronous Replication)
+ | Maximum Partitions per Broker
|
-Maximum Partitions
+ | Recommended Consumer Groups per Broker
+ |
+Maximum Client Connections per Broker
+ |
+Storage Space
+ |
+Traffic per Broker (MB/s)
|
-100 MB/s
+ | kafka.2u4g.cluster.small
|
-High I/O
+ | 3–30
|
-100,000
+ | 20,000
|
-60,000
+ | 100
|
-300
+ | 15
+ |
+2000
+ |
+300 GB–300,000 GB
+ |
+40
|
-Ultra-high I/O
+ | kafka.2u4g.cluster
|
-100,000
+ | 3–30
|
-80,000
+ | 30,000
|
-300
+ | 250
+ |
+20
+ |
+2000
+ |
+300 GB–300,000 GB
+ |
+100
|
-300 MB/s
+ | kafka.4u8g.cluster
|
-High I/O
+ | 3–30
|
-300,000
+ | 100,000
|
-150,000
+ | 500
|
-900
+ | 100
+ |
+4000
+ |
+300 GB–600,000 GB
+ |
+200
|
-Ultra-high I/O
+ | kafka.8u16g.cluster
|
-300,000
+ | 3–50
|
-200,000
+ | 150,000
|
-900
+ | 1000
+ |
+150
+ |
+4000
+ |
+300 GB–1,500,000 GB
+ |
+250
|
-600 MB/s
+ | kafka.12u24g.cluster
|
-Ultra-high I/O
+ | 3–50
|
-600,000
+ | 200,000
|
-300,000
+ | 1500
|
-1800
+ | 200
+ |
+4000
+ |
+300 GB–1,500,000 GB
+ |
+375
|
-1200 MB/s
+ | kafka.16u32g.cluster
|
-Ultra-high I/O
+ | 3–50
|
-1,200,000
+ | 250,000
|
-400,000
+ | 2000
|
-1800
+ | 200
+ |
+4000
+ |
+300 GB–1,500,000 GB
+ |
+500
|
- For Kafka instances, the number of transactions per second (TPS) is the maximum number of messages that can be written per second. The preceding TPS is calculated with each message being 1 KB.
-
+
+Table 2 Kafka instance specifications (v3.x cluster instances)Flavor
+ |
+Brokers
+ |
+TPS per Broker
+ |
+Maximum Partitions per Broker
+ |
+Recommended Consumer Groups per Broker
+ |
+Maximum Client Connections per Broker
+ |
+Storage Space
+ |
+Traffic per Broker (MB/s)
+ |
+
+
+kafka.2u4g.cluster.beta
+ |
+3
+ |
+30,000
+ |
+250
+ |
+20
+ |
+2000
+ |
+300 GB
+ |
+100
+ |
+
+kafka.4u8g.cluster.beta
+ |
+3
+ |
+100,000
+ |
+500
+ |
+100
+ |
+4000
+ |
+300 GB
+ |
+200
+ |
+
+
+
-Bandwidth SelectionThe bandwidth of a Kafka instance refers to the maximum read or write bandwidth. You are advised to select a bandwidth 30% higher than what is required.
- - 100 MB/s
Recommended for up to 3000 client connections, 60 consumer groups, and 70 MB/s service traffic.
- - 300 MB/s
Recommended for up to 10,000 client connections, 300 consumer groups, and 210 MB/s service traffic.
- - 600 MB/s
Recommended for up to 20,000 client connections, 600 consumer groups, and 420 MB/s service traffic.
- - 1200 MB/s
Recommended for up to 20,000 client connections, 600 consumer groups, and 840 MB/s service traffic.
+
+Table 3 Kafka instance specifications (single-node)Flavor
+ |
+Brokers
+ |
+TPS per Broker
+ |
+Maximum Partitions per Broker
+ |
+Recommended Consumer Groups per Broker
+ |
+Maximum Client Connections per Broker
+ |
+Storage Space
+ |
+Traffic per Broker (MB/s)
+ |
+
+
+kafka.2u4g.single.small
+ |
+1
+ |
+20,000
+ |
+100
+ |
+15
+ |
+2000
+ |
+100 GB–10,000 GB
+ |
+40
+ |
+
+kafka.2u4g.single
+ |
+1
+ |
+30,000
+ |
+250
+ |
+20
+ |
+2000
+ |
+100 GB–10,000 GB
+ |
+100
+ |
+
+
+
+
+
+Instance Specifications and Network BandwidthThe network bandwidth of a Kafka instance consists of the following:
+ - Network bandwidth used by the instance brokers
- Bandwidth of the disk used by the instance brokers. For details, see Disk Types and Performance.
+ Note:
+ - By default, Kafka tests are performed in the tail read scenario (that is, only the latest production data is consumed) instead of the cold read scenario (that is, historical data is consumed from the beginning).
- The bandwidth of an instance with an old flavor (such as 100 MB/s) is the total network bandwidth of the instance's all brokers.
+ Traffic calculation of instances with new flavors (such as kafka.2u4g.cluster) is described as follows:
+ - The read/write ratio is 1:1.
- The default number of topic replicas is 3.
- Total network traffic = Traffic per broker x Broker quantity
- Total instance traffic = Service traffic + Data replication traffic between brokers
+ Assume that the current flavor is kafka.2u4g.cluster, the traffic per broker is 100 MB/s, and the number of brokers is 3. What are the total network traffic, maximum read traffic, and maximum write traffic of the instance?
+ - Total network traffic = Traffic per broker x Broker quantity = 100 MB/s x 3 = 300 MB/s
- Maximum read traffic = Total instance network traffic/Default number of replicas/2 = 300 MB/s/3/2 = 50 MB/s
- Maximum write traffic = Total instance network traffic/Default number of replicas/2 = 300 MB/s/3/2 = 50 MB/s
+
+Mapping Between Old and New FlavorsTable 4 compares the old and new Kafka instance flavors.
+
+ Table 4 Mapping between old and new Kafka instance flavorsOld Flavor
+ |
+New Flavor
+ |
+
+
+Flavor
+ |
+Total Instance Network Traffic
+ |
+Flavor
+ |
+Total Instance Network Traffic
+ |
+
+100 MB/s
+ |
+100 MB/s
+ |
+kafka.2u4g.cluster.small * 3
+ |
+120 MB/s
+ |
+
+300 MB/s
+ |
+300 MB/s
+ |
+kafka.2u4g.cluster * 3
+ |
+300 MB/s
+ |
+
+600 MB/s
+ |
+600 MB/s
+ |
+kafka.4u8g.cluster * 3
+ |
+600 MB/s
+ |
+
+1200 MB/s
+ |
+1200 MB/s
+ |
+kafka.4u8g.cluster * 6
+ |
+1250 MB/s
+ |
+
+
+
+
+ Instances with new flavors have the following features:
+ - Better performance and cost effectiveness: They use exclusive resources (except for kafka.2u4g.cluster.small). By contrast, old flavors use non-exclusive resources. If the load is heavy, resources conflicts will occur.
- Latest functions, for example, reassigning partitions and changing the SSL setting.
- Flexible flavor changes: For example, you can increase the broker flavor.
- Flexible disk capacity: Only related to the broker quantity, and not to the flavor.
- More specification options: A wider range of combinations of broker flavor (over 10,000 MB/s) and quantity are available.
+
+Flavor Selection- kafka.2u4g.cluster.small with 3 brokers
Recommended for up to 6000 client connections, 45 consumer groups, and 60,000 TPS
+ - kafka.2u4g.cluster with 3 brokers
Recommended for up to 6000 client connections, 60 consumer groups, and 90,000 TPS
+ - kafka.4u8g.cluster with 3 brokers
Recommended for up to 12,000 client connections, 300 consumer groups, and 300,000 TPS
+ - kafka.8u16g.cluster with 3 brokers
Recommended for up to 12,000 client connections, 450 consumer groups, and 450,000 TPS
+ - kafka.12u24g.cluster with 3 brokers
Recommended for up to 12,000 client connections, 600 consumer groups, and 600,000 TPS
+ - kafka.16u32g.cluster with 3 brokers
Recommended for up to 12,000 client connections, 600 consumer groups, and 750,000 TPS
-Storage Space SelectionKafka instances support storage with 1 to 3 replicas. The storage space is consumed by all replicas. When creating an instance, specify its storage space based on the expected service message size and the number of replicas.
- For example, if the estimated message size is 100 GB, the disk capacity must be at least: 100 GB x Number of replicas + 100 GB (reserved space).
+ Storage Space SelectionKafka instances can store messages in multiple replicas. The storage space is consumed by message replicas, logs, and metadata. When creating an instance, specify its storage space based on the expected service message size, the number of replicas, and reserved disk space. Each Kafka broker reserves 33 GB disk space for storing logs and metadata.
+ For example, if the expected service message size is 100 GB, the number of replicas is 2, and the number of brokers is 3, the disk size should be at least 299 GB (100 GB x 2 + 33 GB x 3).
+ The storage space can be expanded as your service grows.
Topic QuantityThere are limits on the topic quantity and the aggregate number of partitions in the topics. When the partition quantity limit is reached, you can no longer create topics.
- The number of topics is related to the maximum number of partitions allowed and the specified number of partitions in each topic (see Table 1).
- The maximum number of partitions for a 100 MB/s instance is 300.
- - If the number of partitions of each topic in the instance is 3, the maximum number of topics is 300/3 = 100.
- If the number of partitions of each topic in the instance is 1, the maximum number of topics is 300/1 = 300.
+ The number of topics is related to the maximum number of partitions allowed (see Figure 1) and the specified number of partitions in each topic (see Table 1 and Table 3).
+ Figure 1 Setting the number of partitions
+ The maximum number of partitions allowed for an instance with kafka.2u4g.cluster and 3 brokers is 750.
+ - If the number of partitions of each topic in the instance is 3, the maximum number of topics is 750/3 = 250.
- If the number of partitions of each topic in the instance is 1, the maximum number of topics is 750/1 = 750.
diff --git a/docs/dms/umn/ProductDescPrivilegeManagement.html b/docs/dms/umn/ProductDescPrivilegeManagement.html
new file mode 100644
index 00000000..51728a61
--- /dev/null
+++ b/docs/dms/umn/ProductDescPrivilegeManagement.html
@@ -0,0 +1,270 @@
+
+
+ Permission
+ You can use Identity and Access Management (IAM) to manage DMS for Kafka permissions and control access to your resources. IAM provides identity authentication, permissions management, and access control.
+ You can create IAM users for your employees, and assign permissions to these users on a principle of least privilege (PoLP) basis to control their access to specific resource types. For example, you can create IAM users for software developers and assign specific permissions to allow them to use Kafka instance resources but prevent them from being able to delete resources or perform any high-risk operations.
+ If your account does not require individual IAM users for permissions management, skip this section.
+ IAM is a free service. You only pay for the resources in your account.
+ For more information, see IAM Service Overview.
+ DMS PermissionsBy default, new IAM users do not have any permissions assigned. To assign permissions to these new users, add them to one or more groups, and attach permissions policies or roles to these groups.
+ DMS is a project-level service deployed and accessed in specific physical regions. When assigning DMS for Kafka permissions to a user group, specify region-specific projects where the permissions will take effect. If you select All projects, the permissions will be granted for all region-specific projects. When accessing DMS, the users need to switch to a region where they have been authorized to use this service.
+ You can grant permissions by using roles and policies. - Roles: A type of coarse-grained authorization mechanism that provides only a limited number of service-level roles. When using roles to grant permissions, you also need to assign dependency roles. However, roles are not an ideal choice for fine-grained authorization and secure access control.
- Policies: A fine-grained authorization strategy that defines permissions required to perform operations on specific cloud resources under certain conditions. This mechanism allows for more flexible policy-based authorization for more secure access control. For example, you can grant DMS for Kafka users only the permissions for managing instances. Most policies define permissions based on APIs. For the API actions supported by DMS for Kafka, see "Permissions Policies and Supported Actions" in the Distributed Message Service API Reference.
+
+ Table 1 lists all the system-defined policies supported by DMS for Kafka.
+
+ Table 1 System-defined policies supported by DMS for KafkaRole/Policy Name
+ |
+Description
+ |
+Type
+ |
+Dependency
+ |
+
+
+DMS FullAccess
+ |
+Administrator permissions for DMS. Users granted these permissions can perform all operations on DMS.
+ |
+System-defined policy
+ |
+None
+ |
+
+DMS UserAccess
+ |
+Common user permissions for DMS, excluding permissions for creating, modifying, deleting, and scaling up instances.
+ |
+System-defined policy
+ |
+None
+ |
+
+DMS ReadOnlyAccess
+ |
+Read-only permissions for DMS. Users granted these permissions can only view DMS data.
+ |
+System-defined policy
+ |
+None
+ |
+
+DMS VPCAccess
+ |
+VPC operation permissions to assign to DMS agencies.
+ |
+System-defined policy
+ |
+None
+ |
+
+DMS KMSAccess
+ |
+KMS operation permissions to assign to DMS agencies.
+ |
+System-defined policy
+ |
+None
+ |
+
+
+
+
+ System-defined policies contain OBS actions. Due to data caching, the policies take effect five minutes after they are attached to a user, user group, or enterprise project.
+
+ Table 2 lists the common operations supported by each DMS for Kafka system policy. Select the policies as required.
+
+ Table 2 Common operations supported by each system-defined policy of DMS for KafkaOperation
+ |
+DMS FullAccess
+ |
+DMS UserAccess
+ |
+DMS ReadOnlyAccess
+ |
+DMS VPCAccess
+ |
+DMS KMSAccess
+ |
+
+
+Creating instances
+ |
+√
+ |
+×
+ |
+×
+ |
+×
+ |
+×
+ |
+
+Modifying instances
+ |
+√
+ |
+×
+ |
+×
+ |
+×
+ |
+×
+ |
+
+Deleting instances
+ |
+√
+ |
+×
+ |
+×
+ |
+×
+ |
+×
+ |
+
+Modifying instance specifications
+ |
+√
+ |
+×
+ |
+×
+ |
+×
+ |
+×
+ |
+
+Restarting instances
+ |
+√
+ |
+√
+ |
+×
+ |
+×
+ |
+×
+ |
+
+Querying instance information
+ |
+√
+ |
+√
+ |
+√
+ |
+×
+ |
+×
+ |
+
+
+
+
+
+ Fine-grained AuthorizationTo use a custom fine-grained policy, log in to the IAM console as an administrator and select the desired fine-grained permissions for DMS. Table 3 describes fine-grained permission dependencies of DMS for Kafka.
+
+ Table 3 Fine-grained permission dependencies of DMS for KafkaPermission
+ |
+Description
+ |
+Dependency
+ |
+
+
+dms:instance:list
+ |
+Viewing the instance list
+ |
+None
+ |
+
+dms:instance:get
+ |
+Viewing instance details
+ |
+None
+ |
+
+dms:instance:create
+ |
+Creating an instance
+ |
+- vpc:vpcs:get
- vpc:ports:create
- vpc:securityGroups:get
- vpc:ports:get
- vpc:subnets:get
- vpc:vpcs:list
- vpc:publicIps:get
- vpc:publicIps:list
- vpc:ports:update
- vpc:publicIps:update
- vpc:ports:delete
- kms:cmk:list
+ |
+
+dms:instance:getBackgroundTask
+ |
+Viewing background task details
+ |
+None
+ |
+
+dms:instance:deleteBackgroundTask
+ |
+Deleting a background task
+ |
+None
+ |
+
+dms:instance:modifyStatus
+ |
+Restarting an instance
+ |
+None
+ |
+
+dms:instance:resetAuthInfo
+ |
+Resetting an instance password
+ |
+None
+ |
+
+dms:instance:modifyAuthInfo
+ |
+Changing an instance password
+ |
+None
+ |
+
+dms:instance:modify
+ |
+Modifying an instance
+ |
+- vpc:vpcs:get
- vpc:ports:create
- vpc:securityGroups:get
- vpc:ports:get
- vpc:subnets:get
- vpc:vpcs:list
- vpc:publicIps:get
- vpc:publicIps:list
- vpc:ports:update
- vpc:publicIps:update
- vpc:ports:delete
+ |
+
+dms:instance:scale
+ |
+Scaling up an instance
+ |
+- vpc:vpcs:get
- vpc:ports:create
- vpc:securityGroups:get
- vpc:ports:get
- vpc:subnets:get
- vpc:vpcs:list
- vpc:publicIps:get
- vpc:publicIps:list
- vpc:ports:update
- vpc:publicIps:update
+ |
+
+dms:instance:delete
+ |
+Deleting an instance
+ |
+None
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/dms/umn/TagManagement.html b/docs/dms/umn/TagManagement.html
index 47a5b302..d3721fa6 100644
--- a/docs/dms/umn/TagManagement.html
+++ b/docs/dms/umn/TagManagement.html
@@ -1,8 +1,8 @@
- Managing Instance Tags
+ Configuring Kafka Instance Tags
Tags facilitate Kafka instance identification and management.
- You can add tags to a Kafka instance when creating the instance or add tags on the Tags tab page of the created instance. Up to 20 tags can be added to an instance. Tags can be modified and deleted.
+ You can add tags to a Kafka instance when creating the instance or add tags on the Tags tab page of the created instance. Up to 20 tags can be added to an instance. Tags can be deleted.
A tag consists of a tag key and a tag value. Table 1 lists the tag key and value requirements.
Table 1 Tag key and value requirementsParameter
@@ -13,25 +13,22 @@
|
Tag key
|
-- Cannot be left blank.
- Must be unique for the same instance.
- Can contain a maximum of 36 characters.
- Cannot contain the following characters: =*<>\,|/
- Cannot start or end with a space.
+ | - Cannot be left blank.
- Must be unique for the same instance.
- Can contain 1 to 128 characters.
- Can contain letters, digits, spaces, and special characters _.:=+-@
- Cannot start or end with a space.
|
Tag value
|
-- Cannot be left blank.
- Can contain a maximum of 43 characters.
- Cannot contain the following characters: =*<>\,|/
- Cannot start or end with a space.
+ | - Can contain 0 to 255 characters.
- Can contain letters, digits, spaces, and special characters _.:=+-@
- Cannot start or end with a space.
|
- Procedure- Log in to the management console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
-
- - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the name of an instance.
- Click the Tags tab.
View the tags of the instance.
- - Perform the following operations as required:
- Add a tag
- Click Add/Edit Tag.
- Enter a tag key and a tag value, and click Add.
If you have predefined tags, select a predefined pair of tag key and value, and click Add.
+Configuring Kafka Instance Tags- Log in to the console.
- Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the name of an instance.
- In the navigation pane on the left, choose Tags.
View the tags of the instance.
+ - Perform the following operations as required:
diff --git a/docs/dms/umn/UserPrivilegeManagement.html b/docs/dms/umn/UserPrivilegeManagement.html
new file mode 100644
index 00000000..aa4306e6
--- /dev/null
+++ b/docs/dms/umn/UserPrivilegeManagement.html
@@ -0,0 +1,12 @@
+
+
+Permission Management
+
+
+
diff --git a/docs/dms/umn/dms-ug-0312114.html b/docs/dms/umn/dms-ug-0312114.html
index 0fd511c5..c1735fbd 100644
--- a/docs/dms/umn/dms-ug-0312114.html
+++ b/docs/dms/umn/dms-ug-0312114.html
@@ -3,5 +3,4 @@
Glossary
-
diff --git a/docs/dms/umn/dms-ug-180413002.html b/docs/dms/umn/dms-ug-180413002.html
deleted file mode 100644
index 8617a7d6..00000000
--- a/docs/dms/umn/dms-ug-180413002.html
+++ /dev/null
@@ -1,656 +0,0 @@
-
-
-Kafka Metrics
-IntroductionThis section describes DMS metrics reported to Cloud Eye as well as their namespace and dimensions. You can use the Cloud Eye console to query the Kafka metrics and alarms.
-
-
- Instance Metrics
- Table 1 Instance metricsMetric ID
- |
-Metric Name
- |
-Description
- |
-Value Range
- |
-Monitored Object
- |
-Monitoring Period (Raw Data)
- |
-
-
-current_partitions
- |
-Partitions
- |
-Number of used partitions in the instance
-Unit: count
- |
-0~1800
- |
-Kafka instance
- |
-1 minute
- |
-
-current_topics
- |
-Topics
- |
-Number of created topics in the instance
-Unit: count
- |
-0–600
- |
-Kafka instance
- |
-1 minute
- |
-
-group_msgs
- |
-Accumulated Messages
- |
-Total number of accumulated messages in all consumer groups of the instance
-Unit: count
- |
-0–1,000,000,000
- |
-Kafka instance
- |
-1 minute
- |
-
-
-
-
-
- Broker Metrics
- Table 2 Broker metricsMetric ID
- |
-Metric Name
- |
-Description
- |
-Value Range
- |
-Monitored Object
- |
-Monitoring Period (Raw Data)
- |
-
-
-broker_data_size
- |
-Message Size
- |
-Total size of messages in the broker
-Unit: byte, KB, MB, GB, TB or PB
- |
-0–5,000,000,000,000
- |
-Kafka instance broker
- |
-1 minute
- |
-
-broker_messages_in_rate
- |
-Message Creation Rate
- |
-Number of messages created per second
-Unit: count/s
- |
-0–500,000
- |
-Kafka instance broker
- |
-1 minute
- |
-
-broker_bytes_out_rate
- |
-Message Retrieval
- |
-Number of bytes retrieved per second
-Unit: byte/s, KB/s, MB/s, or GB/s
- |
-0–500,000,000
- |
-Kafka instance broker
- |
-1 minute
- |
-
-broker_bytes_in_rate
- |
-Message Creation
- |
-Number of bytes created per second
-Unit: byte/s, KB/s, MB/s, or GB/s
- |
-0–500,000,000
- |
-Kafka instance broker
- |
-1 minute
- |
-
-broker_public_bytes_in_rate
- |
-Public Inbound Traffic
- |
-Inbound traffic over public networks per second
-Unit: byte/s, KB/s, MB/s, or GB/s
- NOTE: You can view this metric on the EIP console if public access has been enabled and EIPs have been assigned to the instance.
-
- |
-0–500,000,000
- |
-Kafka instance broker
- |
-1 minute
- |
-
-broker_public_bytes_out_rate
- |
-Public Outbound Traffic
- |
-Outbound traffic over public networks per second
-Unit: byte/s, KB/s, MB/s, or GB/s
- NOTE: You can view this metric on the EIP console if public access has been enabled and EIPs have been assigned to the instance.
-
- |
-0–500,000,000
- |
-Kafka instance broker
- |
-1 minute
- |
-
-broker_fetch_mean
- |
-Average Message Retrieval Processing Duration
- |
-Average time that the broker spends processing message retrieval requests
-Unit: ms
- |
-0–10,000
- |
-Kafka instance broker
- |
-1 minute
- |
-
-broker_produce_mean
- |
-Average Message Creation Processing Duration
- |
-Average time that the broker spends processing message creation requests
-Unit: ms
- |
-0–10,000
- |
-Kafka instance broker
- |
-1 minute
- |
-
-broker_cpu_core_load
- |
-Average Load per CPU Core
- |
-Average load of each CPU core of the Kafka VM
-Unit: %
- |
-0–20
- |
-Kafka instance broker
- |
-1 minute
- |
-
-broker_disk_usage
- |
-Disk Capacity Usage
- |
-Disk usage of the Kafka VM
-Unit: %
- |
-0–100
- |
-Kafka instance broker
- |
-1 minute
- |
-
-broker_memory_usage
- |
-Memory Usage
- |
-Memory usage of the Kafka VM
-Unit: %
- |
-0–100
- |
-Kafka instance broker
- |
-1 minute
- |
-
-broker_heap_usage
- |
-JVM Heap Memory Usage of Kafka
- |
-Heap memory usage of the Kafka JVM
-Unit: %
- |
-0–100
- |
-Kafka instance broker
- |
-1 minute
- |
-
-broker_alive
- |
-Broker Alive
- |
-Whether the Kafka broker is alive
- |
-1: alive
-0: not alive
- |
-Kafka instance broker
- |
-1 minute
- |
-
-broker_connections
- |
-Connections
- |
-Total number of TCP connections on the Kafka broker
-Unit: count
- |
-> 0
- |
-Kafka instance broker
- |
-1 minute
- |
-
-broker_cpu_usage
- |
-CPU Usage
- |
-CPU usage of the Kafka VM
-Unit: %
- |
-0–100
- |
-Kafka instance broker
- |
-1 minute
- |
-
-broker_disk_read_await
- |
-Average Disk Read Time
- |
-Average time for each disk I/O read in the monitoring period
-Unit: ms
- |
-> 0
- |
-Kafka instance broker
- |
-1 minute
- |
-
-broker_disk_write_await
- |
-Average Disk Write Time
- |
-Average time for each disk I/O write in the monitoring period
-Unit: ms
- |
-> 0
- |
-Kafka instance broker
- |
-1 minute
- |
-
-broker_total_bytes_in_rate
- |
-Inbound Traffic
- |
-Inbound traffic per second
-Unit: byte/s
- |
-> 0
- |
-Kafka instance broker
- |
-1 minute
- |
-
-broker_total_bytes_out_rate
- |
-Outbound Traffic
- |
-Outbound traffic per second
-Unit: byte/s
- |
-> 0
- |
-Kafka instance broker
- |
-1 minute
- |
-
-
-
-
-
- Topic Metrics
- Table 3 Topic metricsMetric ID
- |
-Metric Name
- |
-Description
- |
-Value Range
- |
-Monitored Object
- |
-Monitoring Period (Raw Data)
- |
-
-
-topic_bytes_in_rate
- |
-Message Creation
- |
-Number of bytes created per second
-Unit: byte/s, KB/s, MB/s, or GB/s
- NOTE: This metric is available only when Scope is set to Basic monitoring on the Queues tab page.
-
- |
-0–500,000,000
- |
-Topic in a Kafka instance
- |
-1 minute
- |
-
-topic_bytes_out_rate
- |
-Message Retrieval
- |
-Number of bytes retrieved per second
-Unit: byte/s, KB/s, MB/s, or GB/s
- NOTE: This metric is available only when Scope is set to Basic monitoring on the Queues tab page.
-
- |
-0–500,000,000
- |
-Topic in a Kafka instance
- |
-1 minute
- |
-
-topic_data_size
- |
-Message Size
- |
-Total size of messages in the queue
-Unit: byte, KB, MB, GB, TB or PB
- NOTE: This metric is available only when Scope is set to Basic monitoring on the Queues tab page.
-
- |
-0–5,000,000,000,000
- |
-Topic in a Kafka instance
- |
-1 minute
- |
-
-topic_messages
- |
-Total Messages
- |
-Total number of messages in the queue
-Unit: count
- NOTE: This metric is available only when Scope is set to Basic monitoring on the Queues tab page.
-
- |
-≥ 0
- |
-Topic in a Kafka instance
- |
-1 minute
- |
-
-topic_messages_in_rate
- |
-Message Creation Rate
- |
-Number of messages created per second
-Unit: count/s
- NOTE: This metric is available only when Scope is set to Basic monitoring on the Queues tab page.
-
- |
-0–500,000
- |
-Topic in a Kafka instance
- |
-1 minute
- |
-
-partition_messages
- |
-Partition Messages
- |
-Total number of messages in the partition
-Unit: count
- NOTE: This metric is available only when Scope is set to Partition monitoring on the Queues tab page.
-
- |
-≥ 0
- |
-Topic in a Kafka instance
- |
-1 minute
- |
-
-produced_messages
- |
-Created Messages
- |
-Number of messages that have been created
-Unit: count
- NOTE: This metric is available only when Scope is set to Partition monitoring on the Queues tab page.
-
- |
-≥ 0
- |
-Topic in a Kafka instance
- |
-1 minute
- |
-
-
-
-
-
- Consumer Group Metrics
- Table 4 Consumer group metricsMetric ID
- |
-Metric Name
- |
-Description
- |
-Value Range
- |
-Monitored Object
- |
-Monitoring Period (Raw Data)
- |
-
-
-messages_consumed
- |
-Retrieved Messages
- |
-Number of messages that have been retrieved in the consumer group
-Unit: count
- NOTE: This metric is available only when Queue is set to a specified topic name and Monitoring Type is set to Partition monitoring on the By Consumer Group tab page.
-
- |
-≥ 0
- |
-Consumer group of a Kafka instance
- |
-1 minute
- |
-
-messages_remained
- |
-Available Messages
- |
-Number of messages that can be retrieved in the consumer group
-Unit: count
- NOTE: This metric is available only when Queue is set to a specified topic name and Monitoring Type is set to Partition monitoring on the By Consumer Group tab page.
-
- |
-≥ 0
- |
-Consumer group of a Kafka instance
- |
-1 minute
- |
-
-topic_messages_remained
- |
-Topic Available Messages
- |
-Number of remaining messages that can be retrieved from the specified topic in the consumer group
-Unit: Count
- NOTE: This metric is available only when Queue is set to a specified topic name and Monitoring Type is set to Basic monitoring on the By Consumer Group tab page.
-
- |
-0 to 2^63–1
- |
-Consumer group of a Kafka instance
- |
-1 minute
- |
-
-topic_messages_consumed
- |
-Topic Retrieved Messages
- |
-Number of messages that have been retrieved from the specified topic in the consumer group
-Unit: Count
- NOTE: This metric is available only when Queue is set to a specified topic name and Monitoring Type is set to Basic monitoring on the By Consumer Group tab page.
-
- |
-0 to 2^63–1
- |
-Consumer group of a Kafka instance
- |
-1 minute
- |
-
-consumer_messages_remained
- |
-Consumer Available Messages
- |
-Number of remaining messages that can be retrieved in the consumer group
-Unit: Count
- NOTE: This metric is available only when Queue is set to All queues on the By Consumer Group tab page.
-
- |
-0 to 2^63–1
- |
-Consumer group of a Kafka instance
- |
-1 minute
- |
-
-consumer_messages_consumed
- |
-Consumer Retrieved Messages
- |
-Number of messages that have been retrieved in the consumer group
-Unit: Count
- NOTE: This metric is available only when Queue is set to All queues on the By Consumer Group tab page.
-
- |
-0 to 2^63–1
- |
-Consumer group of a Kafka instance
- |
-1 minute
- |
-
-
-
-
-
- Dimension
- Key
- |
-Value
- |
-
-
-kafka_instance_id
- |
-Kafka instance
- |
-
-kafka_broker
- |
-Kafka instance broker
- |
-
-kafka_topics
- |
-Topic in a Kafka instance
- |
-
-kafka_partitions
- |
-Partition in a Kafka instance
- |
-
-kafka_groups-partitions
- |
-Partition consumer group in a Kafka instance
- |
-
-kafka_groups_topics
- |
-Topic consumer group in a Kafka instance
- |
-
-kafka_groups
- |
-Consumer group of a Kafka instance
- |
-
-
-
-
-
-
-
-
diff --git a/docs/dms/umn/dms-ug-180604013.html b/docs/dms/umn/dms-ug-180604013.html
deleted file mode 100644
index 9a572d7b..00000000
--- a/docs/dms/umn/dms-ug-180604013.html
+++ /dev/null
@@ -1,45 +0,0 @@
-
-
-Creating an Instance
-ScenarioKafka instances are physically isolated and exclusively occupied by each tenant. You can customize the computing capabilities and storage space of an instance based on service requirements.
-
- Before You Start- Before creating a Kafka instance, ensure that a VPC configured with security groups and subnets is available.
- (Optional) If you want to access a Kafka instance over a public network, prepare an elastic IP address (EIP) in advance.
- (Optional) If you need to encrypt the disk, prepare a KMS key in advance.
-
- Procedure- Log in to the management console.
- Click
in the upper left corner to select a region. Select the same region as your application service.
-
- - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click Create Instance in the upper right corner of the page.
By default, you can create a maximum of 100 Kafka instances for each project. To create more instances, contact customer service to increase your quota.
- - Specify Region, Project, and AZ.
- Enter an instance name.
- Configure the following instance parameters:
- Version: Kafka v1.1.0, v2.3.0, and v2.7 are supported. v2.7 is recommended. The version cannot be changed once the instance is created.
- CPU Architecture: The x86 architecture is supported.
- Flavor: Select a bandwidth based on the estimated service traffic.
You can view the broker quantity and flavor, the maximum number of partitions allowed, and number of consumer groups recommended for each bandwidth option.
-The Maximum Partitions parameter indicates the maximum number of partitions that can be created for a Kafka instance. If the total number of partitions of all topics exceeds this threshold, topic creation will fail.
- - Storage Space: Disk type and total disk space for storing the instance data. The disk type cannot be changed once the instance is created.
The storage space is the total space to be consumed by all replicas. Specify the storage space based on the expected service message size and the number of replicas. For example, if the required disk size to store the data for the retention period is 100 GB, the disk capacity must be at least: 100 GB x Number of replicas + 100 GB (reserved space).
-Disks are formatted when an instance is created. As a result, the actual available disk space is 93% to 95% of the total disk space.
-- 100 MB/s bandwidth: The value range of Storage Space is 600–90,000 GB.
- 300 MB/s bandwidth: The value range of Storage Space is 1200–90,000 GB.
- 600 MB/s bandwidth: The value range of Storage Space is 2400–90,000 GB.
- 1200 MB/s bandwidth: The value range of Storage Space is 4800–90,000 GB.
- - High I/O + 100 MB/s bandwidth: If the average message size is 1 KB, the transactions per second (TPS) can reach 100,000 in high throughput scenarios and 60,000 in synchronous replication scenarios.
- High I/O + 300 MB/s bandwidth: If the average message size is 1 KB, the TPS can reach 300,000 in high throughput scenarios and 150,000 in synchronous replication scenarios.
- Ultra-high I/O + 100 MB/s bandwidth: If the average message size is 1 KB, the TPS can reach 100,000 in high throughput scenarios and 80,000 in synchronous replication scenarios.
- Ultra-high I/O + 300 MB/s bandwidth: If the average message size is 1 KB, the TPS can reach 300,000 in high throughput scenarios and 200,000 in synchronous replication scenarios.
- Ultra-high I/O + 600 MB/s bandwidth: If the average message size is 1 KB, the TPS can reach 600,000 in high throughput scenarios and 300,000 in synchronous replication scenarios.
- Ultra-high I/O + 1200 MB/s bandwidth: If the average message size is 1 KB, the TPS can reach 1,200,000 in high throughput scenarios and 400,000 in synchronous replication scenarios.
-
- - Disk Encryption: Specify whether to enable disk encryption. Enabling disk encryption improves data security. Disk encryption depends on Key Management Service (KMS). If you enable disk encryption, select a KMS key. This parameter cannot be modified once the instance is created.
- Capacity Threshold Policy: policy used when the disk usage reaches the threshold. The capacity threshold is 95%.
- Automatically delete: Messages can be created and retrieved, but 10% of the earliest messages will be deleted to ensure sufficient disk space. This policy is suitable for scenarios where no service interruption can be tolerated. Data may be lost.
- Stop production: New messages cannot be created, but existing messages can still be retrieved. This policy is suitable for scenarios where no data loss can be tolerated.
-
- - Configure the instance network parameters.
- Select a VPC and a subnet.
A VPC provides an isolated virtual network for your Kafka instances. You can configure and manage the network as required.
- After the Kafka instance is created, its VPC and subnet cannot be changed.
-
- - Select a security group.
A security group is a set of rules for accessing a Kafka instance. You can click Manage Security Group to view or create security groups on the network console.
-
- - Click Advanced Settings to configure more parameters.
- Configure public access.
Public access is disabled by default. You can enable or disable it as required.
-After public access is enabled, configure an IPv4 EIP for each broker.
- - Configure Kafka SASL_SSL.
This parameter indicates whether to enable SSL authentication when a client connects to the instance. If you enable Kafka SASL_SSL, data will be encrypted before transmission to enhance security.
-Kafka SASL_SSL is disabled by default. You can enable or disable it as required. This setting cannot be changed after the instance is created. If you want to use a different setting, you must create a new instance.
-If you enable Kafka SASL_SSL, you must also set the username and password for accessing the instance.
- - Configure Automatic Topic Creation.
This setting is disabled by default. You can enable or disable it as required.
-If automatic topic creation is enabled, the system automatically creates a topic when a message is created in or retrieved from a topic that does not exist. This topic has the following default settings: 3 partitions, 3 replicas, aging time 72 hours, and synchronous replication and flushing disabled.
-After you change the value of the log.retention.hours, default.replication.factor, or num.partitions parameter, automatically created topics later use the new value. For example, if num.partitions is set to 5, an automatically created topic will have the following settings: 5 partitions, 3 replicas, aging time 72 hours, and synchronous replication and flushing disabled.
- - Specify Tags.
Tags are used to identify cloud resources. When you have many cloud resources of the same type, you can use tags to classify them by dimension (for example, use, owner, or environment).
-- If you have predefined tags, select a predefined pair of tag key and value. Click View predefined tags. On the Tag Management Service (TMS) console, view predefined tags or create tags.
- You can also create new tags by specifying Tag key and Tag value.
-Up to 20 tags can be added to each Kafka instance. For details about the requirements on tags, see Managing Instance Tags.
- - Enter a description of the instance.
- - Click Create.
- Confirm the instance information, and click Submit.
- Return to the instance list and check whether the Kafka instance has been created.
It takes 3 to 15 minutes to create an instance. During this period, the instance status is Creating.
-- If the instance is created successfully, its status changes to Running.
- If the instance fails to be created, view Instance Creation Failures. Delete the instance by referring to Deleting an Instance and create another instance. If the instance creation fails again, contact customer service.
Instances that fail to be created do not occupy other resources.
-
-
-
-
-
-
-
diff --git a/docs/dms/umn/dms-ug-180604018.html b/docs/dms/umn/dms-ug-180604018.html
deleted file mode 100644
index 2d8d7932..00000000
--- a/docs/dms/umn/dms-ug-180604018.html
+++ /dev/null
@@ -1,85 +0,0 @@
-
-
-Creating a Topic
-A topic is a stream of messages. If automatic topic creation is not enabled during Kafka instance creation, you need to manually create topics for creating and retrieving messages. If automatic topic creation has been enabled for the instance, this operation is optional.
- If automatic topic creation is enabled, the system automatically creates a topic when a message is created in or retrieved from a topic that does not exist. This topic has the following default settings: 3 partitions, 3 replicas, aging time 72 hours, and synchronous replication and flushing disabled. After you change the value of the log.retention.hours, default.replication.factor, or num.partitions parameter, automatically created topics later use the new value. For example, if num.partitions is set to 5, an automatically created topic will have the following settings: 5 partitions, 3 replicas, aging time 72 hours, and synchronous replication and flushing disabled.
- There is a limit on the total number of partitions in topics. When the partition quantity limit is reached, you can no longer create topics. The total number of partitions varies with instance specifications. For details, see Specifications.
- Methods that can be used to manually create a topic:
-
- If an instance node is faulty, an internal service error may be reported when you query messages in a topic with only one replica. Therefore, you are not advised to use a topic with only one replica.
-
- Method 1: Creating a Topic on the Console- Log in to the management console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
-
- - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- Click the Topics tab, and click Create Topic.
The Create Topic dialog box is displayed.
- - Specify the topic parameters listed in the following table.
- Table 1 Topic parametersParameter
- |
-Description
- |
-
-
-Topic Name
- |
-When creating a topic, you can modify the automatically generated topic name.
-Once the topic is created, you cannot modify its name.
- |
-
-Partitions
- |
-A larger number of partitions for a topic indicates more messages retrieved concurrently.
-If this parameter is set to 1, messages will be retrieved in the FIFO order.
-Value range: 1 to 100
-Default value: 3
- |
-
-Replicas
- |
-A higher number of replicas delivers higher reliability. Data is automatically backed up on each replica. When one Kafka broker becomes faulty, data is still available on other brokers.
-If this parameter is set to 1, only one set of data is available.
-Value range: 1 to 3
-Default value: 3
- NOTE: If an instance node is faulty, an internal service error may be reported when you query messages in a topic with only one replica. Therefore, you are not advised to use a topic with only one replica.
-
- |
-
-Aging Time (h)
- |
-The period that messages are retained for. Consumers must retrieve messages before this period ends. Otherwise, the messages will be deleted and can no longer be retrieved.
-Value range: 1 to 720
-Default value: 72
- |
-
-Synchronous Replication
- |
-A message is returned to the client only after the message creation request has been received and the message has been acknowledged by all replicas.
-After enabling synchronous replication, set acks to all or –1 on the client. Otherwise, this function will not take effect.
-If there is only one replica, synchronous replication cannot be enabled.
- |
-
-Synchronous Flushing
- |
-An indicator of whether a message is immediately flushed to disk once created.
-- Enabled: A message is immediately flushed to disk once it is created, resulting in higher reliability.
- Disabled: A message is stored in the memory instead of being immediately flushed to disk once created.
- |
-
-
-
-
- - Click OK.
-
- Method 2: Create a Topic by Using Kafka CLIIf your client is v2.2 or later, you can use kafka-topics.sh to create topics and manage topic parameters.
- If a topic name starts with a special character, for example, an underscore (_) or a number sign (#), monitoring data cannot be displayed.
-
-
-
-
-
-
diff --git a/docs/dms/umn/dms-ug-190128001.html b/docs/dms/umn/dms-ug-190128001.html
deleted file mode 100644
index 08828960..00000000
--- a/docs/dms/umn/dms-ug-190128001.html
+++ /dev/null
@@ -1,13 +0,0 @@
-
-
-Permissions
-By default, there are two types of user permissions: user management and resource management.
- - User management refers to the management of users, user groups, and user group rights.
- Resource management refers to the control operations that can be performed by users on cloud service resources.
- For further details, see Permissions.
-
-
-
diff --git a/docs/dms/umn/en-us_image_0252462634.png b/docs/dms/umn/en-us_image_0000001073623595.png
similarity index 100%
rename from docs/dms/umn/en-us_image_0252462634.png
rename to docs/dms/umn/en-us_image_0000001073623595.png
diff --git a/docs/dms/umn/en-us_image_0272312053.png b/docs/dms/umn/en-us_image_0000001073725903.png
similarity index 100%
rename from docs/dms/umn/en-us_image_0272312053.png
rename to docs/dms/umn/en-us_image_0000001073725903.png
diff --git a/docs/dms/umn/en-us_image_0000001073862086.png b/docs/dms/umn/en-us_image_0000001073862086.png
new file mode 100644
index 00000000..38b318cd
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001073862086.png differ
diff --git a/docs/dms/umn/en-us_image_0252462689.png b/docs/dms/umn/en-us_image_0000001073954006.png
similarity index 100%
rename from docs/dms/umn/en-us_image_0252462689.png
rename to docs/dms/umn/en-us_image_0000001073954006.png
diff --git a/docs/dms/umn/en-us_image_0252483830.png b/docs/dms/umn/en-us_image_0000001074272218.png
similarity index 100%
rename from docs/dms/umn/en-us_image_0252483830.png
rename to docs/dms/umn/en-us_image_0000001074272218.png
diff --git a/docs/dms/umn/en-us_image_0252462263.png b/docs/dms/umn/en-us_image_0000001074591800.png
similarity index 100%
rename from docs/dms/umn/en-us_image_0252462263.png
rename to docs/dms/umn/en-us_image_0000001074591800.png
diff --git a/docs/dms/umn/en-us_image_0000001093972624.png b/docs/dms/umn/en-us_image_0000001093972624.png
new file mode 100644
index 00000000..9f3f6ab1
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001093972624.png differ
diff --git a/docs/dms/umn/en-us_image_0000001143589128.png b/docs/dms/umn/en-us_image_0000001143589128.png
new file mode 100644
index 00000000..e29a3255
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001143589128.png differ
diff --git a/docs/dms/umn/en-us_image_0000001160616010.png b/docs/dms/umn/en-us_image_0000001160616010.png
new file mode 100644
index 00000000..478a5525
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001160616010.png differ
diff --git a/docs/dms/umn/en-us_image_0000001174310752.png b/docs/dms/umn/en-us_image_0000001174310752.png
new file mode 100644
index 00000000..b30dd575
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001174310752.png differ
diff --git a/docs/dms/umn/en-us_image_0000001191769789.png b/docs/dms/umn/en-us_image_0000001191769789.png
new file mode 100644
index 00000000..6bfaf761
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001191769789.png differ
diff --git a/docs/dms/umn/en-us_image_0000001194643828.png b/docs/dms/umn/en-us_image_0000001194643828.png
deleted file mode 100644
index adf55a9b..00000000
Binary files a/docs/dms/umn/en-us_image_0000001194643828.png and /dev/null differ
diff --git a/docs/dms/umn/en-us_image_0000001284017553.png b/docs/dms/umn/en-us_image_0000001284017553.png
new file mode 100644
index 00000000..28ef5d34
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001284017553.png differ
diff --git a/docs/dms/umn/en-us_image_0000001328313684.png b/docs/dms/umn/en-us_image_0000001328313684.png
deleted file mode 100644
index 973d6c4e..00000000
Binary files a/docs/dms/umn/en-us_image_0000001328313684.png and /dev/null differ
diff --git a/docs/dms/umn/en-us_image_0000001328633848.png b/docs/dms/umn/en-us_image_0000001328633848.png
deleted file mode 100644
index b342e033..00000000
Binary files a/docs/dms/umn/en-us_image_0000001328633848.png and /dev/null differ
diff --git a/docs/dms/umn/en-us_image_0000001328644244.png b/docs/dms/umn/en-us_image_0000001328644244.png
deleted file mode 100644
index be9039f2..00000000
Binary files a/docs/dms/umn/en-us_image_0000001328644244.png and /dev/null differ
diff --git a/docs/dms/umn/en-us_image_0000001329185932.png b/docs/dms/umn/en-us_image_0000001329185932.png
deleted file mode 100644
index c849f283..00000000
Binary files a/docs/dms/umn/en-us_image_0000001329185932.png and /dev/null differ
diff --git a/docs/dms/umn/en-us_image_0000001329793006.png b/docs/dms/umn/en-us_image_0000001329793006.png
deleted file mode 100644
index d025e16d..00000000
Binary files a/docs/dms/umn/en-us_image_0000001329793006.png and /dev/null differ
diff --git a/docs/dms/umn/en-us_image_0000001329906052.png b/docs/dms/umn/en-us_image_0000001329906052.png
deleted file mode 100644
index 7a79d223..00000000
Binary files a/docs/dms/umn/en-us_image_0000001329906052.png and /dev/null differ
diff --git a/docs/dms/umn/en-us_image_0000001330112650.png b/docs/dms/umn/en-us_image_0000001330112650.png
deleted file mode 100644
index 38189a0e..00000000
Binary files a/docs/dms/umn/en-us_image_0000001330112650.png and /dev/null differ
diff --git a/docs/dms/umn/en-us_image_0000001378919582.png b/docs/dms/umn/en-us_image_0000001378919582.png
deleted file mode 100644
index 0f21e634..00000000
Binary files a/docs/dms/umn/en-us_image_0000001378919582.png and /dev/null differ
diff --git a/docs/dms/umn/en-us_image_0000001379301802.png b/docs/dms/umn/en-us_image_0000001379301802.png
deleted file mode 100644
index 1909444d..00000000
Binary files a/docs/dms/umn/en-us_image_0000001379301802.png and /dev/null differ
diff --git a/docs/dms/umn/en-us_image_0000001379445357.png b/docs/dms/umn/en-us_image_0000001379445357.png
deleted file mode 100644
index 49a7f6e7..00000000
Binary files a/docs/dms/umn/en-us_image_0000001379445357.png and /dev/null differ
diff --git a/docs/dms/umn/en-us_image_0000001380945917.png b/docs/dms/umn/en-us_image_0000001380945917.png
deleted file mode 100644
index caf8732b..00000000
Binary files a/docs/dms/umn/en-us_image_0000001380945917.png and /dev/null differ
diff --git a/docs/dms/umn/en-us_image_0000001381108612.png b/docs/dms/umn/en-us_image_0000001381108612.png
deleted file mode 100644
index adf55a9b..00000000
Binary files a/docs/dms/umn/en-us_image_0000001381108612.png and /dev/null differ
diff --git a/docs/dms/umn/en-us_image_0000001403219302.png b/docs/dms/umn/en-us_image_0000001403219302.png
new file mode 100644
index 00000000..3c021054
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001403219302.png differ
diff --git a/docs/dms/umn/en-us_image_0000001404290946.png b/docs/dms/umn/en-us_image_0000001404290946.png
new file mode 100644
index 00000000..0447e67f
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001404290946.png differ
diff --git a/docs/dms/umn/en-us_image_0000001427521685.png b/docs/dms/umn/en-us_image_0000001427521685.png
deleted file mode 100644
index 2ad6a460..00000000
Binary files a/docs/dms/umn/en-us_image_0000001427521685.png and /dev/null differ
diff --git a/docs/dms/umn/en-us_image_0000001453201733.png b/docs/dms/umn/en-us_image_0000001453201733.png
new file mode 100644
index 00000000..9f2eb4a1
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001453201733.png differ
diff --git a/docs/dms/umn/en-us_image_0000001454518289.png b/docs/dms/umn/en-us_image_0000001454518289.png
new file mode 100644
index 00000000..994db513
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001454518289.png differ
diff --git a/docs/dms/umn/en-us_image_0000001540501562.png b/docs/dms/umn/en-us_image_0000001540501562.png
new file mode 100644
index 00000000..5a75764f
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001540501562.png differ
diff --git a/docs/dms/umn/en-us_image_0000001563854478.png b/docs/dms/umn/en-us_image_0000001563854478.png
new file mode 100644
index 00000000..569aa578
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001563854478.png differ
diff --git a/docs/dms/umn/en-us_image_0000001586445178.png b/docs/dms/umn/en-us_image_0000001586445178.png
new file mode 100644
index 00000000..bb24d490
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001586445178.png differ
diff --git a/docs/dms/umn/en-us_image_0281104603.png b/docs/dms/umn/en-us_image_0000001605213324.png
similarity index 100%
rename from docs/dms/umn/en-us_image_0281104603.png
rename to docs/dms/umn/en-us_image_0000001605213324.png
diff --git a/docs/dms/umn/en-us_image_0000001605533602.png b/docs/dms/umn/en-us_image_0000001605533602.png
new file mode 100644
index 00000000..47a03930
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001605533602.png differ
diff --git a/docs/dms/umn/en-us_image_0000001614245881.png b/docs/dms/umn/en-us_image_0000001614245881.png
new file mode 100644
index 00000000..b2d09077
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001614245881.png differ
diff --git a/docs/dms/umn/en-us_image_0000001614425289.png b/docs/dms/umn/en-us_image_0000001614425289.png
new file mode 100644
index 00000000..3786480d
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001614425289.png differ
diff --git a/docs/dms/umn/en-us_image_0143920315.png b/docs/dms/umn/en-us_image_0000001654533309.png
similarity index 100%
rename from docs/dms/umn/en-us_image_0143920315.png
rename to docs/dms/umn/en-us_image_0000001654533309.png
diff --git a/docs/dms/umn/en-us_image_0000001654716901.png b/docs/dms/umn/en-us_image_0000001654716901.png
new file mode 100644
index 00000000..c8df48a5
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001654716901.png differ
diff --git a/docs/dms/umn/en-us_image_0000001655076581.png b/docs/dms/umn/en-us_image_0000001655076581.png
new file mode 100644
index 00000000..c8df48a5
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001655076581.png differ
diff --git a/docs/dms/umn/en-us_image_0000001655285129.png b/docs/dms/umn/en-us_image_0000001655285129.png
new file mode 100644
index 00000000..18738052
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001655285129.png differ
diff --git a/docs/dms/umn/en-us_image_0000001707049736.png b/docs/dms/umn/en-us_image_0000001707049736.png
new file mode 100644
index 00000000..5a75764f
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001707049736.png differ
diff --git a/docs/dms/umn/en-us_image_0000001755301270.png b/docs/dms/umn/en-us_image_0000001755301270.png
new file mode 100644
index 00000000..ca70c5b8
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001755301270.png differ
diff --git a/docs/dms/umn/en-us_image_0000001756206030.png b/docs/dms/umn/en-us_image_0000001756206030.png
new file mode 100644
index 00000000..ab5ed366
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001756206030.png differ
diff --git a/docs/dms/umn/en-us_image_0000001756349630.png b/docs/dms/umn/en-us_image_0000001756349630.png
new file mode 100644
index 00000000..97724523
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001756349630.png differ
diff --git a/docs/dms/umn/en-us_image_0000001756356494.png b/docs/dms/umn/en-us_image_0000001756356494.png
new file mode 100644
index 00000000..a6334ddb
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001756356494.png differ
diff --git a/docs/dms/umn/en-us_image_0000001756372046.png b/docs/dms/umn/en-us_image_0000001756372046.png
new file mode 100644
index 00000000..9f534605
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001756372046.png differ
diff --git a/docs/dms/umn/en-us_image_0000001756508438.png b/docs/dms/umn/en-us_image_0000001756508438.png
new file mode 100644
index 00000000..9b57ac1a
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001756508438.png differ
diff --git a/docs/dms/umn/en-us_image_0000001756853218.png b/docs/dms/umn/en-us_image_0000001756853218.png
new file mode 100644
index 00000000..8d8384e7
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001756853218.png differ
diff --git a/docs/dms/umn/en-us_image_0000001757003050.png b/docs/dms/umn/en-us_image_0000001757003050.png
new file mode 100644
index 00000000..08787cad
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001757003050.png differ
diff --git a/docs/dms/umn/en-us_image_0000001757257336.png b/docs/dms/umn/en-us_image_0000001757257336.png
new file mode 100644
index 00000000..9c97fa16
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001757257336.png differ
diff --git a/docs/dms/umn/en-us_image_0000001781630389.png b/docs/dms/umn/en-us_image_0000001781630389.png
new file mode 100644
index 00000000..39d1583e
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001781630389.png differ
diff --git a/docs/dms/umn/en-us_image_0000001803290001.png b/docs/dms/umn/en-us_image_0000001803290001.png
new file mode 100644
index 00000000..bee06805
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001803290001.png differ
diff --git a/docs/dms/umn/en-us_image_0000001803492553.png b/docs/dms/umn/en-us_image_0000001803492553.png
new file mode 100644
index 00000000..c8fb414a
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001803492553.png differ
diff --git a/docs/dms/umn/en-us_image_0000001803507917.png b/docs/dms/umn/en-us_image_0000001803507917.png
new file mode 100644
index 00000000..03b78ce3
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001803507917.png differ
diff --git a/docs/dms/umn/en-us_image_0000001803832641.png b/docs/dms/umn/en-us_image_0000001803832641.png
new file mode 100644
index 00000000..43e7dbc4
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001803832641.png differ
diff --git a/docs/dms/umn/en-us_image_0000001803837729.png b/docs/dms/umn/en-us_image_0000001803837729.png
new file mode 100644
index 00000000..199ee1bf
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001803837729.png differ
diff --git a/docs/dms/umn/en-us_image_0000001803846097.png b/docs/dms/umn/en-us_image_0000001803846097.png
new file mode 100644
index 00000000..45004ea6
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001803846097.png differ
diff --git a/docs/dms/umn/en-us_image_0000001803876329.png b/docs/dms/umn/en-us_image_0000001803876329.png
new file mode 100644
index 00000000..ca841d55
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001803876329.png differ
diff --git a/docs/dms/umn/en-us_image_0000001803937277.png b/docs/dms/umn/en-us_image_0000001803937277.png
new file mode 100644
index 00000000..124dccb8
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001803937277.png differ
diff --git a/docs/dms/umn/en-us_image_0000001917432480.png b/docs/dms/umn/en-us_image_0000001917432480.png
new file mode 100644
index 00000000..248a1e20
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001917432480.png differ
diff --git a/docs/dms/umn/en-us_image_0000001921463342.png b/docs/dms/umn/en-us_image_0000001921463342.png
new file mode 100644
index 00000000..7408cf8e
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001921463342.png differ
diff --git a/docs/dms/umn/en-us_image_0000001926137265.png b/docs/dms/umn/en-us_image_0000001926137265.png
new file mode 100644
index 00000000..b7e5d43f
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001926137265.png differ
diff --git a/docs/dms/umn/en-us_image_0000001927807598.png b/docs/dms/umn/en-us_image_0000001927807598.png
new file mode 100644
index 00000000..511c0429
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001927807598.png differ
diff --git a/docs/dms/umn/en-us_image_0000001940775828.png b/docs/dms/umn/en-us_image_0000001940775828.png
new file mode 100644
index 00000000..98ca9793
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001940775828.png differ
diff --git a/docs/dms/umn/en-us_image_0000001940935336.png b/docs/dms/umn/en-us_image_0000001940935336.png
new file mode 100644
index 00000000..3d2b617f
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001940935336.png differ
diff --git a/docs/dms/umn/en-us_image_0000001968058225.png b/docs/dms/umn/en-us_image_0000001968058225.png
new file mode 100644
index 00000000..a2d02226
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001968058225.png differ
diff --git a/docs/dms/umn/en-us_image_0000001968060361.png b/docs/dms/umn/en-us_image_0000001968060361.png
new file mode 100644
index 00000000..3da78bd7
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001968060361.png differ
diff --git a/docs/dms/umn/en-us_image_0000001968997809.png b/docs/dms/umn/en-us_image_0000001968997809.png
new file mode 100644
index 00000000..b567e467
Binary files /dev/null and b/docs/dms/umn/en-us_image_0000001968997809.png differ
diff --git a/docs/dms/umn/kafka-advantage.html b/docs/dms/umn/kafka-advantage.html
index b36d62de..b20374b0 100644
--- a/docs/dms/umn/kafka-advantage.html
+++ b/docs/dms/umn/kafka-advantage.html
@@ -5,14 +5,15 @@
- Rapid deployment
Simply set instance information on the DMS for Kafka console, submit your order, and a complete Kafka instance will be automatically created and deployed.
- Service migration without modifications
DMS for Kafka is compatible with open-source Kafka APIs and supports all message processing functions of open-source Kafka.
If your application services are developed based on open-source Kafka, you can easily migrate them to DMS after specifying a few authentication configurations.
- Kafka instances are compatible with Apache Kafka v1.1.0, v2.3.0, and v2.7. Keep the client and server versions the same.
+ Kafka instances are compatible with Apache Kafka v1.1.0, v2.3.0, v2.7, and v3.x. Keep the client and server versions the same.
- Security
Operations on Kafka instances are recorded and can be audited. Messages can be encrypted before storage.
In addition to Simple Authentication and Security Layer (SASL) authentication, Virtual Private Clouds (VPCs) and security groups also provide security controls on network access.
- Data reliability
Kafka instances support data persistence and replication. Messages can be synchronously or asynchronously replicated between replicas and flushed to disk.
- High availability
Kafka runs in clusters, enabling failover and fault tolerance so that services can run smoothly.
-Kafka instance brokers can be deployed across AZs to enhance service availability.
+Kafka instance brokers can be deployed across AZs to enhance service availability. Data is synchronized between different AZs based on Kafka's in-sync replica (ISR) mechanism. A topic must have multiple data copies and distribute them across ISRs. When ISR replication is normal, the recovery point objective (RPO) is close to 0.
- Simple O&M
The cloud service platform provides a whole set of monitoring and alarm services, eliminating the need for 24/7 attendance. Kafka instance metrics are monitored and reported, including the number of partitions, topics, and accumulated messages. You can configure alarm rules and receive SMS or email notifications on how your services are running in real time.
+ - Massive accumulation and scaling
Kafka features high scalability because it runs in a distributed system, or cluster. Users can configure up to 200 partitions for a topic. The storage space, broker quantity and flavor can be also expanded. This means that billions of messages can be accumulated, suitable for scenarios requiring high concurrency, high performance, and large-scale access.
- Flexible specifications
You can customize the bandwidth and storage space for the instance and the number of partitions and replicas for topics in the instance.
diff --git a/docs/dms/umn/kafka-bp-migration.html b/docs/dms/umn/kafka-bp-migration.html
new file mode 100644
index 00000000..30c3853d
--- /dev/null
+++ b/docs/dms/umn/kafka-bp-migration.html
@@ -0,0 +1,71 @@
+
+
+ Kafka Data Migration Overview
+ ScenarioYou can migrate Kafka services to connect message producers and consumers to a new Kafka instance and can even migrate persisted message data to the new Kafka instance. Kafka services can be migrated in the following two scenarios:
+ - Migrating services to the cloud without downtime
Services that have high requirements on continuity must be smoothly migrated to the cloud because they cannot afford a long downtime.
+ - Re-deploying services on the cloud
A Kafka instance deployed within an AZ is not capable of cross-AZ disaster recovery. For higher reliability, you can re-deploy services to an instance that is deployed across AZs.
+
+
+ Preparation- Configure the network environment.
A Kafka instance can be accessed within a VPC or over a public network. For public network access, the producer and consumer must have public access permissions, and the following security group rules must be configured.
+
+Table 1 Security group rulesDirection
+ |
+Protocol
+ |
+Port
+ |
+Source
+ |
+Description
+ |
+
+
+Inbound
+ |
+TCP
+ |
+9094
+ |
+0.0.0.0/0
+ |
+Accessing a Kafka instance in a public network (in plaintext)
+ |
+
+Inbound
+ |
+TCP
+ |
+9095
+ |
+0.0.0.0/0
+ |
+Accessing a Kafka instance in a public network (in ciphertext)
+ |
+
+
+
+
+ - Create a Kafka instance.
The specifications of the new instance cannot be lower than the original specifications. For more information, see Creating a Kafka Instance.
+ - Create a topic.
Create a topic with the same configurations as the original Kafka instance, including the topic name, number of replicas, number of partitions, message aging time, and whether to enable synchronous replication and flushing. For more information, see Creating a Kafka Topic.
+
+
+ Migration Scheme 1: Migrating the Production FirstMigrate the message production service to the new Kafka instance. After migration, the original Kafka instance will no longer produce messages. After all messages of the original Kafka instance are consumed, migrate the message consumption service to the new Kafka instance to consume messages of this instance.
+ - Change the Kafka connection address of the producer to that of the new Kafka instance.
- Restart the production service so that the producer can send new messages to the new Kafka instance.
- Check the consumption progress of each consumer group in the original Kafka instance until all data in the original Kafka instance is consumed.
- Change the Kafka connection addresses of the consumers to those of the new Kafka instance.
- Restart the consumption service so that consumers can consume messages from the new Kafka instance.
- Check whether consumers consume messages properly from the new Kafka instance.
- The migration is complete.
+ This is a common migration scheme. It is simple and easy to control on the service side. During the migration, the message sequence is ensured, so this scheme is suitable for scenarios with strict requirements on the message sequence. However, latency may occur because there is a period when you have to wait for all data to be consumed.
+
+ Migration Scheme 2: Migrating the Production LaterUse multiple consumers for the consumption service. Some consume messages from the original Kafka instance, and others consume messages from the new Kafka instances. Then, migrate the production service to the new Kafka instance so that all messages can be consumed in time.
+ - Start new consumer clients, set the Kafka connection addresses to that of the new Kafka instance, and consume data from the new Kafka instance.
Original consumer clients must continue running. Messages are consumed from both the original and new Kafka instances.
+
+ - Change the Kafka connection address of the producer to that of the new Kafka instance.
- Restart the producer client to migrate the production service to the new Kafka instance.
- After the production service is migrated, check whether the consumption service connected to the new Kafka instance is normal.
- After all data in the original Kafka is consumed, close the original consumption clients.
- The migration is complete.
+ In this scheme, the migration process is controlled by services. For a certain period of time, the consumption service consumes messages from both the original and new Kafka instances. Before the migration, message consumption from the new Kafka instance has already started, so there is no latency. However, early on in the migration, data is consumed from both the original and new Kafka instances, so the messages may not be consumed in the order that they are produced. This scheme is suitable for services that require low latency but do not require strict message sequence.
+
+ How Do I Migrate Persisted Data Along with Services?You can migrate consumed data from the original instance to a new instance by using the open-source tool MirrorMaker. This tool mirrors the original Kafka producer and consumer into new ones and migrates data to the new Kafka instance.
+ Note that each cloud Kafka instance stores data in three replicas. Therefore, the storage space of the new instance should be three times that of the original single-replica message storage.
+
+
+
+
diff --git a/docs/dms/umn/kafka-connect-other.html b/docs/dms/umn/kafka-connect-other.html
deleted file mode 100644
index 1d5e4e53..00000000
--- a/docs/dms/umn/kafka-connect-other.html
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
- Obtaining Kafka Clients
- Kafka instances are fully compatible with open-source clients. You can obtain clients in other programming languages and access your instance as instructed by the official Kafka website.
-
-
-
diff --git a/docs/dms/umn/kafka-dnat.html b/docs/dms/umn/kafka-dnat.html
index 5d645132..90b24e75 100644
--- a/docs/dms/umn/kafka-dnat.html
+++ b/docs/dms/umn/kafka-dnat.html
@@ -1,36 +1,40 @@
- Using DNAT to Access a Kafka Instance
+ Accessing Kafka in a Public Network Using DNAT
ScenarioYou can use destination NAT (DNAT) to access a Kafka instance so that the instance can provide services on the public network through port mapping.
- PrerequisitesYou have created EIPs. The number of EIPs is the same as the number of brokers in the Kafka instance.
+ PrerequisitesYou have created EIPs. The number of EIPs is the same as the number of brokers in the Kafka instance. For details about how to create an EIP, see Assigning an EIP.
- Step 1: Obtain Information About the Kafka Instance- Log in to the management console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+ Step 1: Obtain Information About the Kafka Instance- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
- - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- In the Connection area on the Basic Information tab page, view and record the private network access addresses of the Kafka instance. In the Network area, view and record the VPC and subnet where the Kafka instance is located.
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view its details.
- In the Connection area on the Basic Information tab page, view and record the private network access addresses of the Kafka instance. In the Network area, view and record the VPC and subnet where the Kafka instance is located.
Figure 1 Kafka instance information
+
Step 2: Create a Public NAT Gateway- Click Service List and choose Network > NAT Gateway.
- Click Create Public NAT Gateway.
- Set the following parameters:
- Region: Select the region that the Kafka instance is in.
- Name: Enter a name for the public NAT gateway.
- VPC: Select the VPC recorded in 5.
- Subnet: Select the subnet recorded in 5.
Set other parameters as required. For details, see Creating a NAT Gateway.
- - Click Create Now.
- Confirm the specifications and click Submit.
+ Figure 2 Create Public NAT Gateway
+ - Click Create Now.
- Confirm the specifications and click Submit.
- Step 3: Add a DNAT Rule- On Public NAT Gateways page, locate the row that contains the newly created public NAT gateway and click Add Rule in the Operation column.
- On the DNAT Rules tab page, click Add DNAT Rule.
- Set the following parameters:
- Scenario: Select VPC.
- Port Type: Select Specific port.
- Protocol: Select TCP.
- EIP: Select an EIP.
- Outside Port: Enter 9011.
- Private IP Address: Enter one of the private network addresses of the Kafka instance recorded in 5.
- Inside Port: Enter 9011.
+Step 3: Add a DNAT Rule- On Public NAT Gateways page, locate the row containing the newly created public NAT gateway and click Add Rule in the Operation column.
- On the DNAT Rules tab page, click Add DNAT Rule.
Figure 3 Public NAT gateway details
+ - Set the following parameters:
- Scenario: Select VPC.
- Port Type: Select Specific port.
- Protocol: Select TCP.
- EIP: Select an EIP.
- Outside Port: Enter 9011.
- Instance Type: Select Custom.
- Private IP Address: Enter one of the private network addresses of the Kafka instance recorded in 5.
- Inside Port: Enter 9011.
For details about more parameters, see Adding a DNAT Rule.
-Figure 1 Adding a DNAT rule
+Figure 4 Adding a DNAT rule
- Click OK.
View the DNAT rule status in the DNAT rule list. If Status is Running, the rule has been added successfully.
- - Repeat 2 to 4 to create DNAT rules for other private network addresses of the Kafka instance recorded in 5. Each private network address corresponds to a separate EIP.
- After all DNAT rules are created, click the DNAT Rules tab to view the created DNAT rules and record the EIPs corresponding to the private IP addresses.
+ - Create DNAT rules for other private network addresses of the Kafka instance recorded in 5. Configure a unique EIP for each DNAT rule.
For details about how to create a DNAT rule, see 2 to 4.
+ - After all DNAT rules are created, click the DNAT Rules tab to view the created DNAT rules and record the EIPs corresponding to the private IP addresses.
-Step 4: Bind EIPs on the Kafka Console- Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- In the Advanced Settings section on the Basic Information tab page, click Modify next to Cross-VPC Access.
- Change the values of advertised.listeners IP Address/Domain Name to the EIPs in the DNAT rules. Ensure that the mapping between the private network addresses and the EIPs is consistent with that recorded in 6. Then click Save.
Figure 2 Changing the advertised.listeners IP addresses
+Step 4: Bind EIPs on the Kafka Console- Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view its details.
- In the Advanced Settings section on the Basic Information tab page, click Modify next to Cross-VPC Access.
- Change the values of advertised.listeners IP Address/Domain Name to the EIPs in the DNAT rules. Ensure that the mapping between the private network addresses and the EIPs is consistent with that recorded in 6. Then click Save.
Figure 5 Changing the advertised.listeners IP address (for DNAT access)
-Step 5: Verify ConnectivityCheck whether messages can be created and retrieved by referring to Accessing a Kafka Instance Without SASL or Accessing a Kafka Instance with SASL.
+ Step 5: Verify ConnectivityCheck whether messages can be created and retrieved by referring to Connecting to Kafka Using the Client (Plaintext Access) or Connecting to Kafka Using the Client (Ciphertext Access).
Notes:
- - The address for connecting to a Kafka instance is in the format of "advertised.listeners IP:9011". For example, the addresses for connecting to the Kafka instance shown in Figure 2 are 100.xxx.xxx.20:9011,100.xxx.xxx.21:9011,100.xxx.xxx.23:9011.
- Configure security group rules for the Kafka instance to allow inbound access over port 9011.
- Public access must be enabled on the client connected to the Kafka instance.
+ - The address for connecting to a Kafka instance is in the format of "advertised.listeners IP:9011". For example, the addresses for connecting to the Kafka instance shown in Figure 5 are 100.xxx.xxx.20:9011,100.xxx.xxx.21:9011,100.xxx.xxx.23:9011.
- Configure security group rules for the Kafka instance to allow inbound access over port 9011.
- Public access must be enabled on the client connected to the Kafka instance.
diff --git a/docs/dms/umn/kafka-faq-0001.html b/docs/dms/umn/kafka-faq-0001.html
index 786df671..3f9338d2 100644
--- a/docs/dms/umn/kafka-faq-0001.html
+++ b/docs/dms/umn/kafka-faq-0001.html
@@ -1,8 +1,8 @@
How Do I Obtain the Public Access Address After Public Access Is Enabled?
-Click the name of your Kafka instance. In the Connection section on the Basic Information tab page, view Instance Address (Public Network).
- For details about how to connect to a Kafka instance, see Accessing a Kafka Instance.
+ Click the name of your Kafka instance. In the Connection section on the Basic Information tab page, view Instance Address (Public Network).
+ For details about how to connect to a Kafka instance, see Connecting to an Instance.
diff --git a/docs/dms/umn/kafka-faq-0003.html b/docs/dms/umn/kafka-faq-0003.html
index 8d4e40bc..8926c616 100644
--- a/docs/dms/umn/kafka-faq-0003.html
+++ b/docs/dms/umn/kafka-faq-0003.html
@@ -2,7 +2,7 @@
What Should I Do If Kafka Storage Space Is Used Up Because Retrieved Messages Are Not Deleted?
Messages are not deleted immediately after being retrieved. They are deleted only when the aging time expires.
- You can shorten the aging time.
+ You can shorten the aging time or expand the storage space.
diff --git a/docs/dms/umn/kafka-faq-0005.html b/docs/dms/umn/kafka-faq-0005.html
new file mode 100644
index 00000000..e128e183
--- /dev/null
+++ b/docs/dms/umn/kafka-faq-0005.html
@@ -0,0 +1,12 @@
+
+
+ How Do I Increase the Partition Quantity?
+ You can increase the partition quantity by adding brokers.
+ To do so, go to the Kafka console, locate the row that contains the desired instance, and choose More > Modify Specifications. On the page that is displayed, add brokers as required. For details, see Modifying Instance Specifications.
+
+
+
diff --git a/docs/dms/umn/kafka-faq-0008.html b/docs/dms/umn/kafka-faq-0008.html
new file mode 100644
index 00000000..c12461d1
--- /dev/null
+++ b/docs/dms/umn/kafka-faq-0008.html
@@ -0,0 +1,11 @@
+
+
+ Will a Kafka Instance Be Restarted After Its Enterprise Project Is Modified?
+ No. A Kafka instance will not be restarted if you modify its enterprise project.
+
+
+
diff --git a/docs/dms/umn/kafka-faq-0010.html b/docs/dms/umn/kafka-faq-0010.html
index 64e1a6e1..7fa9ffdb 100644
--- a/docs/dms/umn/kafka-faq-0010.html
+++ b/docs/dms/umn/kafka-faq-0010.html
@@ -1,7 +1,7 @@
Will a Kafka Instance Be Restarted After Its Automatic Topic Creation Setting Is Modified?
- Yes. A Kafka instance will be restarted if you enable or disable automatic topic creation for it.
+ Enabling or disabling automatic topic creation may cause instance restarts. For details, see the information displayed on the Kafka console.
diff --git a/docs/dms/umn/kafka-faq-0014.html b/docs/dms/umn/kafka-faq-0014.html
deleted file mode 100644
index 7164695a..00000000
--- a/docs/dms/umn/kafka-faq-0014.html
+++ /dev/null
@@ -1,12 +0,0 @@
-
-
- How Do I Disable Automatic Topic Creation?
- - On the Kafka console, click the name of your instance.
- In the Instance Information section of the Basic Information tab page, click
next to Automatic Topic Creation to disable automatic topic creation.You can view the execution status of the task on the Background Tasks tab page.
-
-
-
-
diff --git a/docs/dms/umn/kafka-faq-0023.html b/docs/dms/umn/kafka-faq-0023.html
index 5aae68f3..8779d274 100644
--- a/docs/dms/umn/kafka-faq-0023.html
+++ b/docs/dms/umn/kafka-faq-0023.html
@@ -1,7 +1,7 @@
- Can I Change an Instance from Single-AZ Deployment to Multi-AZ Deployment?
- No. The AZ configuration cannot be changed once the instance is created. To use multiple AZs, create another instance.
+ Can I Change Single-AZ Deployment to Multi-AZ Deployment for an Instance?
+ No. The AZ cannot be changed once the instance is created. To use multiple AZs, create another instance.
diff --git a/docs/dms/umn/kafka-faq-0025.html b/docs/dms/umn/kafka-faq-0025.html
index 10fa0a54..fa5faab9 100644
--- a/docs/dms/umn/kafka-faq-0025.html
+++ b/docs/dms/umn/kafka-faq-0025.html
@@ -1,8 +1,8 @@
- Does DMS for Kafka Support Cross-AZ Disaster Recovery? Where Can I View the AZs Configured for an Existing Instance?
+ Does DMS for Kafka Support Cross-AZ Disaster Recovery? Where Can I Check Whether an Existing Instance Is Deployed Across AZs?
DMS for Kafka supports cross-AZ disaster recovery. If you select multiple AZs when creating an instance, cross-AZ disaster recovery will be available.
- You can view the AZs configured for an instance in the Network section on the Basic Information tab page of the instance. If there are multiple AZs, cross-AZ disaster recovery is available.
+ You can view the AZs in the Network section on the Basic Information page of the instance. If there are multiple AZs, cross-AZ disaster recovery is available.
diff --git a/docs/dms/umn/kafka-faq-0031.html b/docs/dms/umn/kafka-faq-0031.html
index 5fd0d22a..e401f71c 100644
--- a/docs/dms/umn/kafka-faq-0031.html
+++ b/docs/dms/umn/kafka-faq-0031.html
@@ -1,7 +1,7 @@
Can I Delete Unnecessary Topics in a Consumer Group?
- Yes, just simply unsubscribe from it on the Kafka client.
+ Simply unsubscribe from them on the Kafka client.
diff --git a/docs/dms/umn/kafka-faq-0032.html b/docs/dms/umn/kafka-faq-0032.html
deleted file mode 100644
index 2de81778..00000000
--- a/docs/dms/umn/kafka-faq-0032.html
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
- Do I Need to Unsubscribe from a Topic Before Deleting a Consumer Group?
- No. You can directly delete the consumer group.
-
-
-
diff --git a/docs/dms/umn/kafka-faq-0033.html b/docs/dms/umn/kafka-faq-0033.html
index 22936073..2e4583a2 100644
--- a/docs/dms/umn/kafka-faq-0033.html
+++ b/docs/dms/umn/kafka-faq-0033.html
@@ -2,7 +2,96 @@
Is There a Limit on the Number of Client Connections to a Kafka Instance?
Yes. The maximum allowed number of client connections varies by instance specifications.
- - If the bandwidth is 100 MB/s, a maximum of 3000 client connections are allowed.
- If the bandwidth is 300 MB/s, a maximum of 10,000 client connections are allowed.
- If the bandwidth is 600 MB/s, a maximum of 20,000 client connections are allowed.
- If the bandwidth is 1200 MB/s, a maximum of 20,000 client connections are allowed.
+
+ Table 1 Number of connections of earlier Kafka instancesAssured Bandwidth
+ |
+Max. Connections
+ |
+
+
+100 MB/s
+ |
+3000
+ |
+
+300 MB/s
+ |
+10,000
+ |
+
+600 MB/s
+ |
+20,000
+ |
+
+1200 MB/s
+ |
+20,000
+ |
+
+
+
+
+
+ Table 2 Number of connections of later Kafka instancesFlavor
+ |
+Max. Client Connections per Broker
+ |
+
+
+kafka.2u4g.cluster.small
+ |
+2000
+ |
+
+kafka.2u4g.single.small
+ |
+2000
+ |
+
+kafka.2u4g.cluster
+ |
+2000
+ |
+
+kafka.2u4g.single
+ |
+2000
+ |
+
+kafka.2u4g.cluster.beta
+ |
+2000
+ |
+
+kafka.4u8g.cluster.beta
+ |
+4000
+ |
+
+kafka.4u8g.cluster
+ |
+4000
+ |
+
+kafka.8u16g.cluster
+ |
+4000
+ |
+
+kafka.12u24g.cluster
+ |
+4000
+ |
+
+kafka.16u32g.cluster
+ |
+4000
+ |
+
+
+
+
diff --git a/docs/dms/umn/kafka-faq-0034.html b/docs/dms/umn/kafka-faq-0034.html
index 34002e38..b3706602 100644
--- a/docs/dms/umn/kafka-faq-0034.html
+++ b/docs/dms/umn/kafka-faq-0034.html
@@ -1,7 +1,7 @@
How Many Connections Are Allowed from Each IP Address?
- Each Kafka broker allows a maximum of 1000 connections from each IP address by default. Excess connections will be rejected. You can change the limit by referring to Modifying Kafka Parameters.
+ Each Kafka broker allows a maximum of 1000 connections from each IP address by default. Excess connections will be rejected. You can change the limit by referring to Modifying Kafka Instance Configuration Parameters.
diff --git a/docs/dms/umn/kafka-faq-0035.html b/docs/dms/umn/kafka-faq-0035.html
new file mode 100644
index 00000000..37c7bed9
--- /dev/null
+++ b/docs/dms/umn/kafka-faq-0035.html
@@ -0,0 +1,39 @@
+
+
+ Does Specification Modification Affect Services?
+ Table 1 describes the impact of increasing specifications. It takes 5 to 10 minutes to modify specifications on one broker. The more brokers there are, the longer the modification takes.
+
+ Table 1 Impact of specification modificationModified Object
+ |
+Impact
+ |
+
+
+Bandwidth or broker quantity
+ |
+- Increasing the bandwidth or adding brokers does not affect the original brokers or services.
- When you increase the bandwidth or change the broker quantity, the storage space is proportionally expanded based on the current disk space. For example, assume that the original number of brokers of an instance is 3 and the disk size of each broker is 200 GB. If the broker quantity changes to 10 and the disk size of each broker is still 200 GB, the total disk size becomes 2000 GB.
- New topics are created on new brokers, and the original topics are still on the original brokers, resulting in unbalanced partitions. You can reassign partitions to migrate the replicas of the original topic partitions to the new brokers.
+ |
+
+Storage space
+ |
+- You can expand the storage space up to 20 times.
- Storage space expansion does not affect services.
+ |
+
+Broker flavor
+ |
+- Single-replica topics do not support message production during this period. Services will be interrupted.
- If a topic has multiple replicas, modifying the broker flavor does not interrupt services, but may cause disorder of partition messages. Evaluate this impact and avoid peak hours.
- Broker rolling restarts will cause partition leader changes, interrupting connections for less than a minute when the network is stable. For multi-replica topics, configure the retry mechanism on the producer client. To do so:
+
+ |
+
+
+
+
+
+
+
diff --git a/docs/dms/umn/kafka-faq-0041.html b/docs/dms/umn/kafka-faq-0041.html
index 810cffef..a09f4c59 100644
--- a/docs/dms/umn/kafka-faq-0041.html
+++ b/docs/dms/umn/kafka-faq-0041.html
@@ -2,8 +2,8 @@
How Do I View the Number of Accumulated Messages?
View the number of accumulated messages using any of the following methods:
- - On the Consumer Groups page of an instance, click the name of the consumer group whose accumulated messages are to be viewed. The consumer group details page is displayed. On the Consumer Offset tab page, view the number of messages accumulated in each topic of your target consumer group. For details, see Querying Consumer Group Details.
- On the Monitoring tab page of an instance, click the By Consumer Group tab. Select the desired consumer group for Consumer Group and All queues for Queue. The Consumer Available Messages metric reflects the number of messages accumulated in all topics of this consumer group. For details about viewing the monitoring data, see Viewing Metrics.
- On the Consumer Groups tab page of the Cloud Eye console, click the By Consumer Group tab. Select the desired consumer group for Consumer Group and All queues for Queue. The Consumer Available Messages metric reflects the number of messages accumulated in all topics of this consumer group. For details about viewing the monitoring data, see Viewing Metrics.
- On the Kafka client, run the kafka-consumer-groups.sh --bootstrap-server {Kafka connection address} --describe --group {Consumer group} command in the /{directory where the CLI is located}/kafka_{version}/bin/ directory to view the number of messages accumulated in each topic of the consumer group. LAG indicates the total number of messages accumulated in each topic.
Figure 1 Viewing the total number of messages accumulated in each topic
- If SASL authentication is enabled for the Kafka instance, the --command-config {SASL authentication configuration file consumer.properties} parameter must be added to the preceding command. For details about the configuration file consumer.properties, see the CLI access instructions provided in Accessing a Kafka Instance with SASL.
+ - On the Consumer Groups page of an instance, click the name of the consumer group whose accumulated messages are to be viewed. The consumer group details page is displayed. On the Consumer Offset tab page, view the number of messages accumulated in each topic of your target consumer group. For details, see Querying Consumer Group Details.
- On the Monitoring tab page of an instance, click the By Consumer Group tab. Select the desired consumer group for Consumer Group and All topics for Topic. The Consumer Available Messages metric reflects the number of messages accumulated in all topics of this consumer group. For details about viewing the monitoring data, see Viewing Metrics.
- On the Consumer Groups tab page of the Cloud Eye console, click the By Consumer Group tab. Select the desired consumer group for Consumer Group and All topics for Topic. The Consumer Available Messages metric reflects the number of messages accumulated in all topics of this consumer group. For details about viewing the monitoring data, see Viewing Metrics.
- In the /bin directory on the Kafka client, run the command kafka-consumer-groups.sh --bootstrap-server ${connection-address} --describe --group ${consumer-group-name} to check the number of accumulated messages of each topic in a consumer group. LAG indicates the total number of messages accumulated in each topic.
Figure 1 Viewing the total number of messages accumulated in each topic
+ If SASL authentication is enabled for the Kafka instance, the --command-config {SASL authentication configuration file consumer.properties} parameter must be added to the preceding command. For details about the configuration file consumer.properties, see the CLI access instructions provided in Accessing a Kafka Instance with SASL.
diff --git a/docs/dms/umn/kafka-faq-0043.html b/docs/dms/umn/kafka-faq-0043.html
index 514fc5a8..bfbd7f6b 100644
--- a/docs/dms/umn/kafka-faq-0043.html
+++ b/docs/dms/umn/kafka-faq-0043.html
@@ -1,10 +1,11 @@
Will a Consumer Group Without Active Consumers Be Automatically Deleted in 14 Days?
- Yes.
- Kafka uses the offsets.retention.minutes parameter to control how long to keep offsets for a consumer group. If offsets are not committed within this period, they will be deleted. The default value of offsets.retention.minutes is 20,160 minutes (14 days).
- If Kafka determines that there are no active consumers in a consumer group (for example, when the consumer group is empty) and there are no offsets, Kafka will delete the consumer group.
-
+ This depends on the offsets.retention.minutes and auto.create.groups.enable parameters.
+ - For instances created much earlier, auto.create.groups.enable is set to true by default. offsets.retention.minutes determines how long before a consumer group is deleted automatically, which can be changed on the console. For details, see Modifying Kafka Instance Configuration Parameters.
- For newly created instances:
- If auto.create.groups.enable is false, you need to manually delete consumer groups.
- If auto.create.groups.enable is true, a consumer group that has never committed an offset will be automatically deleted after 10 minutes.
- If auto.create.groups.enable is true, and a consumer group has committed an offset, offsets.retention.minutes determines how long before the group is deleted automatically, which can be changed on the console. For details, see Modifying Kafka Instance Configuration Parameters.
+
+ Kafka uses the offsets.retention.minutes parameter to control how long to keep offsets for a consumer group. If offsets are not committed within this period, they will be deleted. If Kafka determines that there are no active consumers in a consumer group (for example, when the consumer group is empty) and there are no offsets, Kafka will delete the consumer group.
+
diff --git a/docs/dms/umn/kafka-faq-0604001.html b/docs/dms/umn/kafka-faq-0604001.html
index ad3938f7..e8180724 100644
--- a/docs/dms/umn/kafka-faq-0604001.html
+++ b/docs/dms/umn/kafka-faq-0604001.html
@@ -1,45 +1,45 @@
Troubleshooting Kafka Connection Exceptions
- OverviewThis section describes how to troubleshoot Kafka connection problems.
+ OverviewThis section describes how to troubleshoot Kafka connection problems.
- Problem ClassificationIf the connection to a Kafka instance is abnormal, perform the following operations to troubleshoot the fault:
-
+ Problem ClassificationIf the connection to a Kafka instance is abnormal, perform the following operations to troubleshoot the problem:
+
- Checking the NetworkBefore connecting to a Kafka instance, ensure that the client and the instance are interconnected. If they cannot be connected, check the network connection.
- For example, if you have enabled SASL_SSL to access the Kafka instance, run the following command:
- curl -kv {ip}:{port}
- - If the network is normal, information similar to the following is shown:

- - If the network is abnormal or disconnected, information similar to the following is shown:

+Checking the NetworkEnsure that the client and the Kafka instance can be connected. If they cannot be connected, check the network.
+ For example, if you have enabled SASL for the Kafka instance, run the following command:
+ curl -kv {ip}:{port}
+ - If the network is normal, information similar to the following is displayed:

+ - If the network is abnormal or disconnected, information similar to the following is displayed:

-
- Checking Consumer and Producer ConfigurationsView logs to check whether the parameters printed during the initialization of the consumer and producer are the same as those set in the configuration files.
- If they are different, check the parameters in the configuration file.
+ Checking Consumer and Producer ConfigurationsView logs to check whether the parameters printed during initialization of the consumer and producer are the same as those set in the configuration files.
+ If they are different, check the parameters in the configuration files.
- Common Errors on Java Clients- Domain name verification enabled
The following error is displayed:
-
-Solution: Check the consumer.properties and producer.properties files, in which the ssl.endpoint.identification.algorithm parameter must be left empty, indicating that domain name verification is disabled.
-ssl.endpoint.identification.algorithm=
- - SSL certificate failing to be loaded
The following error is displayed:
-
-Solution:
-- Check whether the client.truststore.jks file exists in the corresponding address.
- Check the permissions on the processes and files.
- Check whether the ssl.truststore.password parameter in the consumer.properties and producer.properties files is correctly set.
ssl.truststore.password is the server certificate password, which must be set to dms@kafka and cannot be changed. ssl.truststore.password=dms@kafka
+ Checking for Common Errors on Java Clients- Error 1: Domain name verification is not disabled.
The following error information is displayed:
+
+Solution: Leave the ssl.endpoint.identification.algorithm parameter in the consumer.properties and producer.properties files empty to disable domain name verification.
+ssl.endpoint.identification.algorithm=
+ - Error 2: SSL certificates fail to be loaded.
The following error information is displayed:
+
+Solution:
+- Check whether the client.jks file exists in the corresponding address.
- Check the permissions on the processes and files.
- Check whether the ssl.truststore.password parameter in the consumer.properties and producer.properties files is correctly set.
ssl.truststore.password is the server certificate password, which must be set to dms@kafka and cannot be changed. ssl.truststore.password=dms@kafka
- - Incorrect topic name
The following error is displayed:
-
-Solution: Create another topic or enable the automatic topic creation function.
+ - Error 3: The topic name is incorrect.
The following error information is displayed:
+
+Solution: Create a new topic or enable the automatic topic creation function.
- Common Errors on the Go ClientThe Go client fails to connect to Kafka over SSL and the error "first record does not look like a TLS handshake" is returned.
- Solution: Enable the TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 cipher suite (which is disabled by default).
+ Checking for Common Errors on the Go ClientThe Go client fails to connect to Kafka over SSL and the error "first record does not look like a TLS handshake" is returned.
+ Solution: Enable the TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 cipher suite (which is disabled by default).
diff --git a/docs/dms/umn/kafka-faq-180604024.html b/docs/dms/umn/kafka-faq-180604024.html
index 7c3122e5..91eb26f8 100644
--- a/docs/dms/umn/kafka-faq-180604024.html
+++ b/docs/dms/umn/kafka-faq-180604024.html
@@ -23,7 +23,7 @@
0.0.0.0/0
|
- Accessing an instance within a VPC (with SSL encryption disabled)
+ | Accessing a Kafka instance over a private network within a VPC (in plaintext)
|
|
Inbound
@@ -34,7 +34,7 @@
|
0.0.0.0/0
|
-Accessing an instance within a VPC (with SSL encryption enabled)
+ | Accessing a Kafka instance over a private network within a VPC (in ciphertext)
|
@@ -44,19 +44,23 @@
Configure security group rules as follows.Assume that the security groups of the client and Kafka instance are sg-53d4 and Default_All, respectively. You can specify a security group or IP address as the destination in the following rule. A security group is used as an example.
To ensure that your client can access the Kafka instance, add the following rule to the security group configured for the client:
-Table 2 Security group ruleDirection
+Table 2 Security group ruleDirection
|
-Protocol & Port
+ | Action
|
-Destination
+ | Protocol & Port
+ |
+Destination
|
-Outbound
+ | Outbound
|
-All
+ | Allow
|
-Default_All
+ | All
+ |
+Default_All
|
@@ -64,19 +68,23 @@
To ensure that your client can access the Kafka instance, add the following rule to the security group configured for the instance.
-Table 3 Security group ruleDirection
+Table 3 Security group ruleDirection
|
-Protocol & Port
+ | Action
|
-Source
+ | Protocol & Port
+ |
+Source
|
-Inbound
+ | Inbound
|
-All
+ | Allow
|
-sg-53d4
+ | All
+ |
+sg-53d4
|
@@ -84,9 +92,9 @@
-Cross-VPC and DNAT-based Instance AccessConfigure security group rules according to Table 5.
+ Cross-VPC and DNAT-based Instance AccessConfigure security group rules according to Table 4.
- Table 4 Security group rulesDirection
+Table 4 Security group rulesDirection
|
Protocol
|
@@ -106,7 +114,7 @@
198.19.128.0/17
|
-Accessing a Kafka instance using VPC Endpoint (VPCEP)
+ | Accessing a Kafka instance using a VPC endpoint across VPCs (in cipher- or plaintext)
|
Inbound
@@ -117,13 +125,34 @@
|
0.0.0.0/0
|
-Accessing a Kafka instance using DNAT
+ | Accessing a Kafka instance using DNAT (in cipher- or plaintext)
+ |
+
+Inbound
+ |
+TCP
+ |
+9092
+ |
+0.0.0.0/0
+ |
+Accessing a Kafka instance using a peering connection across VPCs (in plaintext)
+ |
+
+Inbound
+ |
+TCP
+ |
+9093
+ |
+0.0.0.0/0
+ |
+Accessing a Kafka instance using a peering connection across VPCs (in ciphertext)
|
-
Public AccessConfigure security group rules according to Table 5.
@@ -147,7 +176,7 @@
0.0.0.0/0
|
- Access Kafka through the public network (without SSL encryption).
+ | Accessing a Kafka instance over a public network (in plaintext)
|
|
Inbound
@@ -158,7 +187,7 @@
|
0.0.0.0/0
|
-Access Kafka through the public network (with SSL encryption).
+ | Accessing a Kafka instance over a public network (in ciphertext)
|
diff --git a/docs/dms/umn/kafka-faq-190416001.html b/docs/dms/umn/kafka-faq-190416001.html
index 6a045168..99a73e17 100644
--- a/docs/dms/umn/kafka-faq-190416001.html
+++ b/docs/dms/umn/kafka-faq-190416001.html
@@ -10,6 +10,8 @@
Why Can't I Query Messages on the Console?
+What Can I Do If Kafka Messages Are Accumulated?
+
Why Do Messages Still Exist After the Retention Period Elapses?
Do Kafka Instances Support Delayed Message Delivery?
@@ -18,6 +20,8 @@
Why Is the Message Creation Time Displayed as Year 1970?
+How Do I Modify message.max.bytes?
+
diff --git a/docs/dms/umn/kafka-faq-191030001.html b/docs/dms/umn/kafka-faq-191030001.html
index f10a27b3..35c2c434 100644
--- a/docs/dms/umn/kafka-faq-191030001.html
+++ b/docs/dms/umn/kafka-faq-191030001.html
@@ -4,8 +4,6 @@
diff --git a/docs/dms/umn/kafka-faq-191030002.html b/docs/dms/umn/kafka-faq-191030002.html
index af20a2de..9e366e53 100644
--- a/docs/dms/umn/kafka-faq-191030002.html
+++ b/docs/dms/umn/kafka-faq-191030002.html
@@ -20,7 +20,7 @@
Are Kafka Instances in Cluster Mode?
- Can I Modify the Connection Address for Accessing a Kafka Instance?
+Can I Modify the Port for Accessing a Kafka Instance?
How Long Are Kafka SSL Certificates Valid for?
@@ -28,13 +28,19 @@
How Do I Change the SASL_SSL Setting of a Kafka Instance?
+ How Do I Modify the SASL Mechanism?
+
+ How Do I Change the Security Protocol?
+
+ Will a Kafka Instance Be Restarted After Its Enterprise Project Is Modified?
+
Are Kafka Brokers and ZooKeeper Deployed on the Same VM or on Different VMs?
Which Cipher Suites Are Supported by Kafka?
- Can I Change an Instance from Single-AZ Deployment to Multi-AZ Deployment?
+Can I Change Single-AZ Deployment to Multi-AZ Deployment for an Instance?
- Does DMS for Kafka Support Cross-AZ Disaster Recovery? Where Can I View the AZs Configured for an Existing Instance?
+Does DMS for Kafka Support Cross-AZ Disaster Recovery? Where Can I Check Whether an Existing Instance Is Deployed Across AZs?
Do Kafka Instances Support Disk Encryption?
diff --git a/docs/dms/umn/kafka-faq-191030003.html b/docs/dms/umn/kafka-faq-191030003.html
index 26c76256..acd9f518 100644
--- a/docs/dms/umn/kafka-faq-191030003.html
+++ b/docs/dms/umn/kafka-faq-191030003.html
@@ -8,7 +8,7 @@
Why Is Partition Quantity Limited?
- Can I Change the Partition Quantity?
+Can I Reduce the Partition Quantity?
Why Do I Fail to Create Topics?
@@ -22,9 +22,9 @@
What Should I Do If Kafka Storage Space Is Used Up Because Retrieved Messages Are Not Deleted?
- Will a Kafka Instance Be Restarted After Its Automatic Topic Creation Setting Is Modified?
+How Do I Increase the Partition Quantity?
- How Do I Disable Automatic Topic Creation?
+Will a Kafka Instance Be Restarted After Its Automatic Topic Creation Setting Is Modified?
Can I Delete Unnecessary Topics in a Consumer Group?
diff --git a/docs/dms/umn/kafka-faq-200423001.html b/docs/dms/umn/kafka-faq-200423001.html
index 7b7eaab5..c2bbd9ea 100644
--- a/docs/dms/umn/kafka-faq-200423001.html
+++ b/docs/dms/umn/kafka-faq-200423001.html
@@ -6,10 +6,12 @@
diff --git a/docs/dms/umn/kafka-faq-200426003.html b/docs/dms/umn/kafka-faq-200426003.html
index f70dfba4..62c2460f 100644
--- a/docs/dms/umn/kafka-faq-200426003.html
+++ b/docs/dms/umn/kafka-faq-200426003.html
@@ -1,7 +1,7 @@
Why Can't I View the Subnet and Security Group Information When Creating a DMS Instance?
- This may be because you do not have the Server Administrator and VPC Administrator permissions. For details about how to add permissions to a user group, see "User and User Group Management" > "Viewing and Modifying User Group Information" in the Identity and Access Management User Guide.
+ This may be because you do not have the Server Administrator and VPC Administrator permissions. For details about how to add permissions to a user group, see Viewing and Modifying User Group Information.
diff --git a/docs/dms/umn/kafka-faq-200426005.html b/docs/dms/umn/kafka-faq-200426005.html
index 9c0f5755..5ad0b37e 100644
--- a/docs/dms/umn/kafka-faq-200426005.html
+++ b/docs/dms/umn/kafka-faq-200426005.html
@@ -1,7 +1,7 @@
How Do I Select Storage Space for a Kafka Instance?
- The storage space is the space for storing messages (including messages in replicas), logs and metadata. When specifying storage space, specify the disk type and disk size. Different bandwidth configurations support different disk types. For details, see Table 1.For more information about disks, see "Overview" > "Disk Types and Performance" in the Elastic Volume Service User Guide.
+ The storage space is the space for storing messages (including messages in replicas), logs and metadata. To select a storage space, specify the disk type and disk size. For more information, see Disk Types and Performance.
For example, if the required disk size to store data for the retention period is 100 GB, the disk capacity must be at least: 100 GB x Number of replicas + 100 GB (reserved space). In a Kafka cluster, each node uses a 33 GB disk to store logs and ZooKeeper data. Therefore, the actual available storage space is less than the created storage space.
The number of replicas (3 by default) can be configured when you create a topic. If automatic topic creation has been enabled, each automatically created topic has three replicas by default. You can change this quantity by setting default.replication.factor on the Parameters tab page.
diff --git a/docs/dms/umn/kafka-faq-200426006.html b/docs/dms/umn/kafka-faq-200426006.html
index 1771e0ba..dac24a94 100644
--- a/docs/dms/umn/kafka-faq-200426006.html
+++ b/docs/dms/umn/kafka-faq-200426006.html
@@ -1,9 +1,8 @@
How Do I Choose Between High I/O and Ultra-high I/O?
- - High I/O: The average latency is 6 to 10 ms, and the maximum bandwidth is 120 MB/s (read + write).
- Ultra-high I/O: The average latency is 1 to 3 ms, and the maximum bandwidth is 320 MB/s (read + write).
+ - High I/O: The average latency is 6 to 10 ms, and the maximum bandwidth is 150 MB/s (read + write).
- Ultra-high I/O: The average latency is 1 to 3 ms, and the maximum bandwidth is 350 MB/s (read + write).
You are advised to select ultra-high I/O, because ultra-high I/O disks deliver much higher bandwidth than high I/O.
- Different bandwidth configurations support different disk types. For details, see Table 1.
diff --git a/docs/dms/umn/kafka-faq-200426008.html b/docs/dms/umn/kafka-faq-200426008.html
index 293b6c93..83630c55 100644
--- a/docs/dms/umn/kafka-faq-200426008.html
+++ b/docs/dms/umn/kafka-faq-200426008.html
@@ -1,7 +1,8 @@
Which Kafka Versions Are Supported?
- Kafka v1.1.0, v2.3.0, and v2.7.
+ Kafka v2.3.0, v2.7, and v3.x.
+ For details about how to create a Kafka instance, see Creating a Kafka Instance.
diff --git a/docs/dms/umn/kafka-faq-200426010.html b/docs/dms/umn/kafka-faq-200426010.html
index 3d7e323b..684c68cc 100644
--- a/docs/dms/umn/kafka-faq-200426010.html
+++ b/docs/dms/umn/kafka-faq-200426010.html
@@ -1,7 +1,7 @@
Are Kafka Instances in Cluster Mode?
- Yes. A Kafka instance is a cluster that consists of three or more brokers.
+ Kafka instances are classified into single-node and cluster types. A single-node instance has only one broker in single-node mode. A cluster instance consists of three or more brokers in cluster mode.
diff --git a/docs/dms/umn/kafka-faq-200426011.html b/docs/dms/umn/kafka-faq-200426011.html
index 6ae5e8f1..54c9a5db 100644
--- a/docs/dms/umn/kafka-faq-200426011.html
+++ b/docs/dms/umn/kafka-faq-200426011.html
@@ -1,13 +1,13 @@
- Can I Modify the Connection Address for Accessing a Kafka Instance?
+ Can I Modify the Port for Accessing a Kafka Instance?
No. You must access a Kafka instance through one of the following ports:
- Ensure that correct rules have been configured for the security group of the instance. For details, see How Do I Select and Configure a Security Group?
+ Ensure that proper rules have been configured for the security group of the instance. For details, see How Do I Select and Configure a Security Group?
diff --git a/docs/dms/umn/kafka-faq-200426012.html b/docs/dms/umn/kafka-faq-200426012.html
index 170bb172..bf8de8e4 100644
--- a/docs/dms/umn/kafka-faq-200426012.html
+++ b/docs/dms/umn/kafka-faq-200426012.html
@@ -1,7 +1,10 @@
How Long Are Kafka SSL Certificates Valid for?
- The certificates are valid for more than 15 years. You do not need to worry about certificate expiration. The certificates are used for one-way authentication when enabling SASL for Kafka instances.
+ The certificates are valid for more than 15 years. You do not need to worry about certificate expiration. The certificates are used for one-way authentication when enabling SASL_SSL for Kafka instances.
+ To check the validity of the SSL certificate, perform the following steps:
+ - Decompress the package downloaded from the Kafka instance console to obtain phy_ca.crt.
- Double-click phy_ca.crt. The Certificate dialog box is displayed.
- On the General tab page, view the certificate validity period.
Figure 1 Certificate validity period
+
diff --git a/docs/dms/umn/kafka-faq-200426013.html b/docs/dms/umn/kafka-faq-200426013.html
index 845ded18..9fd20172 100644
--- a/docs/dms/umn/kafka-faq-200426013.html
+++ b/docs/dms/umn/kafka-faq-200426013.html
@@ -1,7 +1,7 @@
How Do I Synchronize Data from One Kafka Instance to Another?
- Unfortunately, you cannot synchronize two Kafka instances in real time. To migrate services from one instance to another, create messages to both instances. After all messages in the original instance have been retrieved or aged, you can migrate services to the new instance.
+ Unfortunately, you cannot synchronize two Kafka instances in real time. To migrate services from one instance to another, create messages to both instances. After all messages in the original instance have been retrieved or aged, you can migrate services to the new instance.
diff --git a/docs/dms/umn/kafka-faq-200426014.html b/docs/dms/umn/kafka-faq-200426014.html
index fa200fa4..c16b5d27 100644
--- a/docs/dms/umn/kafka-faq-200426014.html
+++ b/docs/dms/umn/kafka-faq-200426014.html
@@ -1,7 +1,7 @@
How Do I Change the SASL_SSL Setting of a Kafka Instance?
- The SASL_SSL setting cannot be changed once the instance has been created. Be careful when configuring this setting during instance creation. If you need to change the setting, you must create another instance.
+ The SASL_SSL setting can be changed for cluster instances (see Configuring Plaintext or Ciphertext Access to Kafka Instances), but cannot be changed for single-node instances.
diff --git a/docs/dms/umn/kafka-faq-200426016.html b/docs/dms/umn/kafka-faq-200426016.html
index 12263f5d..4e94c15a 100644
--- a/docs/dms/umn/kafka-faq-200426016.html
+++ b/docs/dms/umn/kafka-faq-200426016.html
@@ -1,7 +1,243 @@
How Many Connection Addresses Does a Kafka Instance Have by Default?
- The number of connection addresses of a Kafka instance is the same as the number of brokers of the instance.
+ The number of connection addresses of a Kafka instance is the same as the number of brokers of the instance. The following tables list the number of brokers corresponding to each flavor.
+
+ Table 1 Kafka instance specifications (v1.1.0/v2.3.0/v2.7 cluster instances)Flavor
+ |
+Brokers
+ |
+Maximum TPS per Broker
+ |
+Maximum Partitions per Broker
+ |
+Recommended Consumer Groups per Broker
+ |
+Maximum Client Connections per Broker
+ |
+Storage Space
+ |
+Traffic per Broker (MB/s)
+ |
+
+
+kafka.2u4g.cluster.small
+ |
+3–30
+ |
+20,000
+ |
+100
+ |
+15
+ |
+2000
+ |
+300 GB–300,000 GB
+ |
+40
+ |
+
+kafka.2u4g.cluster
+ |
+3–30
+ |
+30,000
+ |
+250
+ |
+20
+ |
+2000
+ |
+300 GB–300,000 GB
+ |
+100
+ |
+
+kafka.4u8g.cluster
+ |
+3–30
+ |
+100,000
+ |
+500
+ |
+100
+ |
+4000
+ |
+300 GB–600,000 GB
+ |
+200
+ |
+
+kafka.8u16g.cluster
+ |
+3–50
+ |
+150,000
+ |
+1000
+ |
+150
+ |
+4000
+ |
+300 GB–1,500,000 GB
+ |
+250
+ |
+
+kafka.12u24g.cluster
+ |
+3–50
+ |
+200,000
+ |
+1500
+ |
+200
+ |
+4000
+ |
+300 GB–1,500,000 GB
+ |
+375
+ |
+
+kafka.16u32g.cluster
+ |
+3–50
+ |
+250,000
+ |
+2000
+ |
+200
+ |
+4000
+ |
+300 GB–1,500,000 GB
+ |
+500
+ |
+
+
+
+
+
+ Table 2 Kafka instance specifications (single-node)Flavor
+ |
+Brokers
+ |
+TPS per Broker
+ |
+Maximum Partitions per Broker
+ |
+Recommended Consumer Groups per Broker
+ |
+Maximum Client Connections per Broker
+ |
+Storage Space
+ |
+Traffic per Broker (MB/s)
+ |
+
+
+kafka.2u4g.single.small
+ |
+1
+ |
+20,000
+ |
+100
+ |
+15
+ |
+2000
+ |
+100 GB–10,000 GB
+ |
+40
+ |
+
+kafka.2u4g.single
+ |
+1
+ |
+30,000
+ |
+250
+ |
+20
+ |
+2000
+ |
+100 GB–10,000 GB
+ |
+100
+ |
+
+
+
+
+
+ Table 3 Kafka instance specifications (v3.x cluster instances)Flavor
+ |
+Brokers
+ |
+TPS per Broker
+ |
+Maximum Partitions per Broker
+ |
+Recommended Consumer Groups per Broker
+ |
+Maximum Client Connections per Broker
+ |
+Storage Space
+ |
+Traffic per Broker (MB/s)
+ |
+
+
+kafka.2u4g.cluster.beta
+ |
+3
+ |
+30,000
+ |
+250
+ |
+20
+ |
+2000
+ |
+300 GB
+ |
+100
+ |
+
+kafka.4u8g.cluster.beta
+ |
+3
+ |
+100,000
+ |
+500
+ |
+100
+ |
+4000
+ |
+300 GB
+ |
+200
+ |
+
+
+
+
diff --git a/docs/dms/umn/kafka-faq-200426019.html b/docs/dms/umn/kafka-faq-200426019.html
index ceee8124..e3538c9a 100644
--- a/docs/dms/umn/kafka-faq-200426019.html
+++ b/docs/dms/umn/kafka-faq-200426019.html
@@ -2,7 +2,7 @@
Do Kafka Instances Support Cross-VPC Access?
Yes. You can use one of the following methods to access a Kafka instance across VPCs:
- - Establish a VPC peering connection to allow two VPCs to communicate with each other. For details, see "Operation Guide" > "VPC Peering Connection" in Virtual Private Cloud User Guide.
- Use VPC Endpoint (VPCEP) to establish a cross-VPC connection. For details, see Cross-VPC Access to a Kafka Instance.
+
diff --git a/docs/dms/umn/kafka-faq-200426020.html b/docs/dms/umn/kafka-faq-200426020.html
index b39d380c..1c2bc7fc 100644
--- a/docs/dms/umn/kafka-faq-200426020.html
+++ b/docs/dms/umn/kafka-faq-200426020.html
@@ -2,7 +2,7 @@
Do Kafka Instances Support Cross-Subnet Access?
Yes.
- If the client and the instance are in the same VPC, cross-subnet access is supported.
+ If the client and the instance are in the same VPC, cross-subnet access is supported. By default, subnets in the same VPC can communicate with each other.
diff --git a/docs/dms/umn/kafka-faq-200426023.html b/docs/dms/umn/kafka-faq-200426023.html
index cb959108..69b0452b 100644
--- a/docs/dms/umn/kafka-faq-200426023.html
+++ b/docs/dms/umn/kafka-faq-200426023.html
@@ -2,7 +2,7 @@
Does DMS for Kafka Support Authentication with Kerberos?
No, Kerberos authentication is not supported. Kafka supports client authentication with SASL and API calling authentication using tokens and AK/SK.
- To access an instance in SASL mode, you need the certificates provided by DMS. For details, see Accessing a Kafka Instance with SASL.
+ To access an instance in SASL mode, you need the certificates provided by DMS. For details, see Connecting to Kafka Using the Client (Ciphertext Access).
diff --git a/docs/dms/umn/kafka-faq-200426024.html b/docs/dms/umn/kafka-faq-200426024.html
index 2a67e898..a83cfa05 100644
--- a/docs/dms/umn/kafka-faq-200426024.html
+++ b/docs/dms/umn/kafka-faq-200426024.html
@@ -4,78 +4,236 @@
The number of topics is related to the total number of topic partitions and the number of partitions in each topic. There is an upper limit on the aggregate number of partitions of topics. When this limit is reached, no more topics can be created.
The partition limit varies depending on the flavor, as shown in the following table.
- Table 1 TPS and the maximum number of partitions supported by different instance specifications and I/O typesBandwidth
+Table 1 Kafka instance specifications (v1.1.0/v2.3.0/v2.7 cluster instances)Flavor
|
-I/O Type
+ | Brokers
|
-TPS (High-Throughput)
+ | Maximum TPS per Broker
|
-TPS (Synchronous Replication)
+ | Maximum Partitions per Broker
|
-Maximum Partitions
+ | Recommended Consumer Groups per Broker
+ |
+Maximum Client Connections per Broker
+ |
+Storage Space
+ |
+Traffic per Broker (MB/s)
|
-100 MB/s
+ | kafka.2u4g.cluster.small
|
-High I/O
+ | 3–30
|
-100,000
+ | 20,000
|
-60,000
+ | 100
|
-300
+ | 15
+ |
+2000
+ |
+300 GB–300,000 GB
+ |
+40
|
-Ultra-high I/O
+ | kafka.2u4g.cluster
|
-100,000
+ | 3–30
|
-80,000
+ | 30,000
|
-300
+ | 250
+ |
+20
+ |
+2000
+ |
+300 GB–300,000 GB
+ |
+100
|
-300 MB/s
+ | kafka.4u8g.cluster
|
-High I/O
+ | 3–30
|
-300,000
+ | 100,000
|
-150,000
+ | 500
|
-900
+ | 100
+ |
+4000
+ |
+300 GB–600,000 GB
+ |
+200
|
-Ultra-high I/O
+ | kafka.8u16g.cluster
|
-300,000
+ | 3–50
|
-200,000
+ | 150,000
|
-900
+ | 1000
+ |
+150
+ |
+4000
+ |
+300 GB–1,500,000 GB
+ |
+250
|
-600 MB/s
+ | kafka.12u24g.cluster
|
-Ultra-high I/O
+ | 3–50
|
-600,000
+ | 200,000
|
-300,000
+ | 1500
|
-1800
+ | 200
+ |
+4000
+ |
+300 GB–1,500,000 GB
+ |
+375
|
-1200 MB/s
+ | kafka.16u32g.cluster
|
-Ultra-high I/O
+ | 3–50
|
-1,200,000
+ | 250,000
|
-400,000
+ | 2000
|
-1800
+ | 200
+ |
+4000
+ |
+300 GB–1,500,000 GB
+ |
+500
+ |
+
+
+
+
+
+Table 2 Kafka instance specifications (single-node)Flavor
+ |
+Brokers
+ |
+TPS per Broker
+ |
+Maximum Partitions per Broker
+ |
+Recommended Consumer Groups per Broker
+ |
+Maximum Client Connections per Broker
+ |
+Storage Space
+ |
+Traffic per Broker (MB/s)
+ |
+
+
+kafka.2u4g.single.small
+ |
+1
+ |
+20,000
+ |
+100
+ |
+15
+ |
+2000
+ |
+100 GB–10,000 GB
+ |
+40
+ |
+
+kafka.2u4g.single
+ |
+1
+ |
+30,000
+ |
+250
+ |
+20
+ |
+2000
+ |
+100 GB–10,000 GB
+ |
+100
+ |
+
+
+
+
+
+Table 3 Kafka instance specifications (v3.x cluster instances)Flavor
+ |
+Brokers
+ |
+TPS per Broker
+ |
+Maximum Partitions per Broker
+ |
+Recommended Consumer Groups per Broker
+ |
+Maximum Client Connections per Broker
+ |
+Storage Space
+ |
+Traffic per Broker (MB/s)
+ |
+
+
+kafka.2u4g.cluster.beta
+ |
+3
+ |
+30,000
+ |
+250
+ |
+20
+ |
+2000
+ |
+300 GB
+ |
+100
+ |
+
+kafka.4u8g.cluster.beta
+ |
+3
+ |
+100,000
+ |
+500
+ |
+100
+ |
+4000
+ |
+300 GB
+ |
+200
|
diff --git a/docs/dms/umn/kafka-faq-200426025.html b/docs/dms/umn/kafka-faq-200426025.html
index fd6c9d20..5f77089e 100644
--- a/docs/dms/umn/kafka-faq-200426025.html
+++ b/docs/dms/umn/kafka-faq-200426025.html
@@ -4,78 +4,236 @@
Kafka manages messages by partition. If there are too many partitions, message creation, storage, and retrieval will be fragmented, affecting the performance and stability. If the total number of partitions of topics reaches the upper limit, you cannot create more topics.
The partition limit varies depending on the flavor, as shown in the following table.
- Table 1 TPS and the maximum number of partitions supported by different instance specifications and I/O typesBandwidth
+Table 1 Kafka instance specifications (v1.1.0/v2.3.0/v2.7 cluster instances)Flavor
|
-I/O Type
+ | Brokers
|
-TPS (High-Throughput)
+ | Maximum TPS per Broker
|
-TPS (Synchronous Replication)
+ | Maximum Partitions per Broker
|
-Maximum Partitions
+ | Recommended Consumer Groups per Broker
+ |
+Maximum Client Connections per Broker
+ |
+Storage Space
+ |
+Traffic per Broker (MB/s)
|
-100 MB/s
+ | kafka.2u4g.cluster.small
|
-High I/O
+ | 3–30
|
-100,000
+ | 20,000
|
-60,000
+ | 100
|
-300
+ | 15
+ |
+2000
+ |
+300 GB–300,000 GB
+ |
+40
|
-Ultra-high I/O
+ | kafka.2u4g.cluster
|
-100,000
+ | 3–30
|
-80,000
+ | 30,000
|
-300
+ | 250
+ |
+20
+ |
+2000
+ |
+300 GB–300,000 GB
+ |
+100
|
-300 MB/s
+ | kafka.4u8g.cluster
|
-High I/O
+ | 3–30
|
-300,000
+ | 100,000
|
-150,000
+ | 500
|
-900
+ | 100
+ |
+4000
+ |
+300 GB–600,000 GB
+ |
+200
|
-Ultra-high I/O
+ | kafka.8u16g.cluster
|
-300,000
+ | 3–50
|
-200,000
+ | 150,000
|
-900
+ | 1000
+ |
+150
+ |
+4000
+ |
+300 GB–1,500,000 GB
+ |
+250
|
-600 MB/s
+ | kafka.12u24g.cluster
|
-Ultra-high I/O
+ | 3–50
|
-600,000
+ | 200,000
|
-300,000
+ | 1500
|
-1800
+ | 200
+ |
+4000
+ |
+300 GB–1,500,000 GB
+ |
+375
|
-1200 MB/s
+ | kafka.16u32g.cluster
|
-Ultra-high I/O
+ | 3–50
|
-1,200,000
+ | 250,000
|
-400,000
+ | 2000
|
-1800
+ | 200
+ |
+4000
+ |
+300 GB–1,500,000 GB
+ |
+500
+ |
+
+
+
+
+
+Table 2 Kafka instance specifications (single-node)Flavor
+ |
+Brokers
+ |
+TPS per Broker
+ |
+Maximum Partitions per Broker
+ |
+Recommended Consumer Groups per Broker
+ |
+Maximum Client Connections per Broker
+ |
+Storage Space
+ |
+Traffic per Broker (MB/s)
+ |
+
+
+kafka.2u4g.single.small
+ |
+1
+ |
+20,000
+ |
+100
+ |
+15
+ |
+2000
+ |
+100 GB–10,000 GB
+ |
+40
+ |
+
+kafka.2u4g.single
+ |
+1
+ |
+30,000
+ |
+250
+ |
+20
+ |
+2000
+ |
+100 GB–10,000 GB
+ |
+100
+ |
+
+
+
+
+
+Table 3 Kafka instance specifications (v3.x cluster instances)Flavor
+ |
+Brokers
+ |
+TPS per Broker
+ |
+Maximum Partitions per Broker
+ |
+Recommended Consumer Groups per Broker
+ |
+Maximum Client Connections per Broker
+ |
+Storage Space
+ |
+Traffic per Broker (MB/s)
+ |
+
+
+kafka.2u4g.cluster.beta
+ |
+3
+ |
+30,000
+ |
+250
+ |
+20
+ |
+2000
+ |
+300 GB
+ |
+100
+ |
+
+kafka.4u8g.cluster.beta
+ |
+3
+ |
+100,000
+ |
+500
+ |
+100
+ |
+4000
+ |
+300 GB
+ |
+200
|
diff --git a/docs/dms/umn/kafka-faq-200426026.html b/docs/dms/umn/kafka-faq-200426026.html
index a37df62e..e90c9804 100644
--- a/docs/dms/umn/kafka-faq-200426026.html
+++ b/docs/dms/umn/kafka-faq-200426026.html
@@ -1,8 +1,8 @@
Why Do I Fail to Create Topics?
-Possible cause: The aggregate number of partitions of created topics has reached the upper limit. The maximum number of partitions varies with instance specifications. For details, see Specifications.
- Solution: Delete unnecessary topics.
+ Possible cause: The aggregate number of partitions of created topics has reached the upper limit. The upper limit on partitions varies by instance specifications. For details, see Specifications.
+ Solution: Scale up the instance or delete unnecessary topics.
diff --git a/docs/dms/umn/kafka-faq-200426027.html b/docs/dms/umn/kafka-faq-200426027.html
index f525b301..10206e72 100644
--- a/docs/dms/umn/kafka-faq-200426027.html
+++ b/docs/dms/umn/kafka-faq-200426027.html
@@ -3,7 +3,7 @@
Do Kafka Instances Support Batch Importing Topics or Automatic Topic Creation?
Automatic topic creation is supported, but batch topic import is not supported. You can only export topics in batches.
Enable automatic topic creation using one of the following methods:
-
+
diff --git a/docs/dms/umn/kafka-faq-200426028.html b/docs/dms/umn/kafka-faq-200426028.html
index e9e5d2b9..aab07dfa 100644
--- a/docs/dms/umn/kafka-faq-200426028.html
+++ b/docs/dms/umn/kafka-faq-200426028.html
@@ -1,8 +1,8 @@
Why Do Deleted Topics Still Exist?
- This may be because automatic topic creation has been enabled and a consumer is connecting to the topic. If no existing topics are available for message creation, new topics will be automatically created.
- To solve this problem, disable automatic topic creation.
+ Possible cause: Automatic topic creation has been enabled and a consumer is connecting to the topic. If no existing topics are available for message creation, new topics will be automatically created.
+ Solution: Disable automatic topic creation.
diff --git a/docs/dms/umn/kafka-faq-200426030.html b/docs/dms/umn/kafka-faq-200426030.html
index 58b6de8e..d95f1424 100644
--- a/docs/dms/umn/kafka-faq-200426030.html
+++ b/docs/dms/umn/kafka-faq-200426030.html
@@ -2,8 +2,7 @@
Can I View the Disk Space Used by a Topic?
Yes. Use either of the following methods to check the disk space used by a topic:
- - Click
next to the Kafka instance name to go to the Cloud Eye console. On the Queues tab page, set Queue to the name of the topic whose disk space you want to view and Scope to Basic monitoring. The Message Size metric reflects the message size of the selected topic. - Click the desired Kafka instance to view its details. In the navigation pane, choose Monitoring. On the By Topic tab page, set Topic to the name of the topic whose disk space you want to view and Monitoring Type to Basic monitoring. The Message Size metric reflects the message size of the selected topic.
-
+ - In the row containing the desired Kafka instance, click View Metric to go to the Cloud Eye console. On the By Topic tab page, set Topic to the name of the desired topic, and Scope to Basic monitoring. The Message Size metric reflects the message size of the selected topic.
- Click the desired Kafka instance to view its details. In the navigation pane, choose Monitoring. On the By Topic tab page, set Topic to the name of the topic whose disk space you want to view and Monitoring Type to Basic monitoring. The Message Size metric reflects the message size of the selected topic.
diff --git a/docs/dms/umn/kafka-faq-200426032.html b/docs/dms/umn/kafka-faq-200426032.html
index f0ea93a0..a3da359c 100644
--- a/docs/dms/umn/kafka-faq-200426032.html
+++ b/docs/dms/umn/kafka-faq-200426032.html
@@ -2,7 +2,7 @@
Can I Add ACL Permissions for Topics?
If you have enabled SASL_SSL for your Kafka instance, you can configure ACL permissions for your topics. On the Topics tab page of the Kafka console, click Grant User Permission in the row that contains the topic for which you want to configure user permissions.
- For details, see Granting Permissions to a SASL_SSL User.
+ For details, see Configuring Kafka Topic Permissions.
diff --git a/docs/dms/umn/kafka-faq-200426033.html b/docs/dms/umn/kafka-faq-200426033.html
index 70825fca..8fd5cb08 100644
--- a/docs/dms/umn/kafka-faq-200426033.html
+++ b/docs/dms/umn/kafka-faq-200426033.html
@@ -1,8 +1,9 @@
Do I Need to Create Consumer Groups, Producers, and Consumers for Kafka Instances?
- No. They are generated automatically when you use the instance.
- For details about creating and retrieving messages after connecting to a Kafka instance, see Accessing a Kafka Instance.
+ - When the auto.create.groups.enable parameter is set to true, you do not need to create a consumer group, producer, or consumer because they are generated automatically when you use the instance.
- When the auto.create.groups.enable parameter is set to false, you need to create a consumer group, but do not need to create a producer or consumer.
+ To change the auto.create.groups.enable setting, see Modifying Kafka Instance Configuration Parameters.
+ For details about producing and consuming messages after connecting to a Kafka instance, see Connecting to Kafka Using the Client (Plaintext Access).
diff --git a/docs/dms/umn/kafka-faq-200426036.html b/docs/dms/umn/kafka-faq-200426036.html
index a1148d3f..a0bc91e4 100644
--- a/docs/dms/umn/kafka-faq-200426036.html
+++ b/docs/dms/umn/kafka-faq-200426036.html
@@ -8,10 +8,11 @@
The interval between retrievals is too long.The maximum number of messages that a consumer can retrieve at a time is specified by max.poll.records. In most cases, a client processes the retrieved data before starting the next retrieval. The processing may be prolonged when a large number of messages are retrieved at a time and cannot be processed within the time specified by max.poll.interval.ms, or when an exception occurs during the process (for example, data needs to be written to the backend database, but the backend database pressure is too high, resulting in high latency). If the consumer does not send the next retrieval request within the time specified by max.poll.interval.ms, the broker considers that the consumer is inactive and removes it from the consumer group, triggering rebalancing.
Solutions and Troubleshooting MethodsScenario 1: Heartbeat requests are not sent in time.
- Solution: Set the value of session.timeout.ms to three times the value of heartbeat.interval.ms.
+ Solution: On the consumer client, set the value of session.timeout.ms to three times the value of heartbeat.interval.ms.
Scenario 2: The interval between retrievals is too long.
Troubleshooting methods:
- Check the time required for processing a single message and whether the time required for processing a specified number (max.poll.records) of messages exceeds the time specified by max.poll.interval.ms.
- Check whether message processing requires network connections, such as writing data to the database and calling backend APIs, and whether the backend is normal in rebalancing scenarios.
+ Solution: On the consumer client, decrease the value of max.poll.records.
diff --git a/docs/dms/umn/kafka-faq-200426037.html b/docs/dms/umn/kafka-faq-200426037.html
index 723c7033..c4b3b4eb 100644
--- a/docs/dms/umn/kafka-faq-200426037.html
+++ b/docs/dms/umn/kafka-faq-200426037.html
@@ -5,7 +5,7 @@
Possible cause 2: The createTime timestamp of the message is incorrect.On the console, messages are queried based on the timestamp, which is generated by the client. Different clients have different processing policies. The default value may be 0 or -1. As a result, message may fail to be queried.
Solution: Check whether the value of createTime is correctly configured.
Possible cause 3: The disk usage exceeds 95%, and Capacity Threshold Policy is set to Automatically delete.If Capacity Threshold Policy is set to Automatically delete, the earliest 10% of messages will be deleted when 95% of the disk capacity is used, to ensure sufficient disk space. In this case, the messages that do not reach the aging time are also deleted and cannot be queried.
-Solution: Modify the capacity threshold policy. If Capacity Threshold Policy is set to Stop production, new messages will no longer be created when the disk usage reaches the disk capacity threshold (95%), but existing messages can still be retrieved until the aging time arrives. This policy is suitable for scenarios where no data losses can be tolerated.
+Solution: Modify the capacity threshold policy or expand the disk capacity. If Capacity Threshold Policy is set to Stop production, new messages will no longer be created when the disk usage reaches the disk capacity threshold (95%), but existing messages can still be retrieved until the aging time arrives. This policy is suitable for scenarios where no data losses can be tolerated.
diff --git a/docs/dms/umn/kafka-faq-200426041.html b/docs/dms/umn/kafka-faq-200426041.html
index 5c8d9a14..9b813e52 100644
--- a/docs/dms/umn/kafka-faq-200426041.html
+++ b/docs/dms/umn/kafka-faq-200426041.html
@@ -1,9 +1,14 @@
Why Can't I View the Monitoring Data?
- The possible causes are as follows:
- - The topic name starts with a special character, such as an underscore (_) or a number sign (#).
- The consumer group name starts with a special character, such as an underscore (_) or a number sign (#).
- To solve the problem, delete topics and consumer groups whose names contain the special character.
+ If topic monitoring data is not displayed, the possible causes are as follows:
+ - The topic name starts with a special character, such as an underscore (_) or a number sign (#).
- No topic is created in the Kafka instance.
+ Solution:
+ - Delete topics whose names contain special characters.
- Create a topic.
+ If consumer group monitoring data is not displayed, the possible causes are as follows:
+ - The consumer group name starts with a special character, such as an underscore (_) or a number sign (#).
- No consumers in the group have connected to the instance.
+ Solution:
+ - Delete consumer groups whose names contain special characters.
- Consume messages using this consumer group.
diff --git a/docs/dms/umn/kafka-faq-200426100.html b/docs/dms/umn/kafka-faq-200426100.html
new file mode 100644
index 00000000..4053b384
--- /dev/null
+++ b/docs/dms/umn/kafka-faq-200426100.html
@@ -0,0 +1,13 @@
+
+
+ What Can I Do If Kafka Messages Are Accumulated?
+ Symptom: An alarm is generated for the Accumulated Messages metric.
+ Solution:
+ - Log in to the Kafka console and click the instance for which the alarm is generated. The instance details page is displayed.
- In the navigation pane, choose Monitoring.
- On the By Consumer Group tab page, view Consumer Available Messages to find the consumer group with accumulated messages.
- In the navigation pane, choose Consumer Groups.
- Check whether there are consumers in the consumer group where messages are accumulated. If yes, contact the service party to accelerate their message consumption. If no, delete the unused consumer groups.
+
+
+
diff --git a/docs/dms/umn/kafka-faq-200426101.html b/docs/dms/umn/kafka-faq-200426101.html
index fae68a3a..41cd93c8 100644
--- a/docs/dms/umn/kafka-faq-200426101.html
+++ b/docs/dms/umn/kafka-faq-200426101.html
@@ -1,7 +1,7 @@
- Can I Change the Partition Quantity?
- No. If you want to use fewer partitions, delete the corresponding topic, create another one, and specify the desired number of partitions.
+ Can I Reduce the Partition Quantity?
+ No. If you want to use fewer partitions, delete the corresponding topic, create another one, and specify the desired number of partitions.
diff --git a/docs/dms/umn/kafka-faq-200708001.html b/docs/dms/umn/kafka-faq-200708001.html
index fd7f467e..ae00038e 100644
--- a/docs/dms/umn/kafka-faq-200708001.html
+++ b/docs/dms/umn/kafka-faq-200708001.html
@@ -2,7 +2,7 @@
Why Do Messages Still Exist After the Retention Period Elapses?
If the aging time has been set for a topic, the value of the log.retention.hours parameter does not take effect for the topic. The value of the log.retention.hours parameter takes effect only if the aging time has not been set for the topic.
- Possible cause 1: The segment files are in use.
+ Possible cause 1: Each partition of a topic consists of multiple segment files of the same size (500 MB). When the size of messages stored in a segment file reaches 500 MB, another segment file is created. Kafka deletes segment files instead of messages. Kafka requires that at least one segment file be reserved for storing messages. If the segment file in use contains aged messages, the segment file will not be deleted. Therefore, the aged messages will remain.
Solution: Wait until the segment is no longer in use or delete the topic where messages have reached their retention period.
Possible cause 2: In a topic, there is a message whose CreateTime is a future time. For example, assume that it is January 1, and the CreateTime is February 1. The message will not be aged after 72 hours from now. As a result, messages created subsequently will also not be aged.
Solution: Delete the topic where the CreateTime of a message is a future time.
diff --git a/docs/dms/umn/kafka-faq-200708002.html b/docs/dms/umn/kafka-faq-200708002.html
index efc87548..ffedf80a 100644
--- a/docs/dms/umn/kafka-faq-200708002.html
+++ b/docs/dms/umn/kafka-faq-200708002.html
@@ -1,7 +1,7 @@
Does DMS for Kafka Support Password-Free Access?
- Yes. No password is required for accessing a Kafka instance with SASL disabled. For details, see Accessing a Kafka Instance Without SASL.
+ Yes. No password is required when accessing a Kafka instance in plaintext. For details, see Connecting to Kafka Using the Client (Plaintext Access).
diff --git a/docs/dms/umn/kafka-pd-0052.html b/docs/dms/umn/kafka-pd-0052.html
new file mode 100644
index 00000000..c895a08e
--- /dev/null
+++ b/docs/dms/umn/kafka-pd-0052.html
@@ -0,0 +1,124 @@
+
+
+ Comparing Single-node and Cluster Kafka Instances
+ A single-node Kafka instance has only one broker. These instances do not guarantee performance or reliability and are for trial use or testing only. In the production environment, use cluster instances.
+ Parameters for CreationParameter settings that are unique to single-node instances are listed in Table 1.
+
+ Table 1 Parameters of a single-node instanceParameter
+ |
+Description
+ |
+
+
+AZ
+ |
+Only one AZ
+ |
+
+Version
+ |
+Only v2.7
+ |
+
+Broker Flavor
+ |
+kafka.2u4g.single.small and kafka.2u4g.single
+ |
+
+Brokers
+ |
+Only one
+ |
+
+Storage space per broker
+ |
+Disk types: high I/O and ultra-high I/O; disk size: 100–10,000 GB
+ |
+
+Ciphertext Access
+ |
+Not supported
+ |
+
+
+
+
+
+ Comparing Instance FunctionsTable 2 compares the functions of single-node and cluster instances.
+
+ Table 2 Comparing functionsFunction
+ |
+Single-node Instance
+ |
+Cluster Instance
+ |
+
+
+Modifying instance specifications
+ |
+×
+ |
+√
+ |
+
+Changing the instance access mode
+ |
+Only enabling/disabling public plaintext access
+ |
+Options:
+- Enabling private ciphertext access
- Enabling/Disabling private plaintext access
- Enabling/Disabling public plaintext access
- Enabling/Disabling public ciphertext access
+ |
+
+Resetting Kafka password
+ |
+×
+ |
+√
+ |
+
+Viewing disk usage
+ |
+×
+ |
+√
+ |
+
+Reassigning partitions
+ |
+×
+ |
+√
+ |
+
+Configuring topic permissions
+ |
+×
+ |
+√
+ |
+
+Managing users
+ |
+×
+ |
+√
+ |
+
+Modifying configuration parameters
+ |
+×
+ |
+√
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/dms/umn/kafka-pd-190605001.html b/docs/dms/umn/kafka-pd-190605001.html
index aaef23ce..c856a0e9 100644
--- a/docs/dms/umn/kafka-pd-190605001.html
+++ b/docs/dms/umn/kafka-pd-190605001.html
@@ -4,7 +4,7 @@
Apache Kafka is distributed message middleware that features high throughput, data persistence, horizontal scalability, and stream data processing. It adopts the publish-subscribe pattern and is widely used for log collection, data streaming, online/offline system analytics, and real-time monitoring.
DMS is a message queuing service based on Apache Kafka. It provides Kafka instances with isolated compute, storage, and bandwidth resources. DMS allows you to apply and configure resources based on service requirements. It can be used out of the box and frees you from deployment and O&M so that you can focus on the agile development of your applications.
- Readers' GuideThis documentation introduces DMS for Kafka and its differences from Apache Kafka. You will learn about the detailed information about the specifications, console operations, and client access to instances of DMS for Kafka.
+ Readers' GuideThis documentation introduces DMS for Kafka and its differences from Apache Kafka. You will learn about the detailed information about the specifications, console operations, and client access to instances of DMS for Kafka.
For more information about the basic knowledge of Kafka or technical details about creating and retrieving messages, please go to the official Apache Kafka website.
diff --git a/docs/dms/umn/kafka-pd-190605002.html b/docs/dms/umn/kafka-pd-190605002.html
index 39db25f6..d31c925b 100644
--- a/docs/dms/umn/kafka-pd-190605002.html
+++ b/docs/dms/umn/kafka-pd-190605002.html
@@ -4,10 +4,11 @@
- Cloud Trace Service (CTS)
CTS generates traces to provide you with a history of operations performed on cloud service resources. The traces include operation requests sent using the management console or open APIs, as well as the operation results. You can view all generated traces to query, audit, and backtrack performed operations.
For details about the operations recorded by CTS, see Operations Logged by CTS.
- Virtual Private Cloud (VPC)
Kafka instances run in VPCs and use the IP addresses and bandwidth of VPC. Security groups of VPCs enhance the security of network access to the Kafka instances.
- - ECS
An ECS is a basic computing unit that consists of vCPUs, memory, OS, and EVS disks. Kafka instances run on ECSs. A broker corresponds to an ECS.
+ - Elastic Cloud Server (ECS)
An ECS is a basic computing unit that consists of vCPUs, memory, OS, and EVS disks. Kafka instances run on ECSs. A broker corresponds to an ECS.
- Elastic Volume Service (EVS)
EVS provides block storage services for ECSs. All Kafka data, such as messages, metadata, and logs, is stored in EVS disks.
- - Cloud Eye
Cloud Eye is an open platform that provides monitoring, alarm reporting, and alarm notification for your resources in real time.
-For details about DMS metrics monitored by Cloud Eye, see Kafka Metrics.
+ - Identity and Access Management (IAM)
IAM enables you to easily manage users and control their access to cloud services and resources. Grant different users different Kafka permissions required to perform a given task based on their job responsibilities.
+ - Cloud Eye (CES)
Cloud Eye is an open platform that provides monitoring, alarm reporting, and alarm notification for your resources in real time.
+For details about DMS metrics monitored by Cloud Eye, see Kafka Metrics.
The values of all Kafka instance metrics are reported to Cloud Eye every minute.
- Elastic IP (EIP)
The EIP service provides independent public IP addresses and bandwidth for Internet access. Kafka instances bound with EIPs can be accessed over public networks.
diff --git a/docs/dms/umn/kafka-pd-190605003.html b/docs/dms/umn/kafka-pd-190605003.html
index 3f0ad2ab..d8326a59 100644
--- a/docs/dms/umn/kafka-pd-190605003.html
+++ b/docs/dms/umn/kafka-pd-190605003.html
@@ -16,7 +16,7 @@
Version
|
-- The service version can be 1.1.0, 2.3.0, or 2.7. Kafka instances cannot be upgraded once they are created.
- Clients later than version 0.10 are supported. Use a version that is consistent with the service version.
+ | - The service version can be 2.3.0, 2.7, or 3.x. Kafka instances cannot be upgraded once they are created.
- Clients later than version 0.10 are supported. Use a version that is consistent with the service version.
|
Logging in to the VM where the Kafka brokers reside
@@ -24,9 +24,24 @@
| Not supported
|
-VPC and subnet
+ | Storage
|
-After an instance is created, its VPC and subnet cannot be modified.
+ | - The storage space can be expanded but cannot be reduced.
- You can expand the storage space up to 20 times.
+ |
+
+Broker quantity
+ |
+The broker quantity can be increased but cannot be decreased.
+ |
+
+Broker flavor
+ |
+- The broker flavor can be increased but cannot be decreased.
- Single-replica topics do not support message production during this period. Services will be interrupted.
- If a topic has multiple replicas, scaling up the broker flavor does not interrupt services, but may cause disorder of partition messages. Evaluate this impact and avoid peak hours.
- Broker rolling restarts will cause partition leader changes, interrupting connections for less than a minute when the network is stable. For multi-replica topics, configure the retry mechanism on the producer client.
+ |
+
+VPC, subnet, and AZ
+ |
+After an instance is created, its VPC, subnet, and AZ cannot be modified.
|
Kerberos authentication
@@ -34,6 +49,11 @@
| Not supported
|
+Client connections from each IP address
+ |
+Each Kafka broker allows a maximum of 1000 connections from each IP address by default. Excess connections will be rejected.
+ |
+
@@ -53,7 +73,7 @@
|
Number of partitions in a topic
|
-The number of partitions in a topic can be increased but cannot be decreased.
+ | Based on the open-source Kafka constraints, the number of partitions in a topic can be increased but cannot be decreased.
|
Topic quantity
@@ -63,13 +83,11 @@
|
Automatic topic creation
|
-Supported. If automatic topic creation is enabled, the system automatically creates a topic when a message is created in or retrieved from a topic that does not exist. This topic has the following default settings: 3 partitions, 3 replicas, aging time 72 hours, and synchronous replication and flushing disabled.
-After you change the value of the log.retention.hours, default.replication.factor, or num.partitions parameter, automatically created topics later use the new value. For example, if num.partitions is set to 5, an automatically created topic will have the following settings: 5 partitions, 3 replicas, aging time 72 hours, and synchronous replication and flushing disabled.
- |
-
-Decreasing partition quantity
- |
-The partition quantity cannot be decreased due to the limitations of Apache Kafka.
+ | Supported. If this option is enabled, a topic will be automatically created when a message is produced in or consumed from a topic that does not exist. By default, the topic has the following parameters:
+- Partitions: 3
- Replicas: 3
- Aging Time: 72
- Synchronous Replication and Synchronous Flushing disabled
- Message Timestamp: CreateTime
- Max.Message Size (bytes): 10,485,760
+After you change the value of the log.retention.hours, default.replication.factor, or num.partitions parameter, the value will be used in later topics that are automatically created.
+For example, assume that num.partitions is changed to 5, an automatically created topic has the following parameters:
+- Partitions: 5
- Replicas: 3
- Aging Time: 72
- Synchronous Replication and Synchronous Flushing disabled
- Message Timestamp: CreateTime
- Max.Message Size (bytes): 10,485,760
|
Synchronous replication
@@ -103,6 +121,11 @@
| Not supported
|
+Broker faults
+ |
+When some brokers of an instance are faulty, topics cannot be created, modified, or deleted, but can be queried.
+ |
+
@@ -129,6 +152,11 @@
If a consumer group name starts with a special character, for example, an underscore (_) or a number sign (#), monitoring data cannot be displayed.
|
+ Broker faults
+ |
+When some instance brokers are faulty, consumer groups cannot be created or deleted, and consumption progress cannot be reset, but consumer groups can be queried.
+ |
+
@@ -149,6 +177,27 @@
|
---|
+ User
+ Table 5 User notes and constraintsItem
+ |
+Notes and Constraints
+ |
+
+
+Number of users
+ |
+The maximum number of users that can be created for a Kafka instance is 20 or 500. Check the console for the actual limit.
+ |
+
+Broker faults
+ |
+When some instance brokers are faulty, users cannot be created or deleted, and passwords cannot be reset, but users can be queried.
+ |
+
+
+
+
+
diff --git a/docs/dms/umn/kafka-pd-200720001.html b/docs/dms/umn/kafka-pd-200720001.html
index fe741458..145a9b9e 100644
--- a/docs/dms/umn/kafka-pd-200720001.html
+++ b/docs/dms/umn/kafka-pd-200720001.html
@@ -33,7 +33,7 @@
On-demand use
|
- Multiple specifications are available to suit different needs.
+ | Multiple specifications are available to suit different needs. The instance broker quantity, broker flavor, and disk space can be increased with a few clicks.
|
Expenses are incurred for setting up a message service and occupying underlying resources.
|
diff --git a/docs/dms/umn/kafka-qs-0409001.html b/docs/dms/umn/kafka-qs-0409001.html
new file mode 100644
index 00000000..d8cf5c17
--- /dev/null
+++ b/docs/dms/umn/kafka-qs-0409001.html
@@ -0,0 +1,347 @@
+
+
+ Getting Started with Kafka for Message Production and Consumption
+ This section takes the example of creating a Kafka 2.7 instance (ciphertext access and SASL_SSL) and accessing it on the client (private network, within a virtual private cloud (VPC)) for message production and consumption to get you quickly started with Distributed Message Service (DMS).
+ Figure 1 Procedure for using DMS for Kafka
+ - Step 1: Preparations
A Kafka instance runs in a VPC. Before creating a Kafka instance, ensure that a VPC is available.
+After a Kafka instance is created, download and install the Kafka open-source client on your ECS before producing and consuming messages.
+ - Step 2: Create a Kafka Instance
You can select the specification and quantity, and enable ciphertext access and SASL_SSL when creating a Kafka instance.
+When connecting to a Kafka instance with SASL_SSL enabled, SASL is used for authentication. Data is encrypted with SSL certificates for high-security transmission.
+ - Step 3: Create a Topic
Topics store messages created by producers and subscribed by consumers.
+This section uses the example of creating a topic on the console.
+ - Step 4: Connect to a Kafka Instance to Produce and Consume Messages
Before connecting to a Kafka instance with SASL_SSL enabled, download the certificate and configure the connection in the client configuration file.
+
+ Step 1: Preparations- Before creating a Kafka instance, ensure that your account has permissions to perform operations on Kafka instances.
To achieve fine-grained management of your cloud resources, create IAM user groups and users and grant specified permissions to the users. For more information, see Creating a User and Granting DMS for Kafka Permissions.
+ - Before creating a Kafka instance, ensure that a VPC and a subnet are available.
Configure the VPC and subnet for Kafka instances as required. You can use the current account's existing VPC and subnet, or create new ones. For details about how to create a VPC and a subnet, see Creating a VPC. Note that the VPC must be in the same region as the Kafka instance.
+ - Before creating a Kafka instance, ensure that a security group is available.
Configure the security group for Kafka instances as required. You can use the current account's existing security groups, or create new ones. For details about how to create a security group, see Creating a Security Group.
+To connect to Kafka instances, add the security group rules described in Table 1. Other rules can be added based on site requirements.
+ Table 1 Security group rulesDirection
+ |
+Protocol
+ |
+Port
+ |
+Source address
+ |
+Description
+ |
+
+
+Inbound
+ |
+TCP
+ |
+9093
+ |
+0.0.0.0/0
+ |
+Accessing a Kafka instance over a private network within a VPC (in ciphertext)
+ |
+
+
+
+
+ After a security group is created, it has a default inbound rule that allows communication among ECSs within the security group and a default outbound rule that allows all outbound traffic. If you access your Kafka instance using the private network within a VPC, you do not need to add the rules described in Table 1.
+
+
+ - Before connecting to a Kafka instance, ensure that you have created an elastic cloud server (ECS) that has an EIP, installed the JDK, configured environment variables, and downloaded an open-source Kafka client. The following steps describe how to complete these preparations.
A Linux ECS is taken as an example. For more information on how to install JDK and configure the environment variables for a Windows ECS, please search the Internet. - Log in to the console, click
in the upper left corner, click Elastic Cloud Server under Computing, and then create an ECS.For details about how to create an ECS, see Creating an ECS. If you already have an available ECS, skip this step.
+ - Log in to an ECS as user root.
- Install Java JDK and configure the environment variables JAVA_HOME and PATH.
- Download a JDK.
Use Oracle JDK instead of ECS's default JDK (for example, OpenJDK), because ECS's default JDK may not be suitable. Obtain Oracle JDK 1.8.111 or later from Oracle's official website.
+
+ - Decompress the JDK.
tar -zxvf jdk-8u321-linux-x64.tar.gz
+Change jdk-8u321-linux-x64.tar.gz to your JDK version.
+ - Open the .bash_profile file.
vim ~/.bash_profile
+ - Add the following content:
export JAVA_HOME=/opt/java/jdk1.8.0_321
+export PATH=$JAVA_HOME/bin:$PATH
+Change /opt/java/jdk1.8.0_321 to the path where you install JDK.
+ - Press Esc. Enter the following line and press Enter. Save the .bash_profile file and exit.
:wq
+ - Run the following command to make the change take effect:
source .bash_profile
+ - Check whether the JDK is installed.
java -version
+If the following message is returned, the JDK is installed. java version "1.8.0_321"
+
+
+ - Download an open-source Kafka client.
wget https://archive.apache.org/dist/kafka/2.7.2/kafka_2.12-2.7.2.tgz
+ - Run the following command to decompress the package:
tar -zxf kafka_2.12-2.7.2.tgz
+
+
+
+
+ Step 2: Create a Kafka Instance- Log in to the DMS console, then click Create Instance in the upper right corner of the page.
- Specify the basic instance settings. For details, see Table 2.
+ Table 2 Basic instance settingsParameter
+ |
+Description
+ |
+
+
+Region
+ |
+DMS for Kafka in different regions cannot communicate with each other over an intranet. Select a nearest location for low latency and fast access.
+Select eu-de.
+ |
+
+Project
+ |
+Projects isolate compute, storage, and network resources across geographical regions. For each region, a preset project is available.
+Select eu-de (default).
+ |
+
+AZ
+ |
+An AZ is a physical region where resources use independent power supply and networks. AZs are physically isolated but interconnected through an internal network.
+Select AZ1, AZ2, and AZ3.
+ |
+
+Instance Name
+ |
+You can customize a name that complies with the rules: 4–64 characters; starts with a letter; can contain only letters, digits, hyphens (-), and underscores (_).
+Enter kafka-test.
+ |
+
+Enterprise Project
+ |
+This parameter is for enterprise users. An enterprise project manages project resources in groups. Enterprise projects are logically isolated.
+Select default.
+ |
+
+Specifications
+ |
+Select Cluster to create a cluster Kafka instance.
+ |
+
+Version
+ |
+Kafka version. Cannot be changed once the instance is created.
+Select 2.7.
+ |
+
+CPU Architecture
+ |
+x86
+Retain the default value.
+ |
+
+Broker Flavor
+ |
+Select a broker flavor as required.
+Select kafka.2u4g.cluster.
+ |
+
+Brokers
+ |
+Specify the number of brokers as required.
+Enter 3.
+ |
+
+Storage Space per Broker
+ |
+Select the disk type and specify the disk size as required.
+Total storage space = Storage space per broker × Broker quantity. After the instance is created, you cannot change the disk type.
+Select Ultra-high I/O and enter 100.
+ |
+
+Disk Encryption
+ |
+Skip it.
+ |
+
+Capacity Threshold Policy
+ |
+Select Automatically delete: When the disk reaches the disk capacity threshold (95%), messages can still be produced and consumed, but the earliest 10% of messages will be deleted to ensure sufficient disk space. Use this policy for services intolerant of interruptions. However, data may be lost.
+ |
+
+
+
+
+ - Configure the instance network. For details, see Table 3.
+ Table 3 Configuring instance networkParameter
+ |
+Description
+ |
+
+
+VPC
+ |
+The VPC and subnet cannot be changed once the instance is created.
+Select the VPC and subnet prepared in 2.
+ |
+
+Security Group
+ |
+Select the security group prepared in 3.
+ |
+
+
+
+
+ - Set the instance access mode. For details, see Table 4.
+ Table 4 Setting the instance access modeParameter
+ |
+Sub-Parameter
+ |
+Description
+ |
+
+
+Private Network Access
+ |
+Plaintext Access
+ |
+Disable it.
+ |
+
+Ciphertext Access
+ |
+When this parameter is enabled, SASL authentication is required when a client connects to the Kafka instance.
+- Ciphertext Access is enabled.
- SASL_SSL is selected. Username and Password can be set.
- SASL/PLAIN is enabled.
+ |
+
+Public Network Access
+ |
+-
+ |
+Skip it.
+ |
+
+
+
+
+ - Click Advanced Settings. For more information, see Table 5.
+ Table 5 Advanced settingsParameter
+ |
+Description
+ |
+
+
+Automatic Topic Creation
+ |
+Skip it.
+ |
+
+Tags
+ |
+Skip it.
+ |
+
+Description
+ |
+Skip it.
+ |
+
+
+
+
+ - Click Create.
- Confirm the instance settings.
- Return to the DMS for Kafka page and check whether the instance has been created.
It takes 3 to 15 minutes to create an instance. During this period, the instance status is Creating.
+- If the instance is created successfully, its status changes to Running.
- If the instance is in the Creation failed state, delete it. Then create a new one. If the instance creation fails again, contact customer service.
Instances that fail to be created do not occupy other resources.
+
+
+
+
+ Step 3: Create a Topic- On the DMS for Kafka page, click a Kafka instance.
- In the navigation pane, choose Topics.
- Click Create Topic.
- Enter the topic name, specify other parameters by referring to Table 6, and click OK.
+ Table 6 Topic parametersParameter
+ |
+Description
+ |
+
+
+Topic Name
+ |
+Customize a name that contains 3 to 200 characters, starts with a letter or underscore (_), and contains only letters, digits, periods (.), hyphens (-), and underscores (_).
+The name must be different from preset topics:
+- _consumer_offsets
- _transaction_state
- _trace
- _connect-status
- _connect-configs
- _connect-offsets
+Cannot be changed once the topic is created.
+Enter topic-01.
+ |
+
+Partitions
+ |
+If the number of partitions is the same as that of consumers, the larger the partitions, the higher the consumption concurrency.
+Enter 3.
+ |
+
+Replicas
+ |
+Data is automatically backed up to each replica. When one Kafka broker becomes faulty, data is still available. A higher number of replicas delivers higher reliability.
+Enter 3.
+ |
+
+Aging Time (h)
+ |
+How long messages will be preserved in the topic. Messages older than this period will be deleted and can no longer be consumed.
+Enter 72.
+ |
+
+Synchronous Replication
+ |
+Skip it. When this option is disabled, leader replicas are independent from follower replica synchronization. They receive messages and write them to local logs, then immediately send the successfully written ones to the client.
+ |
+
+Synchronous Flushing
+ |
+Skip it. When this option is disabled, messages are produced and stored in memory instead of written to the disk immediately.
+ |
+
+Message Timestamp
+ |
+Select CreateTime: time when the producer created the message.
+ |
+
+Max. Message Size (bytes)
+ |
+Maximum batch processing size allowed by Kafka. If message compression is enabled in the client configuration file or code of producers, this parameter indicates the size after compression.
+Enter 10,485,760.
+ |
+
+
+
+
+
+
+ Step 4: Connect to a Kafka Instance to Produce and Consume Messages- Obtain the instance connection address.
- In the navigation pane, click Basic Information.
- In the Connection area, view the connection address.
Figure 2 Kafka instance addresses (private network) for intra-VPC access
+
+ - Prepare the file for production and consumption configuration.
- Log in to a Linux ECS.
- Download the client.jks certificate and upload it to the /root directory on the ECS.
To obtain the certificate: On the Kafka console, click the Kafka instance to go to the Basic Information page. Click Download next to SSL Certificate in the Connection area. Decompress the package to obtain the client certificate file client.jks.
+ /root is the path for storing the certificate. Change it to the actual path if needed.
+
+ - Go to the /config directory on the Kafka client.
cd kafka_2.12-2.7.2/config
+ - Add the following commands in both the consumer.properties and producer.properties files (PLAIN is used as an example).
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
+username="**********" \
+password="**********";
+sasl.mechanism=PLAIN
+
+security.protocol=SASL_SSL
+ssl.truststore.location={ssl_truststore_path}
+ssl.truststore.password=dms@kafka
+ssl.endpoint.identification.algorithm=
+Description:
+- username and password are specified when enabling ciphertext access during instance creation.
- ssl.truststore.location is the path for storing the certificate obtained in 2.b.
- ssl.truststore.password is the password of the server certificate. It must be set to dms@kafka and cannot be changed.
- ssl.endpoint.identification.algorithm decides whether to verify the certificate domain name. In this example, leave this parameter blank, which indicates disabling domain name verification.
+
+ - Go to the /bin directory on the Kafka client.
cd ../bin
+ - Produce messages.
./kafka-console-producer.sh --broker-list ${connection addr} --topic ${topic name} --producer.config ../config/producer.properties
+Description:
+- {connection addr}: the address obtained in 1.
- {topic name}: the topic name obtained in 4.
+For example, 192.xxx.xxx.xxx:9093, 192.xxx.xxx.xxx:9093, 192.xxx.xxx.xxx:9093 are the connection addresses of the Kafka instance.
+After running this command, you can send messages to the Kafka instance by entering the information as prompted and pressing Enter. Each line of content will be sent as a message.
+[root@ecs-kafka bin]#./kafka-console-producer.sh --broker-list 192.xxx.xxx.xxx:9093,192.xxx.xxx.xxx:9093,192.xxx.xxx.xxx:9093 --topic topic-demo --producer.config ../config/producer.properties
+>Hello
+>DMS
+>Kafka!
+>^C[root@ecs-kafka bin]#
+Press Ctrl+C to cancel.
+ - Consume messages.
./kafka-console-consumer.sh --bootstrap-server ${connection addr} --topic ${topic name} --from-beginning --consumer.config ../config/consumer.properties
+Description:
+- {connection addr}: the address obtained in 1.
- {topic name}: the topic name obtained in 4.
+Sample:
+[root@ecs-kafka bin]# ./kafka-console-consumer.sh --bootstrap-server 192.xxx.xxx.xxx:9093,192.xxx.xxx.xxx:9093,192.xxx.xxx.xxx:9093 --topic topic-demo --from-beginning --consumer.config ../config/consumer.properties
+Hello
+Kafka!
+DMS
+^CProcessed a total of 3 messages
+[root@ecs-kafka bin]#
+Press Ctrl+C to cancel.
+
+
+
+
+
diff --git a/docs/dms/umn/kafka-trouble-0001.html b/docs/dms/umn/kafka-trouble-0001.html
new file mode 100644
index 00000000..229417ea
--- /dev/null
+++ b/docs/dms/umn/kafka-trouble-0001.html
@@ -0,0 +1,17 @@
+
+
+ Troubleshooting Message Creation Failures
+ SymptomThe system displays the error message "Disk error when trying to access log file on the disk".
+
+ Root CauseThe disk usage of the broker is too high.
+
+
+
+
+
+
diff --git a/docs/dms/umn/kafka-trouble-0002.html b/docs/dms/umn/kafka-trouble-0002.html
new file mode 100644
index 00000000..e1fe4931
--- /dev/null
+++ b/docs/dms/umn/kafka-trouble-0002.html
@@ -0,0 +1,16 @@
+
+
+ Troubleshooting Topic Deletion Failures
+ SymptomA deleted topic still exists.
+
+ Root CauseAutomatic topic creation has been enabled for the instance, and a consumer is connecting to the topic. If services are not stopped, message creation will continue, and new topics will be automatically created.
+
+
+
+
+
diff --git a/docs/dms/umn/kafka-trouble-0004.html b/docs/dms/umn/kafka-trouble-0004.html
new file mode 100644
index 00000000..3cc7eaac
--- /dev/null
+++ b/docs/dms/umn/kafka-trouble-0004.html
@@ -0,0 +1,17 @@
+
+
+ Troubleshooting Error "Topic {{topic_name}} not present in metadata after 60000 ms" During Message Production or Consumption
+ SymptomFor a Kafka instance deployed in multiple AZs, if one of the AZs is faulty, error message "Topic {{topic_name}} not present in metadata after 60000 ms" may be reported on the Kafka client during message production or consumption, as shown in the following figure.
+ 
+
+ SolutionYou can use any of the following methods to solve this problem:
+ - Upgrade the Kafka client to v2.7 or later, and set socket.connection.setup.timeout.ms to a value greater than 1s and less than the value of request.timeout.ms divided by the number of Kafka server nodes. To obtain the number of Kafka server brokers:
- Log in to the DMS console.
- Click a Kafka instance. On the instance details page that is displayed, view the value of Private Network Access in the Connection area. The number of connection addresses next to Address is the number of Kafka server brokers.
+ - Change the value of request.timeout.ms of the Kafka client to a value greater than 127s.
- Change the Linux network parameter net.ipv4.tcp_syn_retries of the Kafka client to 3.
+
+
+
+
diff --git a/docs/dms/umn/kafka-trouble-0709001.html b/docs/dms/umn/kafka-trouble-0709001.html
new file mode 100644
index 00000000..ff949e06
--- /dev/null
+++ b/docs/dms/umn/kafka-trouble-0709001.html
@@ -0,0 +1,38 @@
+
+
+ Troubleshooting 6-Min Latency Between Message Creation and Retrieval
+ SymptomThe duration from message creation to retrieval occasionally reaches 6 minutes, which is not tolerable to services.
+
+ Possible Causes- Service requests are stacked and cannot be processed in time.
According to the monitoring data, only up to 50 messages are stacked and up to 10 messages are created per second, which is within the processing capability limit, so this is not the cause of the symptom.
+ - The EIP inbound traffic decreases.
If the EIP technical support personnel cannot find any problem, this is not the cause of the symptom.
+ - The consumer group is behaving abnormally.
According to the server logs, the consumer group is going through frequent rebalance operations. While most rebalance operations are completed within seconds, some can take several minutes. Messages cannot be retrieved until the rebalance is complete.
+This is the cause of the symptom.
+
+
+ Detailed AnalysisA consumer group may exhibit the following three types of behavior in the log:
+ - Preparing to rebalance group 1
The consumer group starts rebalance, and its status changes to REBALANCING.
+ - Stabilized group
The consumer group completes rebalance, and its status changes to STABILIZED.
+ - Member consumer-xxx in group 1 has failed
A consumer in a consumer group leaves the group if the consumer has not communicated with the server for a long time. This is usually triggered if the message processing is prolonged and the process is blocked.
+
+ The following figure shows the duration between Preparing and Stabilized. The time shown in the figure is UTC+0.
+ Figure 1 Consumer group rebalance
+ This set of data shows that rebalance performance of the consumer group deteriorates after 06:49 on July 1. As a result, the client becomes abnormal.
+
+ Root CauseSometimes, a consumer cannot respond to rebalancing in a timely manner. As a result, the entire consumer group is blocked until the consumer responds.
+
+ Workaround- Use different consumer groups for different services to reduce the impact of a single consumer blocking access.
- max.poll.interval.ms sets the maximum interval for a consumer group to request message consumption. If a consumer does not initiate another consumption request before timeout, the server triggers rebalancing. You can increase the default value of max.poll.interval.ms.
+
+ Solution- Use different consumer groups for different services.
- Optimize the service processing logic to improve the processing efficiency and reduce the blocking time.
+
+ Background KnowledgeA consumer group can be either REBALANCING or STABILIZED.
+ - REBALANCING: If a consumer joins or leaves a consumer group, the metadata of the consumer group changes and no consumers in the consumer group can retrieve messages.
- STABILIZED: The metadata has been synchronized by all consumers in the consumer group, including existing ones. Rebalancing has completed and the consumer group is stabilized. Consumers in the consumer group can retrieve messages normally.
+ A consumer group works as follows:
+ - A consumer leaves or joins the group, changing the consumer group metadata recorded at the server. The server updates the consumer group status to REBALANCING.
- The server waits for all consumers (including existing ones) to synchronize the latest metadata.
- After all consumers have synchronized the latest metadata, the server updates the consumer group status to STABILIZED.
- Consumers retrieve messages.
+
+
+
+
diff --git a/docs/dms/umn/kafka-ug-00001.html b/docs/dms/umn/kafka-ug-00001.html
index b4672fc6..8b67f2fe 100644
--- a/docs/dms/umn/kafka-ug-00001.html
+++ b/docs/dms/umn/kafka-ug-00001.html
@@ -8,28 +8,42 @@
-2022-10-30
+ | 2024-06-14
+ |
+Added the following content:
+
+Modified the following content:
+
+ |
+
+2022-12-01
+ |
+This release incorporates the following changes:
+
+ |
+
+2022-10-30
|
This release incorporates the following changes:
-- Removed support for normal queues.
- Added description about Kafka v2.7, automatic topic creation, and disk encryption in section Creating an Instance.
- Added two methods for connecting to a Kafka instance: Cross-VPC Access to a Kafka Instance and Using DNAT to Access a Kafka Instance.
- Added description about how to modify topic parameters, export topics, and view sample code for connecting to instances. Related sections are Modifying Topic Aging Time, Changing Partition Quantity, Modifying Synchronous Replication and Flushing Settings, Exporting Topics, and Viewing Sample Code.
- Added description about user management in sections Creating a SASL_SSL User, Granting Permissions to a SASL_SSL User, Resetting the SASL_SSL Password, and Deleting a SASL_SSL User.
- Added description about consumer group management in sections Querying Consumer Group Details, Deleting a Consumer Group, Resetting the Consumer Offset, and Viewing Consumer Connection Addresses.
- Added description about how to modify configuration parameters in section Modifying Kafka Parameters.
- Added description about tags in sections Creating an Instance, Viewing an Instance, and Managing Instance Tags.
- Added Instances, Connections, Topics and Partitions, Consumer Groups, Messages, and Monitoring & Alarm to FAQs.
+- Removed support for normal queues.
- Added description about Kafka v2.7, automatic topic creation, and disk encryption in section Creating a Kafka Instance.
- Added two methods for connecting to a Kafka instance: Accessing Kafka Using a VPC Endpoint Across VPCs and Accessing Kafka in a Public Network Using DNAT.
- Added description about how to modify topic parameters, export topics, and view sample code for connecting to instances. Related sections are Changing Kafka Message Retention Period, Changing Kafka Partition Quantity, "Modifying Synchronous Replication and Synchronous Flushing", Exporting the Kafka Topic List, and Viewing Sample Code of Kafka Production and Consumption.
- Added description about user management in sections Configuring Kafka Users, Configuring Kafka Topic Permissions, "Resetting SASL_SSL Password", and "Deleting SASL_SSL Users".
- Added description about consumer group management in sections Querying the Kafka Consumer Group List, Deleting a Kafka Consumer Group, Viewing and Resetting Kafka Consumption Offsets, and Viewing Kafka Consumer Details.
- Added description about how to modify configuration parameters in section Modifying Kafka Instance Configuration Parameters.
- Added description about tags in sections Creating a Kafka Instance, Viewing and Modifying Basic Information of a Kafka Instance, and Configuring Kafka Instance Tags.
- Added Instances, Connections, Topics and Partitions, Consumer Groups, Messages, and Monitoring & Alarm to FAQs.
|
2020-08-11
|
This release incorporates the following changes:
-
+
|
2020-07-29
|
This release incorporates the following changes:
-- Added the description that public access is not supported when the instance specification is 100 MB/s in section Creating an Instance.
- Deleted the description about public network bandwidth being 100 MB/s in section Configuring Public Access.
+
|
2020-02-20
|
This release incorporates the following changes:
-
+
|
2019-12-16
@@ -40,7 +54,7 @@
| 2019-11-30
|
This release incorporates the following changes:
-- Optimized description about Kafka premium instances in section What Is DMS?.
- Added description about DMS functions in section "Functions".
- Added description about the differences between Kafka queues and Kafka premium instances in section "Comparing Kafka Queues and Kafka Premium Instances".
- Added description about the specifications of Kafka queues and Kafka premium instances in section Specifications.
- Added explanation of the relationship between Kafka premium instances and VPCs in section Related Services.
- Optimized description about DMS queues and added description about Kafka premium instances in section Basic Concepts.
- Added recommendations on Kafka client configurations in section Best Practices.
- Removed the Region parameter in section "Creating a Queue".
- Optimized description about queue policy parameters in section "Managing Queue Policies".
- Optimized description about Kafka premium instance operations and parameters in chapter "Managing Kafka Premium Instances".
- Optimized description about DMS metrics in section Kafka Metrics.
- Added description about Kafka premium instance operations supported by CTS in section Operations Logged by CTS.
+- Optimized description about Kafka premium instances in section What Is DMS?.
- Added description about DMS functions in section "Functions".
- Added description about the differences between Kafka queues and Kafka premium instances in section "Comparing Kafka Queues and Kafka Premium Instances".
- Added description about the specifications of Kafka queues and Kafka premium instances in section Specifications.
- Added explanation of the relationship between Kafka premium instances and VPCs in section Related Services.
- Optimized description about DMS queues and added description about Kafka premium instances in section Basic Concepts.
- Added recommendations on Kafka client configurations in section Best Practices.
- Removed the Region parameter in section "Creating a Queue".
- Optimized description about queue policy parameters in section "Managing Queue Policies".
- Optimized description about Kafka premium instance operations and parameters in chapter "Managing Kafka Premium Instances".
- Optimized description about DMS metrics in section Kafka Metrics.
- Added description about Kafka premium instance operations supported by CTS in section Viewing Kafka Audit Logs.
|
2019-11-08
@@ -62,7 +76,7 @@
| 2018-04-28
|
Added the following content:
-
+
|
2018-04-03
diff --git a/docs/dms/umn/kafka-ug-00002.html b/docs/dms/umn/kafka-ug-00002.html
new file mode 100644
index 00000000..03d8f17c
--- /dev/null
+++ b/docs/dms/umn/kafka-ug-00002.html
@@ -0,0 +1,122 @@
+
+
+Acronyms and Abbreviations
+
+ Abbreviation
+ |
+Full Name
+ |
+
+
+ACL
+ |
+Access Control List
+ |
+
+AK/SK
+ |
+Access Key ID/Secret Access Key
+ |
+
+AZ
+ |
+Availability Zone
+ |
+
+CRT
+ |
+Certificate
+ |
+
+CLI
+ |
+Command Line Interface
+ |
+
+CTS
+ |
+Cloud Trace Service
+ |
+
+DMS
+ |
+Distributed Message Service
+ |
+
+ECS
+ |
+Elastic Cloud Server
+ |
+
+EIP
+ |
+Elastic IP
+ |
+
+EOS
+ |
+End of Service & Support
+ |
+
+EVS
+ |
+Elastic Volume Service
+ |
+
+GC
+ |
+Garbage Collection
+ |
+
+JKS
+ |
+Java Key Storage
+ |
+
+JVM
+ |
+Java Virtual Machine
+ |
+
+KMS
+ |
+Key Management Service
+ |
+
+PEM
+ |
+Privacy Enhanced Mail
+ |
+
+SASL
+ |
+Simple Authentication and Security Layer
+ |
+
+SSL
+ |
+Secure Sockets Layer
+ |
+
+TMS
+ |
+Tag Management Service
+ |
+
+TPS
+ |
+Transactions Per Second
+ |
+
+VPC
+ |
+Virtual Private Cloud
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/dms/umn/kafka-ug-00003.html b/docs/dms/umn/kafka-ug-00003.html
new file mode 100644
index 00000000..1412ae60
--- /dev/null
+++ b/docs/dms/umn/kafka-ug-00003.html
@@ -0,0 +1,15 @@
+
+
+
+ Getting Started
+
+
+
+
+
diff --git a/docs/dms/umn/kafka-ug-0001.html b/docs/dms/umn/kafka-ug-0001.html
index d31c02cb..34a928ca 100644
--- a/docs/dms/umn/kafka-ug-0001.html
+++ b/docs/dms/umn/kafka-ug-0001.html
@@ -1,8 +1,8 @@
-Cross-VPC Access to a Kafka Instance
+Accessing Kafka Using a VPC Endpoint Across VPCs
ContextVPCs are logically isolated from each other. If a Kafka instance and a Kafka client are in different VPCs within a region, they cannot communicate with each other. In this case, you can use one of the following methods to access a Kafka instance across VPCs:
- - Establish a VPC peering connection to allow two VPCs to communicate with each other. For details, see section "VPC Peering Connection" in Virtual Private Cloud User Guide.
- Use VPC Endpoint (VPCEP) to establish a cross-VPC connection.
+ - Establish a VPC peering connection to allow two VPCs to communicate with each other. For details, see VPC Peering Connection.
- Use VPC Endpoint (VPCEP) to establish a cross-VPC connection.
ScenarioThe following describes how to use VPCEP to implement cross-VPC access.
VPCEP provides two types of resources: VPC endpoint services and VPC endpoints.
@@ -11,33 +11,33 @@
ProcedureFigure 2 Process for accessing a Kafka instance across VPCs
- Creating a VPC Endpoint Service- Log in to the management console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+ Creating a VPC Endpoint Service- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
- - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- In the Advanced Settings section on the Basic Information tab page, obtain the listeners IP addresses and port IDs of the instance for Cross-VPC Access.
Figure 3 Cross-VPC access–related listeners IP addresses and corresponding port IDs of the Kafka instance
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view its details.
- In the Advanced Settings section on the Basic Information tab page, obtain the listeners IP addresses and port IDs of the instance for Cross-VPC Access.
Figure 3 Cross-VPC access–related listeners IP addresses and corresponding port IDs of the Kafka instance
- In the Network section on the Basic Information tab page, view the VPC to which the Kafka instance belongs.
Figure 4 Viewing the VPC to which the Kafka instance belongs
- Click the VPC to obtain the VPC ID on the VPC console.
Figure 5 Obtaining the VPC ID
- - Call the VPC Endpoint API to create a VPC endpoint service. For details, see "Creating a VPC Endpoint Service" in VPC Endpoint API Reference.
curl -i -k -H 'Accept:application/json' -H 'Content-Type:application/json;charset=utf8' -X POST -H "X-Auth-Token:$token" -d '{"port_id":"38axxxeac","vpc_id":"706xxx888","ports":[{"protocol":"TCP","client_port":9011,"server_port":9011 }],"approval_enabled":false,"service_type":"interface","server_type":"VM"}' https://{endpoint}/v1/{project_id}/vpc-endpoint-services
-Parameter description:
-- token: an access credential issued to an IAM user to bear its identity and permissions. For details on how to obtain a token, see Obtaining a User Token.
- port_id: one of the port IDs obtained in 5.
- vpc_id: VPC ID obtained in 7.
- endpoint: VPCEP endpoint obtained from Regions and Endpoints. The region must be the same as that of the Kafka instance.
- project_id: project ID obtained from "Obtaining a Project ID". The region must be the same as that of the Kafka instance. For details about how to obtain the value, see section "Common Parameters" > "Obtaining a Project ID" in the VPC Endpoint API Reference.
+ - Call the VPC Endpoint API to create a VPC endpoint service. For details, see Creating a VPC Endpoint Service.
POST https://{endpoint}/v1/{project_id}/vpc-endpoint-services
+Set the following request parameter to the specified values, and other parameters as required.
+- port_id: one of the port IDs obtained in 5.
- vpc_id: VPC ID obtained in 7.
- server_type: VM
- client_port: 9011
- server_port: 9011
- protocol: TCP
- approval_enabled: false
- service_type: interface
- endpoint: VPCEP endpoint obtained from Regions and Endpoints. The region must be the same as that of the Kafka instance.
- project_id: project ID obtained from Obtaining a Project ID. The region must be the same as that of the Kafka instance.
Record the value of service_name in the response. This parameter indicates the name of the VPC endpoint service.
- Repeat 8 to create VPC endpoint services for other port IDs obtained in 5 and record the VPC endpoint service names.
- (Optional) Adding a Whitelist RecordIf the Kafka client and Kafka instance belong to different accounts, add the ID of the account to which the Kafka client belongs to the whitelist of the endpoint service. For details, see Add a Whitelist Record.
+ (Optional) Adding a Whitelist RecordIf the Kafka client and Kafka instance belong to different accounts, add the ID of the account to which the Kafka client belongs to the whitelist of the endpoint service. For details, see Add a Whitelist Record.
- Creating a VPC Endpoint- Click Service List. Then choose Networking > VPC Endpoint.
- Click Create VPC Endpoint.
- Set the following parameters:
- Region: Select the region that the Kafka instance is in.
- Service Category: Select Find a service by name.
- VPC Endpoint Service Name: Enter the VPC endpoint service name recorded in 8 and click Verify. If Service name found is displayed, proceed with subsequent operations.
- VPC: Select the VPC that the Kafka instance is in.
- Subnet: Select the subnet that the Kafka instance is in.
- Private IP Address: Select Automatic.
+Creating a VPC Endpoint- Click Service List. Then choose Network > VPC Endpoint.
- Click Create VPC Endpoint.
- Set the following parameters:
- Region: Select the region that the Kafka instance is in.
- Service Category: Select Find a service by name.
- VPC Endpoint Service Name: Enter the VPC endpoint service name recorded in 8 and click Verify. If Service name found is displayed, proceed with subsequent operations.
- VPC: Select the VPC that the Kafka client is in.
- Subnet: Select the subnet that the Kafka client is in.
- Private IP Address: Select Automatic.
Retain the default values for other parameters. For details, see Creating a VPC Endpoint.
- - Click Create Now.
- Confirm the configurations and submit the request.
- Go back to the VPC endpoint list and check whether the status of the created VPC endpoint has changed to Accepted. The Accepted state means that the VPC endpoint has been connected to the VPC endpoint service.
Figure 6 Checking the VPC endpoint status
+ - Click Create Now.
- Confirm the configurations and submit the request.
- Go back to the VPC endpoint list and check whether the status of the created VPC endpoint has changed to Accepted. The Accepted state means that the VPC endpoint has been connected to the VPC endpoint service.
Figure 6 Checking the VPC endpoint status
- Click the VPC endpoint ID. On the Summary tab page, obtain the private IP address.
You can use the private IP address to access the VPC endpoint service.
Figure 7 Viewing the private IP address
- Repeat 1 to 7 to create a VPC endpoint for each VPC endpoint service created in 9, and view and record the private IP addresses of the VPC endpoint services.
-Changing the advertised.listeners IP Address- Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- On the Advanced Settings section of the Basic Information tab page, click Modify for Cross-VPC Access to change the value of advertised.listeners IP address to the private IP addresses recorded in 7 and 8. Click Save.
Each IP address must match the corresponding port ID. Otherwise, the network will be disconnected.
+ Changing the advertised.listeners IP Addresses- Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view its details.
- On the Advanced Settings section of the Basic Information tab page, click Modify for Cross-VPC Access to change the value of advertised.listeners IP address to the private IP addresses recorded in 7 and 8. Click Save.
Each IP address must match the corresponding port ID. Otherwise, the network will be disconnected.
Figure 8 Changing the advertised.listeners IP addresses
- Verifying ConnectivityCheck whether messages can be created and retrieved by referring to Accessing a Kafka Instance Without SASL or Accessing a Kafka Instance with SASL.
+ Verifying ConnectivityCheck whether messages can be created and retrieved by referring to Connecting to Kafka Using the Client (Plaintext Access) or Connecting to Kafka Using the Client (Ciphertext Access).
Notes:
- The address for connecting to a Kafka instance is in the format of "advertised.listeners IP:9011". For example, the addresses for connecting to the Kafka instance shown in Figure 8 are 10.158.0.151:9011,10.158.0.162:9011,10.158.0.164:9011.
- Configure inbound rules for the security group of the Kafka instance to allow access from 198.19.128.0/17 over port 9011.
- If a network access control list (ACL) has been configured for the subnet of this instance, configure inbound rules for the network ACL to allow access from 198.19.128.0/17 and from the subnet used by the VPC endpoint.
198.19.128.0/17 is the network segment allocated to the VPCEP service. To use VPCEP, allow access from this network segment.
@@ -46,7 +46,7 @@
diff --git a/docs/dms/umn/kafka-ug-0002.html b/docs/dms/umn/kafka-ug-0002.html
index 3dddf617..4cde3d02 100644
--- a/docs/dms/umn/kafka-ug-0002.html
+++ b/docs/dms/umn/kafka-ug-0002.html
@@ -1,29 +1,34 @@
- Granting Permissions to a SASL_SSL User
- DMS supports ACL permission management for topics. You can differentiate the operations that different users are allowed to perform on a topic by granting the users different permissions.
- This section describes how to grant topic permissions to a SASL_SSL user. For details about how to create a SASL_SSL user, see Creating a SASL_SSL User.
- If no SASL_SSL user is granted any permission for a topic, all users can subscribe to or publish messages to the topic.
- If one or more SASL_SSL users are granted permissions for a topic, only the authorized users can subscribe to or publish messages to the topic.
- Prerequisites- SASL_SSL has been enabled when you create the Kafka instance.
- (Optional) A SASL_SSL user has been created. For details, see Creating a SASL_SSL User.
-
- Granting Permissions to a SASL_SSL User- Log in to the management console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+ Configuring Kafka Topic Permissions
+ DMS supports access control lists (ACLs) for topics. You can differentiate user permissions by granting users different permissions in a topic.
+ This section describes how to grant topic permissions to users after ciphertext access is enabled for Kafka instances. For details about how to create a user, see Configuring Kafka Users.
+ This function is unavailable for single-node instances.
- - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- In the navigation pane, choose the Topics tab.
- In the row that contains the topic for which you want to configure user permissions, click Grant User Permission.
In the upper part of the Grant User Permission dialog box, the topic information is displayed, including the topic name, number of partitions, aging time, number of replicas, and whether synchronous flushing is enabled. In the middle part, you can use the search box to search for a user if there are many SASL_SSL users. In the Users area, the list of created SASL_SSL users is displayed. In the Selected area, you can grant permissions to the SASL_SSL users.
- - In the Users area of the Grant User Permission dialog box, select target users. In the Selected area, configure permissions (Subscribe, Publish, and Publish/Subscribe) for the users.
Figure 1 Granting user permissions
-As shown in Figure 1, only the test, send, and receive users can subscribe to or publish messages to topic-01. The send_receive user cannot subscribe to or publish messages to topic-01.
+Constraints- If no user is granted any permission for a topic and allow.everyone.if.no.acl.found is set to true, all users can subscribe to or publish messages to the topic.
- If allow.everyone.if.no.acl.found is set to false, only the authorized users can subscribe to or publish messages to the topic. The value of allow.everyone.if.no.acl.found can be modified.
- If one or more users are granted permissions for a topic, only the authorized users can subscribe to or publish messages to the topic.
- If both the default and individual user permissions are configured for a topic, the union of the permissions is used.
+
+
+Configuring Topic Permissions- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- In the navigation pane, choose Topics.
- In the row containing the desired topic, click Grant User Permission.
In the upper part of the Grant User Permission dialog box, the topic information is displayed, including the topic name, number of partitions, aging time, number of replicas, and whether synchronous flushing and replication are enabled. You can enable Default permissions to grant the same permissions for all users. You can use the search box to search for a user if there are many users. In the Users area, the list of created users is displayed. In the Selected area, grant permissions to the users.
+ - Grant topic permissions to users.
- To grant the same permissions to all users, select Default permissions and then select permissions. As shown in the following figure, all users have the permission to publish messages to this topic.
Figure 1 Granting the same permissions to all users
+ - To grant different permissions to different users, do not select Default permissions. In the Users area of the Grant User Permission dialog box, select target users. In the Selected area, configure permissions (Subscribe, Publish, or Publish/Subscribe) for the users. As shown in the following figure, only the test, send, and receive users can subscribe to or publish messages to this topic. The send_receive user cannot subscribe to or publish messages to this topic.
Figure 2 Granting permissions to individual users
+
+If both the default and individual user permissions are configured for a topic, the union of the permissions is used. As shown in the following figure, the test and receive users can subscribe to and publish messages to this topic.
+Figure 3 Granting topic permissions to users
- Click OK.
On the Topics tab page, click next to the topic name to view the authorized users and their permissions.
-Figure 2 Viewing authorized users and their permissions
+Figure 4 Viewing authorized users and their permissions
-(Optional) Removing Permissions from a SASL_SSL User- Log in to the management console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+ (Optional) Deleting Topic Permissions- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
- - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- In the navigation pane, choose the Topics tab.
- In the row that contains the topic for which you want to remove user permissions, click Grant User Permission.
- In the Selected area of the displayed Grant User Permission dialog box, locate the row that contains the SASL_SSL user whose permissions are to be removed, click Delete, and click OK.
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- In the navigation pane, choose Topics.
- In the row containing the desired topic, click Grant User Permission.
- In the Selected area of the displayed Grant User Permission dialog box, locate the row that contains the user whose permissions are to be removed, click Delete, and click OK.
diff --git a/docs/dms/umn/kafka-ug-0003.html b/docs/dms/umn/kafka-ug-0003.html
index affec0b4..2485753a 100644
--- a/docs/dms/umn/kafka-ug-0003.html
+++ b/docs/dms/umn/kafka-ug-0003.html
@@ -1,19 +1,46 @@
- Creating a SASL_SSL User
- DMS supports ACL permission management for topics. You can differentiate the operations that different users are allowed to perform on a topic by granting the users different permissions.
- This section describes how to create a SASL_SSL user after SASL_SSL is enabled for a Kafka instance. For details about how to grant user permissions, see Granting Permissions to a SASL_SSL User.
- PrerequisitesSASL_SSL has been enabled when you create the Kafka instance.
+ Configuring Kafka Users
+ DMS supports access control lists (ACLs) for topics. You can differentiate user permissions by granting users different permissions in a topic.
+ This section describes how to create users, reset the password, and delete users with ciphertext access enabled. For details about how to grant topic permissions for users, see Configuring Kafka Topic Permissions.
+ The maximum number of users that can be created for a Kafka instance is 20 or 500. Check the console for the actual limit.
+ There are two ways to create a user on the console. Accordingly, there are two ways to reset the user's password:
+
+ Prerequisites- Ciphertext access has been enabled for the Kafka instance.
- Kafka users can be configured only for Kafka instances in the Running state.
- Procedure- Log in to the management console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+ Constraints- This function is unavailable for single-node instances.
- Resetting a user password will interrupt services. Change the user password in the client configuration file or code as soon as possible.
+
+ Creating a User- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
- - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- On the Users tab page, click Create User.
- In the displayed Create User dialog box, set the username and password, and click OK.
After the SASL_SSL user is created, grant permissions to the user by referring to Granting Permissions to a SASL_SSL User.
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view its details.
- On the Users page, click Create User.
- In the displayed Create User dialog box, set the username and password, and click OK.
After the user is created, grant permissions to the user by referring to Configuring Kafka Topic Permissions.
+ Resetting the Password (for the Initial User)- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Reset the password for the initial user in either of the following ways.
- Choose More > Reset Kafka Password in the row containing the desired Kafka instance.
- Click the desired Kafka instance to view its details. Choose More > Reset Kafka Password in the upper left corner.
- Click the desired Kafka instance to view its details. On the Basic Information page, click Reset Password next to Username in the Connection section.
- Click the desired Kafka instance to view its details. On the Users page, click Reset Password in the row containing the desired user.
+ - Enter and confirm a new password, and click OK.
- If the password is successfully reset, a success message is displayed.
- If the password fails to be reset, a failure message is displayed. In this case, reset the password again. If you still fail to reset the password after multiple attempts, contact customer service.
+ The system will display a success message only after the password is successfully reset on all brokers.
+
+
+
+ Resetting the User Password (for Non-initial Users)- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view its details.
- On the Users page, click Reset Password in the row containing the desired user.
- Enter and confirm a new password, and click OK.
- If the password is successfully reset, a success message is displayed.
- If the password fails to be reset, a failure message is displayed. In this case, reset the password again. If you still fail to reset the password after multiple attempts, contact customer service.
+ The system will display a success message only after the password is successfully reset on all brokers.
+
+
+
+ Deleting a User- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view its details.
- Delete a user in either of the following ways:
- On the Users page, click Delete in the row containing the desired user.
- On the Users page, select one or more users and click Delete above the list.
+ The initial user set when ciphertext access is enabled for the first time cannot be deleted.
+
+ - In the displayed Delete User dialog box, click OK to delete the user.
+
diff --git a/docs/dms/umn/kafka-ug-0004.html b/docs/dms/umn/kafka-ug-0004.html
index 17a6c961..b6a6de2b 100644
--- a/docs/dms/umn/kafka-ug-0004.html
+++ b/docs/dms/umn/kafka-ug-0004.html
@@ -1,10 +1,12 @@
- Viewing Disk Usage
- On the Kafka console, you can view the disk usage of each broker.
- Procedure- Log in to the management console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+ Viewing Kafka Disk Usage
+ This section describes how to view the disk usage of each broker on the Kafka console.
+ This function is unavailable for single-node instances.
- - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click a Kafka instance to go to the Basic Information tab page.
- Click the Disk Usage Statistics tab.
Figure 1 Viewing disk usage
+Viewing Kafka Disk Usage- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click a Kafka instance to go to the Basic Information page.
- Go to the Disk Usage Statistics page.
Figure 1 Viewing disk usage
You can query topics that use the most disk space or topics that have used a specified amount or percentage of disk space.
In the upper right corner of the page, click View Metric. On the displayed Cloud Eye page, you can view metrics of Kafka instances.
diff --git a/docs/dms/umn/kafka-ug-0006.html b/docs/dms/umn/kafka-ug-0006.html
index e7d13e0a..37b68942 100644
--- a/docs/dms/umn/kafka-ug-0006.html
+++ b/docs/dms/umn/kafka-ug-0006.html
@@ -1,29 +1,31 @@
- Changing Partition Quantity
- After creating a topic, you can increase the number of partitions based on service requirements.
- Changing the number of partitions does not affect services.
+ Changing Kafka Partition Quantity
+ After creating a topic, you can increase the number of partitions as required.
+ Changing the number of partitions does not restart the instance or affect services.
Methods for changing the partition quantity:
-
- Method 1: By Using the Console- Log in to the management console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+
+ Method 1: By Using Kafka Console- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
- - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- Click the Topics tab.
- Modify the number of partitions using either of the following methods:
- Select one or more topics and click Edit Topic in the upper left corner.
- In the row containing the desired topic, click Edit.
- - In the Edit Topic dialog box, enter the number of partitions and click OK.
- The number of partitions can only be increased.
- To ensure performance, the Kafka console allows a maximum of 100 partitions for each topic.
- The total number of partitions of all topics cannot exceed the maximum number of partitions allowed by the instance.
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- In the navigation pane, choose Topics.
- Modify the number of partitions using either of the following methods:
- Select one or more topics and click Edit Topic in the upper left corner.
- In the row containing the desired topic, click Edit.
+ - In the Edit Topic dialog box, enter the number of partitions and click OK.
- The number of partitions can only be increased.
- To ensure performance, the Kafka console allows a maximum of 200 partitions for each topic.
- The total number of partitions of all topics cannot exceed the maximum number of partitions allowed by the instance.
Method 2: By Using Kafka CLIIf your Kafka client version is later than 2.2, you can use kafka-topics.sh to change the partition quantity.
-
diff --git a/docs/dms/umn/kafka-ug-0007.html b/docs/dms/umn/kafka-ug-0007.html
index 0d9d30b8..215d0b40 100644
--- a/docs/dms/umn/kafka-ug-0007.html
+++ b/docs/dms/umn/kafka-ug-0007.html
@@ -1,235 +1,312 @@
-Modifying Kafka Parameters
+Modifying Kafka Instance Configuration Parameters
ScenarioYour Kafka instances, topics, and consumers come with default configuration parameter settings. You can modify common parameters on the DMS console. For details about parameters that are not listed on the console, see the Kafka official website.
- Parameters of v1.1.0 instances are all static parameters. v2.3.0/2.7 instances have both dynamic and static parameters.
+ Kafka instances have dynamic and static parameters:
- Dynamic parameters: Modifying dynamic parameters will not restart the instance.
- Static parameters: After static parameters are modified, you must manually restart the instance.
- Configuration parameters of some old instances cannot be modified. Check whether your instance parameters can be modified on the console. If they cannot be modified, contact customer service.
+ - Configuration parameters of some old instances cannot be modified. Check whether your instance parameters can be modified on the console. If they cannot be modified, contact customer service.
- This function is not available for single-node instances.
PrerequisitesYou can modify configuration parameters of a Kafka instance when the instance is in the Running state.
- Procedure- Log in to the management console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+ Modifying Kafka Instance Configuration Parameters- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
- - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- On the Parameters tab page, click Edit in the row containing the parameter to modify. Table 1 describes the parameters of v1.1.0 instances. Table 2 and Table 3 describe the parameters of v2.3.0/2.7 instances.
- Table 1 Static parameters (v1.1.0 instances)Parameter
+- Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- On the Parameters page, click Edit in the row containing the parameter to modify.
Parameters of v1.1.0 instances are described in Table 2 and Table 1. Parameters of v2.3.0/v2.7/3.x instances are described in Table 3 and Table 4.
+
+Table 1 Dynamic parameters (v1.1.0 instances)Parameter
|
-Description
+ | Description
|
-Value Range
+ | Value Range
|
-Default Value
+ | Default Value
|
-min.insync.replicas
+ | auto.create.groups.enable
|
-If a producer sets the acks parameter to all (or -1), the min.insync.replicas parameter specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.
+ | Whether to automatically create consumer groups.
+This parameter is not displayed on the console for some earlier instances. The function of automatically creating consumer groups is enabled by default and cannot be disabled on the console.
|
-1–3
+ | true/false
|
-1
+ | true
|
-message.max.bytes
+ | offsets.retention.minutes
|
-Maximum length of a single message, in bytes.
+ | The maximum period for which a consumption position can be retained, starting from the time it is submitted. Positions retained beyond this period will be deleted. Each time a consumption position is submitted to a topic partition, its retention period resets to 0. The unit is minute.
+This parameter is displayed as a static one for certain earlier instances.
|
-0–10,485,760
+ | 1440–30240
|
-10,485,760
+ | 20160
|
-unclean.leader.election.enable
+ |
+
+
+
+Table 2 Static parameters (v1.1.0 instances)Parameter
+ |
+Description
+ |
+Value Range
+ |
+Default Value
+ |
+
+
+min.insync.replicas
|
-Indicates whether to allow replicas not in the ISR set to be elected as the leader as a last resort, even though doing so may result in data loss.
+ | If a producer sets the acks parameter to all (or -1), the min.insync.replicas parameter specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.
|
-true or false
+ | 1–3
|
-true
+ | 1
|
-connections.max.idle.ms
+ | message.max.bytes
|
-Idle connection timeout (in ms). Connections that are idle for the duration specified by this parameter will be closed.
+ | Maximum length of a single message, in bytes.
|
-5000–600,000
+ | 0–10,485,760
|
-600,000
+ | 10,485,760
|
-log.retention.hours
+ | unclean.leader.election.enable
|
-Duration (in hours) for retaining a log file.
+ | Indicates whether to allow replicas not in the ISR set to be elected as the leader as a last resort, even though doing so may result in data loss.
+ |
+true or false
+ |
+false
+ |
+
+connections.max.idle.ms
+ |
+Idle connection timeout (in ms). Connections that are idle for the duration specified by this parameter will be closed.
+ |
+5000–600,000
+ |
+600,000
+ |
+
+log.retention.hours
+ |
+Duration (in hours) for retaining a log file.
This parameter takes effect only for topics that have no aging time configured. If there is aging time configured for topics, it overrides this parameter.
|
-1–168
+ | 1–168
|
-72
+ | 72
|
-max.connections.per.ip
+ | max.connections.per.ip
|
-The maximum number of connections allowed from each IP address. Request for new connections will be rejected once the limit is reached. The limit set using this parameter will be replaced if there are overrides configured using the max.connections.per.ip.overrides parameter.
+ | The maximum number of connections allowed from each IP address. Request for new connections will be rejected once the limit is reached.
|
-100–20,000
+ | 100–20,000
|
-1000
+ | 1000
|
-group.max.session.timeout.ms
+ | group.max.session.timeout.ms
|
-The maximum session timeout (in ms) for consumers. A longer timeout gives consumers more time to process messages between heartbeats but results in a longer time to detect failures.
+ | The maximum session timeout (in ms) for consumers. A longer timeout gives consumers more time to process messages between heartbeats but results in a longer time to detect failures.
|
-6000–1,800,000
+ | 6000–1,800,000
|
-1,800,000
+ | 1,800,000
|
-default.replication.factor
+ | default.replication.factor
|
-The default number of replicas configured for an automatically created topic.
+ | The default number of replicas configured for an automatically created topic.
|
-1–3
+ | 1–3
|
-3
+ | 3
|
-num.partitions
+ | allow.everyone.if.no.acl.found
|
-The default number of partitions configured for each automatically created topic.
+ | When this parameter is set to true, all users can access resources without ACL rules.
+This parameter is displayed only when SASL is enabled for the instance or ciphertext access is used.
+This parameter cannot be modified for certain earlier instances.
|
-1–100
+ | true/false
|
-3
+ | true
|
-group.min.session.timeout.ms
+ | num.partitions
|
-The minimum session timeout (in ms) for consumers. A shorter timeout enables quicker failure detection but results in more frequent consumer heartbeating, which can overwhelm broker resources.
+ | The default number of partitions configured for each automatically created topic.
|
-6000–300,000
+ | 1–200
|
-6000
+ | 3
+ |
+
+group.min.session.timeout.ms
+ |
+The minimum session timeout (in ms) for consumers. A shorter timeout enables quicker failure detection but results in more frequent consumer heartbeating, which can overwhelm broker resources.
+ |
+6000–300,000
+ |
+6000
|
-Table 2 Dynamic parameters (v2.3.0/2.7 instances)Parameter
+Table 3 Dynamic parameters (2.3.0/2.7/3.x instances)Parameter
|
-Description
+ | Description
|
-Value Range
+ | Value Range
|
-Default Value
+ | Default Value
|
-min.insync.replicas
+ | min.insync.replicas
|
-If a producer sets the acks parameter to all (or -1), the min.insync.replicas parameter specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.
+ | If a producer sets the acks parameter to all (or -1), the min.insync.replicas parameter specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.
|
-1–3
+ | 1–3
|
-1
+ | 1
|
-message.max.bytes
+ | message.max.bytes
|
-Maximum length of a single message, in bytes.
+ | Maximum length of a single message, in bytes.
|
-0–10,485,760
+ | 0–10,485,760
|
-10,485,760
+ | 10,485,760
|
-unclean.leader.election.enable
+ | auto.create.groups.enable
|
-Indicates whether to allow replicas not in the ISR set to be elected as the leader as a last resort, even though doing so may result in data loss.
+ | Whether to automatically create consumer groups.
+This parameter is not displayed on the console for some earlier instances. The function of automatically creating consumer groups is enabled by default and cannot be disabled on the console.
|
-true or false
+ | true/false
|
-true
+ | true
+ |
+
+max.connections.per.ip
+ |
+The maximum number of connections allowed from each IP address. Request for new connections will be rejected once the limit is reached.
+ |
+100–20,000
+ |
+1000
+ |
+
+unclean.leader.election.enable
+ |
+Indicates whether to allow replicas not in the ISR set to be elected as the leader as a last resort, even though doing so may result in data loss.
+ |
+true or false
+ |
+false
+ |
+
+offsets.retention.minutes
+ |
+The maximum period for which a consumption position can be retained, starting from the time it is submitted. Positions retained beyond this period will be deleted. Each time a consumption position is submitted to a topic partition, its retention period resets to 0. The unit is minute.
+This parameter is displayed as a static one for certain earlier instances.
+ |
+1440–30240
+ |
+20160
|
-Table 3 Static parameters (v2.3.0/2.7 instances)Parameter
+Table 4 Static parameters (2.3.0/2.7/3.x instances)Parameter
|
-Description
+ | Description
|
-Value Range
+ | Value Range
|
-Default Value
+ | Default Value
|
-connections.max.idle.ms
+ | connections.max.idle.ms
|
-Idle connection timeout (in ms). Connections that are idle for the duration specified by this parameter will be closed.
+ | Idle connection timeout (in ms). Connections that are idle for the duration specified by this parameter will be closed.
|
-5000–600,000
+ | 5000–600,000
|
-600,000
+ | 600,000
|
-log.retention.hours
+ | log.retention.hours
|
-Duration (in hours) for retaining a log file.
+ | Duration (in hours) for retaining a log file.
This parameter takes effect only for topics that have no aging time configured. If there is aging time configured for topics, it overrides this parameter.
|
-1–168
+ | 1–168
|
-72
+ | 72
|
-max.connections.per.ip
+ | group.max.session.timeout.ms
|
-The maximum number of connections allowed from each IP address. Request for new connections will be rejected once the limit is reached. The limit set using this parameter will be replaced if there are overrides configured using the max.connections.per.ip.overrides parameter.
+ | The maximum session timeout (in ms) for consumers. A longer timeout gives consumers more time to process messages between heartbeats but results in a longer time to detect failures.
|
-100–20,000
+ | 6000–1,800,000
|
-1000
+ | 1,800,000
|
-group.max.session.timeout.ms
+ | default.replication.factor
|
-The maximum session timeout (in ms) for consumers. A longer timeout gives consumers more time to process messages between heartbeats but results in a longer time to detect failures.
+ | The default number of replicas configured for an automatically created topic.
|
-6000–1,800,000
+ | 1–3
|
-1,800,000
+ | 3
|
-default.replication.factor
+ | allow.everyone.if.no.acl.found
|
-The default number of replicas configured for an automatically created topic.
+ | When this parameter is set to true, all users can access resources without ACL rules.
+This parameter is displayed only when SASL is enabled for the instance or ciphertext access is used.
+This parameter cannot be modified for certain earlier instances.
|
-1–3
+ | true/false
|
-3
+ | true
|
-num.partitions
+ | num.partitions
|
-The default number of partitions configured for each automatically created topic.
+ | The default number of partitions configured for each automatically created topic.
|
-1–100
+ | 1–200
|
-3
+ | 3
|
-group.min.session.timeout.ms
+ | group.min.session.timeout.ms
|
-The minimum session timeout (in ms) for consumers. A shorter timeout enables quicker failure detection but results in more frequent consumer heartbeating, which can overwhelm broker resources.
+ | The minimum session timeout (in ms) for consumers. A shorter timeout enables quicker failure detection but results in more frequent consumer heartbeating, which can overwhelm broker resources.
|
-6000–300,000
+ | 6000–300,000
|
-6000
+ | 6000
|
@@ -242,4 +319,9 @@
+
diff --git a/docs/dms/umn/kafka-ug-0009.html b/docs/dms/umn/kafka-ug-0009.html
deleted file mode 100644
index f10e2323..00000000
--- a/docs/dms/umn/kafka-ug-0009.html
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-
- Managing Users
-
-
-
-
-
diff --git a/docs/dms/umn/kafka-ug-0011.html b/docs/dms/umn/kafka-ug-0011.html
index 057742bb..f13db34a 100644
--- a/docs/dms/umn/kafka-ug-0011.html
+++ b/docs/dms/umn/kafka-ug-0011.html
@@ -1,20 +1,21 @@
-
- Managing Consumer Groups
-
-
+ Managing Consumer Groups
+
-
diff --git a/docs/dms/umn/kafka-ug-0012.html b/docs/dms/umn/kafka-ug-0012.html
index 2914fa65..a43395e4 100644
--- a/docs/dms/umn/kafka-ug-0012.html
+++ b/docs/dms/umn/kafka-ug-0012.html
@@ -1,30 +1,32 @@
- Deleting a Consumer Group
- You can delete a consumer group using either of the following methods:
+ Deleting a Kafka Consumer Group
+ You can delete a consumer group in either of the following ways:
- Method 1: Delete a consumer group on the console.
- Method 2: Use Kafka CLI to delete a consumer group. (Ensure that the Kafka instance version is the same as the CLI version.)
PrerequisitesThe status of the consumer group to be deleted is EMPTY.
- Method 1: Deleting a Consumer Group on the Console- Log in to the management console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+ Constraints- If auto.create.groups.enable is set to true, the consumer group status is EMPTY, and no offset has been submitted, the system automatically deletes the consumer group 10 minutes later.
- If auto.create.groups.enable is set to false, the system does not automatically delete consumer groups. You can manually delete them.
- If a consumer group has never committed an offset, the group will be deleted after the Kafka instance restarts.
+
+ Method 1: Deleting a Consumer Group on the Console- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
- Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- In the navigation pane, choose the Consumer Groups tab.
- Delete consumer groups using either of the following methods:
- Select one or more consumer groups and click Delete Consumer Group above the consumer group list.
- In the row containing the consumer group you want to delete, click Delete.
A consumer group can be deleted only when its status is EMPTY.
Consumer group statuses include:
- DEAD: The consumer group has no member or metadata.
- EMPTY: The consumer group has metadata but has no member.
- PREPARING_REBALANCE: The consumer group is to be rebalanced.
- COMPLETING_REBALANCE: All members have joined the consumer group.
- STABLE: Members in the consumer group can consume messages normally.
- - In the displayed Delete Consumer Group dialog box, click Yes.
+ - In the displayed Delete Consumer Group dialog box, click OK.
Method 2: Using the CLI to Delete a Consumer GroupThe following uses Linux as an example.
- - Download Kafka CLI v1.1.0, v2.7.2, or v2.3.0. Ensure that the Kafka instance and the CLI are of the same version.
- Use the CLI to connect to the Kafka instance. For details, see Accessing a Kafka Instance Without SASL or Accessing a Kafka Instance with SASL.
- In the /{directory where the CLI is located}/kafka_{version}/bin/ directory, run the following command to delete a consumer group:
kafka-consumer-groups.sh --bootstrap-server {Kafka instance connection address} --delete --group {consumer group name}
+- Download Kafka CLI v1.1.0, v2.3.0, v2.7.2, or v3.4.0. Ensure that the Kafka instance and the CLI are of the same version.
- Use the CLI to connect to the Kafka instance. For details, see Connecting to Kafka Using the Client (Plaintext Access) or Connecting to Kafka Using the Client (Ciphertext Access).
- In the /bin directory of the Kafka client, run the following command:
./kafka-consumer-groups.sh --bootstrap-server ${connection-address} --delete --group ${consumer-group-name}
+Example:
[root@zk-server-1 bin]# ./kafka-consumer-groups.sh --bootstrap-server 192.168.1.245:9091,192.168.1.86:9091,192.168.1.128:9091 --delete --group bbbb
Note: This will not show information about old Zookeeper-based consumers.
Deletion of requested consumer groups ('bbbb') was successful.
- If SASL authentication is enabled for the Kafka instance, the --command-config {consumer.properties file with SASL authentication} parameter must be added to the preceding commands. For details about the consumer.properties file, see Accessing a Kafka Instance with SASL.
+ If ciphertext access is enabled for the Kafka instance, the --command-config {consumer.properties file with SASL authentication} parameter must be added to the preceding commands. For details about the consumer.properties file, see Connecting to Kafka Using the Client (Ciphertext Access).
-
diff --git a/docs/dms/umn/kafka-ug-0014.html b/docs/dms/umn/kafka-ug-0014.html
index 916b6b9c..659d939e 100644
--- a/docs/dms/umn/kafka-ug-0014.html
+++ b/docs/dms/umn/kafka-ug-0014.html
@@ -1,37 +1,50 @@
- Resetting the Consumer Offset
- Resetting the consumer offset is to change the retrieval position of a consumer.
+ Viewing and Resetting Kafka Consumption Offsets
+ This section describes how to view and reset consumption offsets. Resetting consumption offsets is to change the consumption position for consumers.
Messages may be retrieved more than once after the offset is reset. Exercise caution when performing this operation.
PrerequisitesThe consumer offset cannot be reset on the fly. You must first stop retrieval of the desired consumer group.
-
- Procedure- Log in to the management console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
 After a client is stopped, the server considers the client offline only after the time period specified in ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG (10,000 ms by default).
- - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- In the navigation pane, choose the Consumer Groups tab.
- Click the name of the desired consumer group.
- On the Consumer Offset tab page, you can perform the following operations:
- To reset the consumer offset of all partitions of a topic, click Reset Consumer Offset in the row containing the desired topic.
- To reset the consumer offset of a single partition of a topic, click Reset Consumer Offset in the row containing the desired partition.
+
+ Viewing Consumer Offsets (Console)- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- In the navigation pane, choose Consumer Groups.
- Click the name of the desired consumer group.
- On the Consumer Offset tab page, view the list of topics that the consumer group has subscribed to, total number of messages accumulated in the topic, message consumption progress in each partition of the topic (accumulated messages, offset, latest offset, consumer ID, consumer address, and client ID).
- (Optional) To query the consumer offsets of a specific topic, enter the topic name in the search box and press Enter.
+
+ Viewing Consumer Offsets (Kafka CLI)
+
+ Resetting Consumer Offsets- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- In the navigation pane, choose the Consumer Groups tab.
- Click the name of the desired consumer group.
- On the Consumer Offset tab page, you can perform the following operations:
- To reset the consumer offset of all partitions of a single topic, click Reset Consumer Offset in the row containing the desired topic.
- To reset the consumer offset of a single partition of a single topic, click Reset Consumer Offset in the row containing the desired partition.
- To reset the consumer offset of all partitions in all topics, click One-touch Reset Consumer Offset above the list.
- In the displayed Reset Consumer Offset dialog box, set the parameters by referring to Table 1.
- Table 1 Parameters for resetting the consumer offsetParameter
+Table 1 Parameters for resetting the consumer offsetParameter
|
-Description
+ | Description
|
-Reset By
+ | Reset By
|
-You can reset an offset by:
+ | You can reset an offset by:
- Time: Reset the offset to the specified time.
- Offset: Reset the offset to the specified position.
+If you reset offsets in batches, they can only be reset to the specified time.
|
-Time
+ | Time
|
-Set this parameter if Reset By is set to Time.
+ | Set this parameter if Reset By is set to Time.
Select a time point. After the reset is complete, retrieval starts from this time point.
-- Earliest: earliest offset
- Custom Time Range: a custom time point
- Latest: latest offset
+- Earliest: earliest offset
- Custom: a custom time point
- Latest: latest offset
|
-Offset
+ | Offset
|
-Set this parameter if Reset By is set to Offset.
+ | Set this parameter if Reset By is set to Offset.
Enter an offset, which is greater than or equal to 0. After the reset is complete, retrieval starts from this offset.
|
diff --git a/docs/dms/umn/kafka-ug-0015.html b/docs/dms/umn/kafka-ug-0015.html
index dd56601a..9d4bcef7 100644
--- a/docs/dms/umn/kafka-ug-0015.html
+++ b/docs/dms/umn/kafka-ug-0015.html
@@ -1,15 +1,24 @@
-Viewing Consumer Connection Addresses
-View consumer connection addresses on the DMS console.
- The connection address of a consumer can be viewed only when the consumer is connected to a Kafka instance.
+ Viewing Kafka Consumer Details
+ This section describes how to view the consumer list and consumer connection addresses.
+ PrerequisitesThe consumer list and connection address can be viewed only when consumers in a consumer group are connected to the Kafka instance (that is, the consumer group is in the STABLE state).
+
+ Viewing the Consumer List (Console)- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
-Viewing Consumer Addresses on Console- Log in to the management console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- In the navigation pane, choose Consumer Groups.
- Click the name of the desired consumer group.
- On the Consumers tab page, view the consumer list.
In the consumer list, you can view the consumer ID, consumer address, and client ID.
+ - (Optional) To query a specific consumer, enter the consumer ID in the search box and press Enter.
+
+ Viewing the Consumer List (Kafka CLI)
+
+ Viewing Consumer Connection Addresses (Console)- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
- Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- In the navigation pane, choose Consumer Groups.
- Click the desired consumer group.
- On the Consumers tab page, view the consumer addresses.
-
-
diff --git a/docs/dms/umn/kafka-ug-0038.html b/docs/dms/umn/kafka-ug-0038.html
new file mode 100644
index 00000000..eecbf6cf
--- /dev/null
+++ b/docs/dms/umn/kafka-ug-0038.html
@@ -0,0 +1,63 @@
+
+
+ Modifying Kafka Topic Configurations
+ This section describes how to modify the following configurations of a Kafka topic on the console.
+
+ Table 1 Kafka topic configuration parametersParameter
+ |
+Description
+ |
+
+
+Partitions
+ |
+Number of partitions in a topic. For details about how to change, see Changing Kafka Partition Quantity.
+ |
+
+Aging Time (h)
+ |
+Maximum message retention. For details about how to change, see Changing Kafka Message Retention Period.
+ |
+
+Synchronous Replication
+ |
+A message is returned to the client only after the message creation request has been received and the message has been acknowledged by all replicas.
+ |
+
+Synchronous Flushing
+ |
+- Enabled: A message is immediately flushed to disk once it is created, bringing higher reliability.
- Disabled: A message is stored in the memory instead of being immediately flushed to disk once created.
+ |
+
+Message Timestamp
+ |
+Timestamp type of a message. Options: - CreateTime: time when the producer created the message.
- LogAppendTime: time when the broker appended the message to the log.
+
+ |
+
+Max. Message Size
+ |
+Maximum size of messages to be processed in batches. If message compression is enabled, this parameter indicates the size after compression.
+If this value is increased and the consumer version is earlier than 0.10.2, the consumers' fetch size must also be increased so that they can obtain the latest value.
+ |
+
+
+
+
+ Modifying Synchronous Replication, Synchronous Flushing, Message Timestamp, or Max. Message Size does not require instance restart.
+
+ Modifying Topic Configurations- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- In the navigation pane, choose Topics.
- Modify topic configurations in either of the following ways:
- Select one or more topics and click Edit Topic above the topic list.
- In the row containing the desired topic, click Edit.
+ - In the Edit Topic dialog box, change configurations and click OK.
- If there is only one replica, Synchronous Replication cannot be enabled.
- After enabling synchronous replication, set acks to all or –1 on the client. Otherwise, this function will not take effect.
+
+
+
+
+
+
+
diff --git a/docs/dms/umn/kafka-ug-0041.html b/docs/dms/umn/kafka-ug-0041.html
new file mode 100644
index 00000000..2b7312a6
--- /dev/null
+++ b/docs/dms/umn/kafka-ug-0041.html
@@ -0,0 +1,36 @@
+
+
+ Creating a Kafka Consumer Group
+ Create a consumer group on the console.
+ auto.create.groups.enable: a consumer group is automatically created when a consumer attempts to enter a group that does not exist.
+ - This operation is optional when auto.create.groups.enable is true in Configuring Parameters.
- A consumer group is required before consuming messages when auto.create.groups.enable is false in Configuring Parameters. Otherwise, consumption will fail.
+ - If auto.create.groups.enable is set to true, the consumer group status is EMPTY, and no offset has been submitted, the system automatically deletes the consumer group 10 minutes later.
- If auto.create.groups.enable is set to false, the system does not automatically delete consumer groups. You can manually delete them.
- If a consumer group has never committed an offset, the group will be deleted after the Kafka instance restarts.
- Creating a consumer group on the console does not require instance restart.
+
+ Creating a Kafka Consumer Group- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- In the navigation pane, choose Consumer Groups.
- Click Create Consumer Group.
- Set consumer group parameters by referring to Table 1 and click OK.
+ Table 1 Consumer group parametersParameter
+ |
+Description
+ |
+
+
+Consumer Group Name
+ |
+Enter 3 to 64 characters, starting with a letter or underscore (_). Use only letters, digits, periods (.), hyphens (-), and underscores (_).
+If a consumer group name starts with a special character, for example, an underscore (_) or a number sign (#), monitoring data cannot be displayed.
+ |
+
+
+
+
+View the new consumer group in the consumer group list.
+
+
+
+
+
diff --git a/docs/dms/umn/kafka-ug-0053.html b/docs/dms/umn/kafka-ug-0053.html
new file mode 100644
index 00000000..4a5b1b8f
--- /dev/null
+++ b/docs/dms/umn/kafka-ug-0053.html
@@ -0,0 +1,17 @@
+
+
+ Exporting the Kafka Instance List
+ ScenarioThis section describes how to export the Kafka instance list from the console.
+
+ Exporting the Kafka Instance List- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Export the instance list using either of the following methods:
- Select the desired instances and choose Export > Export selected data to an XLSX file to export specified instances.
- Choose Export > Export all data to an XLSX file to export all instances.
+
+
+
+
+
diff --git a/docs/dms/umn/kafka-ug-0056.html b/docs/dms/umn/kafka-ug-0056.html
new file mode 100644
index 00000000..f9ba335c
--- /dev/null
+++ b/docs/dms/umn/kafka-ug-0056.html
@@ -0,0 +1,17 @@
+
+
+ Exporting Kafka Consumer Groups
+ ScenarioExport the consumer group list from the console.
+
+ Exporting Kafka Consumer Groups- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view its details.
- In the navigation pane, choose Consumer Groups.
- Export consumer groups in either of the following ways:
- Select the desired consumer groups and choose Export > Export selected data to an XLSX file to export specified consumer groups.
- Choose Export > Export all data to an XLSX file to export all consumer groups.
+
+
+
+
+
diff --git a/docs/dms/umn/kafka-ug-0058.html b/docs/dms/umn/kafka-ug-0058.html
new file mode 100644
index 00000000..34616522
--- /dev/null
+++ b/docs/dms/umn/kafka-ug-0058.html
@@ -0,0 +1,31 @@
+
+
+
+ Managing Topics
+
+
+
+
+
diff --git a/docs/dms/umn/kafka-ug-0059.html b/docs/dms/umn/kafka-ug-0059.html
new file mode 100644
index 00000000..8b79b9fd
--- /dev/null
+++ b/docs/dms/umn/kafka-ug-0059.html
@@ -0,0 +1,25 @@
+
+
+
+ Configuring Kafka Network Connections
+
+
+
+
+
diff --git a/docs/dms/umn/kafka-ug-0061.html b/docs/dms/umn/kafka-ug-0061.html
new file mode 100644
index 00000000..2624f327
--- /dev/null
+++ b/docs/dms/umn/kafka-ug-0061.html
@@ -0,0 +1,21 @@
+
+
+
+ Configuring Kafka Access Control
+
+
+
+
+
diff --git a/docs/dms/umn/kafka-ug-0062.html b/docs/dms/umn/kafka-ug-0062.html
new file mode 100644
index 00000000..ae1c00c8
--- /dev/null
+++ b/docs/dms/umn/kafka-ug-0062.html
@@ -0,0 +1,21 @@
+
+
+
+ Configuring the Kafka Client
+
+
+
+
+
diff --git a/docs/dms/umn/kafka-ug-0069.html b/docs/dms/umn/kafka-ug-0069.html
new file mode 100644
index 00000000..560f0e56
--- /dev/null
+++ b/docs/dms/umn/kafka-ug-0069.html
@@ -0,0 +1,14 @@
+
+
+ Process of Using Kafka
+ The following figure shows the process of using a Kafka instance to produce and consume messages.
+ Figure 1 Process of using Kafka
+ - Creating a User and Granting DMS for Kafka Permissions
Create IAM users and grant them only the DMS for Kafka permissions required to perform a given task based on their job responsibilities.
+ - Creating a Kafka Instance
Kafka instances are tenant-exclusive, and physically isolated in deployment.
+ - Creating a Kafka Topic
Create a topic for storing messages so that producers can produce messages and consumers can subscribe to messages.
+ - Connecting to an Instance
The client uses commands to connect to Kafka instances in a private or public network, and produces and consumes messages.
+ - Managing Messages
View messages on the console.
+
+
+
+
diff --git a/docs/dms/umn/kafka-ug-0319001.html b/docs/dms/umn/kafka-ug-0319001.html
index e67676ff..616f1639 100644
--- a/docs/dms/umn/kafka-ug-0319001.html
+++ b/docs/dms/umn/kafka-ug-0319001.html
@@ -1,64 +1,104 @@
- Configuring Public Access
+ Configuring Kafka Public Access
To access a Kafka instance over a public network, enable public access and configure EIPs for the instance.
If you no longer need public access to the instance, you can disable it as required.
- Procedure- Log in to the management console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+ Prerequisites- You can change the public access setting only when the Kafka instance is in the Running state.
- Kafka instances only support IPv4 EIPs. IPv6 EIPs are not supported.
+
+ Enabling Public Access- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
- - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click a Kafka instance to go to the Basic Information tab page.
- Configure public access.
- You can change the public access setting only when the Kafka instance is in the Running state.
- Only IPv4 EIPs can be bound to Kafka instances.
-
-Enabling public access
-Click next to Public Access to enable public access. For Elastic IP Address, select an EIP for each broker and then click .
-You can view the operation progress on the Background Tasks page. If the task status is Successful, the modification has succeeded.
-Figure 1 Configuring public access
-After public access is enabled, configure security group rules listed in Table 1 before attempting to access Kafka. For details about accessing Kafka, see Accessing a Kafka Instance.
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click a Kafka instance to go to the Basic Information page.
- Click
next to Public Access to enable public access. For Elastic IP Address, select an EIP for each broker. If the number of EIPs is insufficient, click Create Elastic IP to go to the Create EIP page and create EIPs. For details, see Assigning an EIP. After the creation is complete, return to the page for enabling the public access. Click next to Elastic IP Address, select EIPs from the drop-down list. The number of EIPs must be the same as the number of brokers. Then click and the Background Tasks page is displayed.If the status of the task turns to Successful, public access is successfully enabled. Figure 1 Enabling public access
+
+After public access is enabled, configure the access mode (plaintext or ciphertext) and security group rules listed in Table 1 before attempting to access Kafka. For details about accessing Kafka, see Connecting to an Instance.
-Table 1 Security group rulesDirection
+Table 1 Security group rulesDirection
|
-Protocol
+ | Protocol
|
-Port
+ | Port
|
-Source
+ | Source
|
-Description
+ | Description
|
-Inbound
+ | Inbound
|
-TCP
+ | TCP
|
-9094
+ | 9094
|
-0.0.0.0/0
+ | 0.0.0.0/0
|
-Access Kafka through the public network (without SSL encryption).
+ | Accessing Kafka over a public network (in plaintext)
|
-Inbound
+ | Inbound
|
-TCP
+ | TCP
|
-9095
+ | 9095
|
-0.0.0.0/0
+ | 0.0.0.0/0
|
-Access Kafka through the public network (with SSL encryption).
+ | Accessing Kafka over a public network (in ciphertext)
|
-Disabling public access
-Click next to Public Access.
-You can view the operation progress on the Background Tasks page. If the task status is Successful, the modification has succeeded.
+
+
+Disabling Public Access- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click a Kafka instance to go to the Basic Information page.
- Before disabling public access, disable Plaintext Access and Ciphertext Access next to Public Network Access. Then click
next to Public Access. - Click OK. The Background Tasks page is displayed. If the status of the task turns to Successful, public access is successfully disabled.
After public access is disabled, configure security group rules listed in Table 2 before attempting to access Kafka in a VPC. For details about accessing Kafka, see Connecting to an Instance.
+ Table 2 Security group rules (private network access)Direction
+ |
+Protocol
+ |
+Port
+ |
+Source
+ |
+Description
+ |
+
+
+Inbound
+ |
+TCP
+ |
+9092
+ |
+0.0.0.0/0
+ |
+Accessing a Kafka instance over a private network within a VPC (in plaintext)
+ |
+
+Inbound
+ |
+TCP
+ |
+9093
+ |
+0.0.0.0/0
+ |
+Accessing a Kafka instance over a private network within a VPC (in ciphertext)
+ |
+
+
+
+
+ After a security group is created, its default inbound rule allows communication among ECSs within the security group and its default outbound rule allows all outbound traffic. In this case, you can access a Kafka instance within a VPC, and do not need to add rules according to Table 2.
+
+
diff --git a/docs/dms/umn/kafka-ug-0720001.html b/docs/dms/umn/kafka-ug-0720001.html
index 94f4a896..2d1e10a5 100644
--- a/docs/dms/umn/kafka-ug-0720001.html
+++ b/docs/dms/umn/kafka-ug-0720001.html
@@ -1,26 +1,14 @@
-
- Managing Topics
-
-
-
+Configuring Topics
+
diff --git a/docs/dms/umn/kafka-ug-0720002.html b/docs/dms/umn/kafka-ug-0720002.html
index 7198c52e..5d9085d5 100644
--- a/docs/dms/umn/kafka-ug-0720002.html
+++ b/docs/dms/umn/kafka-ug-0720002.html
@@ -1,14 +1,15 @@
-
- Managing Messages
-
-
+ Managing Messages
+
-
diff --git a/docs/dms/umn/kafka-ug-0723001.html b/docs/dms/umn/kafka-ug-0723001.html
index 67d5e797..5976de14 100644
--- a/docs/dms/umn/kafka-ug-0723001.html
+++ b/docs/dms/umn/kafka-ug-0723001.html
@@ -13,6 +13,8 @@
- Specifications
+ - Comparing Single-node and Cluster Kafka Instances
+
- Comparing DMS for Kafka and Open-Source Kafka
- Notes and Constraints
@@ -23,7 +25,7 @@
- Best Practices
- - Permissions
+ - Permission
diff --git a/docs/dms/umn/kafka-ug-0723004.html b/docs/dms/umn/kafka-ug-0723004.html
index 3863de83..739a67bc 100644
--- a/docs/dms/umn/kafka-ug-0723004.html
+++ b/docs/dms/umn/kafka-ug-0723004.html
@@ -10,6 +10,8 @@
- Instances
+- Specification Modification
+
- Connections
- Topics and Partitions
diff --git a/docs/dms/umn/kafka-ug-0723006.html b/docs/dms/umn/kafka-ug-0723006.html
new file mode 100644
index 00000000..ddfaeafc
--- /dev/null
+++ b/docs/dms/umn/kafka-ug-0723006.html
@@ -0,0 +1,23 @@
+
+
+
+ Troubleshooting
+
+
+
+
+
diff --git a/docs/dms/umn/kafka-ug-180413001.html b/docs/dms/umn/kafka-ug-180413001.html
index 62b30b1e..bc4eee52 100644
--- a/docs/dms/umn/kafka-ug-180413001.html
+++ b/docs/dms/umn/kafka-ug-180413001.html
@@ -1,12 +1,14 @@
-Monitoring
+Monitoring and Alarms
diff --git a/docs/dms/umn/kafka-ug-180413002.html b/docs/dms/umn/kafka-ug-180413002.html
new file mode 100644
index 00000000..0e2248b4
--- /dev/null
+++ b/docs/dms/umn/kafka-ug-180413002.html
@@ -0,0 +1,684 @@
+
+
+Kafka Metrics
+IntroductionThis section describes metrics reported by DMS to Cloud Eye as well as their namespaces and dimensions. You can use the Cloud Eye console or APIs to query the Kafka metrics and alarms, or view Kafka instance metrics on the Monitoring page of the DMS console.
+ For example, you can call the API to query the monitoring data of the Disk Capacity Usage metric.
+
+
+ Instance Metrics
+ Table 1 Instance metricsMetric ID
+ |
+Metric Name
+ |
+Description
+ |
+Value Range
+ |
+Monitored Object
+ |
+Monitoring Period (Raw Data)
+ |
+
+
+current_partitions
+ |
+Partitions
+ |
+Number of used partitions in the instance
+Unit: count
+ |
+0–100,000
+ |
+Kafka instance
+ |
+1 minute
+ |
+
+current_topics
+ |
+Topics
+ |
+Number of created topics in the instance
+Unit: count
+ |
+0–100,000
+ |
+Kafka instance
+ |
+1 minute
+ |
+
+group_msgs
+ |
+Accumulated Messages
+ |
+Total number of accumulated messages in all consumer groups of the instance
+Unit: count
+ |
+0–1,000,000,000
+ |
+Kafka instance
+ |
+1 minute
+ |
+
+
+
+
+
+ Broker Metrics
+ Table 2 Broker metricsMetric ID
+ |
+Metric Name
+ |
+Description
+ |
+Value Range
+ |
+Monitored Object
+ |
+Monitoring Period (Raw Data)
+ |
+
+
+broker_data_size
+ |
+Message Size
+ |
+Total size of messages in the broker
+Unit: byte, KB, MB, GB, TB, or PB
+ |
+0–5,000,000,000,000
+ |
+Kafka instance broker
+ |
+1 minute
+ |
+
+broker_messages_in_rate
+ |
+Message Creation Rate
+ |
+Number of messages created per second
+Unit: count/s
+ |
+0–500,000
+ |
+Kafka instance broker
+ |
+1 minute
+ |
+
+broker_bytes_out_rate
+ |
+Message Retrieval
+ |
+Number of bytes retrieved per second
+Unit: byte/s, KB/s, MB/s, or GB/s
+ |
+0–500,000,000
+ |
+Kafka instance broker
+ |
+1 minute
+ |
+
+broker_bytes_in_rate
+ |
+Message Creation
+ |
+Number of bytes created per second
+Unit: byte/s, KB/s, MB/s, or GB/s
+ |
+0–500,000,000
+ |
+Kafka instance broker
+ |
+1 minute
+ |
+
+broker_public_bytes_in_rate
+ |
+Public Inbound Traffic
+ |
+Inbound traffic over public networks per second
+Unit: byte/s, KB/s, MB/s, or GB/s
+ NOTE: You can view this metric on the EIP console if public access has been enabled and EIPs have been assigned to the instance.
+
+ |
+0–500,000,000
+ |
+Kafka instance broker
+ |
+1 minute
+ |
+
+broker_public_bytes_out_rate
+ |
+Public Outbound Traffic
+ |
+Outbound traffic over public networks per second
+Unit: byte/s, KB/s, MB/s, or GB/s
+ NOTE: You can view this metric on the EIP console if public access has been enabled and EIPs have been assigned to the instance.
+
+ |
+0–500,000,000
+ |
+Kafka instance broker
+ |
+1 minute
+ |
+
+broker_fetch_mean
+ |
+Average Message Retrieval Processing Duration
+ |
+Average time that the broker spends processing message retrieval requests
+Unit: ms
+ |
+0–10,000
+ |
+Kafka instance broker
+ |
+1 minute
+ |
+
+broker_produce_mean
+ |
+Average Message Creation Processing Duration
+ |
+Average time that the broker spends processing message creation requests
+Unit: ms
+ |
+0–10,000
+ |
+Kafka instance broker
+ |
+1 minute
+ |
+
+broker_cpu_core_load
+ |
+Average Load per CPU Core
+ |
+Average load of each CPU core of the Kafka VM
+Unit: %
+ |
+0–20
+ |
+Kafka instance broker
+ |
+1 minute
+ |
+
+broker_disk_usage
+ |
+Disk Capacity Usage
+ |
+Disk usage of the Kafka VM
+Unit: %
+ |
+0–100
+ |
+Kafka instance broker
+ |
+1 minute
+ |
+
+broker_memory_usage
+ |
+Memory Usage
+ |
+Memory usage of the Kafka VM
+Unit: %
+ |
+0–100
+ |
+Kafka instance broker
+ |
+1 minute
+ |
+
+broker_heap_usage
+ |
+JVM Heap Memory Usage of Kafka
+ |
+Heap memory usage of the Kafka JVM
+Unit: %
+ |
+0–100
+ |
+Kafka instance broker
+ |
+1 minute
+ |
+
+broker_alive
+ |
+Broker Alive
+ |
+Whether the Kafka broker is alive
+ |
+
+ |
+Kafka instance broker
+ |
+1 minute
+ |
+
+broker_connections
+ |
+Connections
+ |
+Total number of TCP connections on the Kafka broker
+Unit: count
+ |
+0–65,535
+ |
+Kafka instance broker
+ |
+1 minute
+ |
+
+broker_cpu_usage
+ |
+CPU Usage
+ |
+CPU usage of the Kafka VM
+Unit: %
+ |
+0–100
+ |
+Kafka instance broker
+ |
+1 minute
+ |
+
+broker_disk_read_await
+ |
+Average Disk Read Time
+ |
+Average time for each disk I/O read in the monitoring period
+Unit: ms
+ |
+> 0
+ |
+Kafka instance broker
+ |
+1 minute
+ |
+
+broker_disk_write_await
+ |
+Average Disk Write Time
+ |
+Average time for each disk I/O write in the monitoring period
+Unit: ms
+ |
+> 0
+ |
+Kafka instance broker
+ |
+1 minute
+ |
+
+broker_total_bytes_in_rate
+ |
+Inbound Traffic
+ |
+Inbound traffic per second
+Unit: byte/s
+ |
+0–1,000,000,000
+ |
+Kafka instance broker
+ |
+1 minute
+ |
+
+broker_total_bytes_out_rate
+ |
+Outbound Traffic
+ |
+Outbound traffic per second
+Unit: byte/s
+ |
+0–1,000,000,000
+ |
+Kafka instance broker
+ |
+1 minute
+ |
+
+broker_disk_read_rate
+ |
+Disk Read Speed
+ |
+Read traffic on the disk
+Unit: byte/s, KB/s, MB/s, or GB/s
+ |
+≥ 0
+ |
+Kafka instance broker
+ |
+1 minute
+ |
+
+broker_disk_write_rate
+ |
+Disk Write Speed
+ |
+Write traffic on the disk
+Unit: byte/s, KB/s, MB/s, or GB/s
+ |
+≥ 0
+ |
+Kafka instance broker
+ |
+1 minute
+ |
+
+
+
+
+
+ Topic Metrics
+ Table 3 Topic metricsMetric ID
+ |
+Metric Name
+ |
+Description
+ |
+Value Range
+ |
+Monitored Object
+ |
+Monitoring Period (Raw Data)
+ |
+
+
+topic_bytes_in_rate
+ |
+Message Creation
+ |
+Number of bytes created per second
+Unit: byte/s, KB/s, MB/s, or GB/s
+ NOTE: - On the Cloud Eye console, this metric is available only when Scope is set to Basic monitoring on the Queues tab page.
- On the Monitoring page of the DMS console, this metric is available only when Monitoring Type is set to Basic monitoring on the By Topic tab page.
+
+ |
+0–500,000,000
+ |
+Topic in a Kafka instance
+ |
+1 minute
+ |
+
+topic_bytes_out_rate
+ |
+Message Retrieval
+ |
+Number of bytes retrieved per second
+Unit: byte/s, KB/s, MB/s, or GB/s
+ NOTE: - On the Cloud Eye console, this metric is available only when Scope is set to Basic monitoring on the Queues tab page.
- On the Monitoring page of the DMS console, this metric is available only when Monitoring Type is set to Basic monitoring on the By Topic tab page.
+
+ |
+0–500,000,000
+ |
+Topic in a Kafka instance
+ |
+1 minute
+ |
+
+topic_data_size
+ |
+Message Size
+ |
+Total size of messages in the queue
+Unit: byte, KB, MB, GB, TB, or PB
+ NOTE: - On the Cloud Eye console, this metric is available only when Scope is set to Basic monitoring on the Queues tab page.
- On the Monitoring page of the DMS console, this metric is available only when Monitoring Type is set to Basic monitoring on the By Topic tab page.
+
+ |
+0–5,000,000,000,000
+ |
+Topic in a Kafka instance
+ |
+1 minute
+ |
+
+topic_messages
+ |
+Total Messages
+ |
+Total number of messages in the queue
+Unit: count
+ NOTE: - On the Cloud Eye console, this metric is available only when Scope is set to Basic monitoring on the Queues tab page.
- On the Monitoring page of the DMS console, this metric is available only when Monitoring Type is set to Basic monitoring on the By Topic tab page.
+
+ |
+≥ 0
+ |
+Topic in a Kafka instance
+ |
+1 minute
+ |
+
+topic_messages_in_rate
+ |
+Message Creation Rate
+ |
+Number of messages created per second
+Unit: count/s
+ NOTE: - On the Cloud Eye console, this metric is available only when Scope is set to Basic monitoring on the Queues tab page.
- On the Monitoring page of the DMS console, this metric is available only when Monitoring Type is set to Basic monitoring on the By Topic tab page.
+
+ |
+0–500,000
+ |
+Topic in a Kafka instance
+ |
+1 minute
+ |
+
+partition_messages
+ |
+Partition Messages
+ |
+Total number of messages in the partition
+Unit: count
+ NOTE: - On the Cloud Eye console, this metric is available only when Scope is set to Partition monitoring on the Queues tab page.
- On the Monitoring page of the DMS console, this metric is available only when Monitoring Type is set to Partition monitoring on the By Topic tab page.
+
+ |
+≥ 0
+ |
+Topic in a Kafka instance
+ |
+1 minute
+ |
+
+produced_messages
+ |
+Created Messages
+ |
+Number of messages that have been created
+Unit: count
+ NOTE: - On the Cloud Eye console, this metric is available only when Scope is set to Partition monitoring on the Queues tab page.
- On the Monitoring page of the DMS console, this metric is available only when Monitoring Type is set to Partition monitoring on the By Topic tab page.
+
+ |
+≥ 0
+ |
+Topic in a Kafka instance
+ |
+1 minute
+ |
+
+
+
+
+
+ Consumer Group Metrics
+ Table 4 Consumer group metricsMetric ID
+ |
+Metric Name
+ |
+Description
+ |
+Value Range
+ |
+Monitored Object
+ |
+Monitoring Period (Raw Data)
+ |
+
+
+messages_consumed
+ |
+Retrieved Messages
+ |
+Number of messages that have been retrieved in the consumer group
+Unit: count
+ NOTE: - On the Cloud Eye console, this metric is available only when Queue is set to a specific topic name and Scope is set to Partition monitoring on the Consumer Groups tab page.
- On the Monitoring page of the DMS console, this metric is available only when Topic is set to a specific topic name and Monitoring Type is set to Partition monitoring on the By Consumer Group tab page.
+
+ |
+≥ 0
+ |
+Consumer group of a Kafka instance
+ |
+1 minute
+ |
+
+messages_remained
+ |
+Available Messages
+ |
+Number of messages that can be retrieved in the consumer group
+Unit: count
+ NOTE: - On the Cloud Eye console, this metric is available only when Queue is set to a specific topic name and Scope is set to Partition monitoring on the Consumer Groups tab page.
- On the Monitoring page of the DMS console, this metric is available only when Topic is set to a specific topic name and Monitoring Type is set to Partition monitoring on the By Consumer Group tab page.
+
+ |
+≥ 0
+ |
+Consumer group of a Kafka instance
+ |
+1 minute
+ |
+
+topic_messages_remained
+ |
+Topic Available Messages
+ |
+Number of remaining messages that can be retrieved from the specified topic in the consumer group
+Unit: count
+ NOTE: - On the Cloud Eye console, this metric is available only when Queue is set to a specific topic name and Scope is set to Basic monitoring on the Consumer Groups tab page.
- On the Monitoring page of the DMS console, this metric is available only when Topic is set to a specific topic name and Monitoring Type is set to Basic monitoring on the By Consumer Group tab page.
+
+ |
+0 to 2^63–1
+ |
+Consumer group of a Kafka instance
+ |
+1 minute
+ |
+
+topic_messages_consumed
+ |
+Topic Retrieved Messages
+ |
+Number of messages that have been retrieved from the specified topic in the consumer group
+Unit: count
+ NOTE: - On the Cloud Eye console, this metric is available only when Queue is set to a specific topic name and Scope is set to Basic monitoring on the Consumer Groups tab page.
- On the Monitoring page of the DMS console, this metric is available only when Topic is set to a specific topic name and Monitoring Type is set to Basic monitoring on the By Consumer Group tab page.
+
+ |
+0 to 2^63–1
+ |
+Consumer group of a Kafka instance
+ |
+1 minute
+ |
+
+consumer_messages_remained
+ |
+Consumer Available Messages
+ |
+Number of remaining messages that can be retrieved in the consumer group
+Unit: count
+ NOTE: - On the Cloud Eye console, this metric is available only when Queues is set to All queues on the Consumer Groups tab page.
- On the Monitoring page of the DMS console, this metric is available only when Topic is set to All topics on the By Consumer Group tab page.
+
+ |
+0 to 2^63–1
+ |
+Consumer group of a Kafka instance
+ |
+1 minute
+ |
+
+consumer_messages_consumed
+ |
+Consumer Retrieved Messages
+ |
+Number of messages that have been retrieved in the consumer group
+Unit: count
+ NOTE: - On the Cloud Eye console, this metric is available only when Queues is set to All queues on the Consumer Groups tab page.
- On the Monitoring page of the DMS console, this metric is available only when Topic is set to All topics on the By Consumer Group tab page.
+
+ |
+0 to 2^63–1
+ |
+Consumer group of a Kafka instance
+ |
+1 minute
+ |
+
+
+
+
+
+ Dimension
+ Key
+ |
+Value
+ |
+
+
+kafka_instance_id
+ |
+Kafka instance
+ |
+
+kafka_broker
+ |
+Kafka instance broker
+ |
+
+kafka_topics
+ |
+Kafka instance topic
+ |
+
+kafka_partitions
+ |
+Partition in a Kafka instance
+ |
+
+kafka_groups-partitions
+ |
+Partition consumer group in a Kafka instance
+ |
+
+kafka_groups_topics
+ |
+Topic consumer group in a Kafka instance
+ |
+
+kafka_groups
+ |
+Consumer group of a Kafka instance
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/dms/umn/kafka-ug-180418001.html b/docs/dms/umn/kafka-ug-180418001.html
deleted file mode 100644
index a8890231..00000000
--- a/docs/dms/umn/kafka-ug-180418001.html
+++ /dev/null
@@ -1,13 +0,0 @@
-
-
-Auditing
-
-
-
diff --git a/docs/dms/umn/kafka-ug-180418002.html b/docs/dms/umn/kafka-ug-180418002.html
index 606839da..bd88e26b 100644
--- a/docs/dms/umn/kafka-ug-180418002.html
+++ b/docs/dms/umn/kafka-ug-180418002.html
@@ -1,282 +1,395 @@
-Operations Logged by CTS
-With Cloud Trace Service (CTS), you can record operations associated with DMS for later query, audit, and backtrack operations.
-
- Table 1 DMS operations that can be recorded by CTSOperation
+Viewing Kafka Audit Logs
+ScenarioWith Cloud Trace Service (CTS), you can record operations associated with DMS for later query, audit, and backtrack operations.
+
+ PrerequisiteCTS has been enabled.
+
+ DMS Operations Supported by CTS
+ Table 1 DMS operations that can be recorded by CTSOperation
|
-Resource Type
+ | Resource Type
|
-Trace Name
+ | Trace Name
|
-Successfully deleting a background task
+ | Successfully creating an instance
|
-kafka
+ | kafka
|
-deleteDMSBackendJobSuccess
+ | createDMSInstanceTaskSuccess
|
-Failing to delete a background task
+ | Failing to create an instance
|
-kafka
+ | kafka
|
-deleteDMSBackendJobFailure
+ | createDMSInstanceTaskFailure
|
-Successfully creating an order for creating an instance
+ | Successfully deleting an instance that failed to be created
|
-kafka
+ | kafka
|
-createDMSInstanceOrderSuccess
+ | deleteDMSCreateFailureInstancesSuccess
|
-Failing to create an order for creating an instance
+ | Failing to delete an instance that failed to be created
|
-kafka
+ | kafka
|
-createDMSInstanceOrderFailure
+ | deleteDMSCreateFailureInstancesFailure
|
-Successfully submitting a request to modify an instance order
+ | Successfully deleting an instance
|
-kafka
+ | kafka
|
-modifyDMSInstanceOrderSuccess
+ | deleteDMSInstanceTaskSuccess
|
-Failing to submit a request to modify an instance order
+ | Failing to delete an instance
|
-kafka
+ | kafka
|
-modifyDMSInstanceOrderFailure
+ | deleteDMSInstanceTaskFailure
|
-Successfully submitting a request to scale up an instance
+ | Deleting multiple instance tasks at a time
|
-kafka
+ | kafka
|
-extendDMSInstanceSuccess
+ | batchDeleteDMSInstanceTask
|
-Failing to submit a request to scale up an instance
+ | Successfully submitting a request to delete multiple instances at a time
|
-kafka
+ | kafka
|
-extendDMSInstanceFailure
+ | batchDeleteDMSInstanceSuccess
|
-Successfully submitting a request to reset instance password
+ | Successfully deleting multiple instances at a time
|
-kafka
+ | kafka
|
-resetDMSInstancePasswordSuccess
+ | batchDeleteDMSInstanceTaskSuccess
|
-Failing to submit a request to reset instance password
+ | Failing to submit a request to delete multiple instances at a time
|
-kafka
+ | kafka
|
-resetDMSInstancePasswordFailure
+ | batchDeleteDMSInstanceFailure
|
-Successfully creating a topic for a Kafka instance
+ | Failing to delete multiple instances at a time
|
-kafka
+ | kafka
|
-Kafka_platinum_create_topicSuccess
+ | batchDeleteDMSInstanceTaskFailure
|
-Failing to create a topic for a Kafka instance
+ | Successfully submitting a request to scale up an instance
|
-kafka
+ | kafka
|
-Kafka_platinum_create_topicFailure
+ | extendDMSInstanceSuccess
|
-Successfully deleting a topic from a Kafka instance
+ | Successfully scaling up an instance
|
-kafka
+ | kafka
|
-Kafka_platinum_delete_topicsSuccess
+ | extendDMSInstanceTaskSuccess
|
-Failing to delete a topic for a Kafka instance
+ | Failing to submit a request to scale up an instance
|
-kafka
+ | kafka
|
-Kafka_platinum_delete_topicsFailure
+ | extendDMSInstanceFailure
|
-Successfully deleting an instance that failed to be created
+ | Failing to scale up an instance
|
-kafka
+ | kafka
|
-deleteDMSCreateFailureInstancesSuccess
+ | extendDMSInstanceTaskFailure
|
-Failing to delete an instance that failed to be created
+ | Successfully submitting a request to reset instance password
|
-kafka
+ | kafka
|
-deleteDMSCreateFailureInstancesFailure
+ | resetDMSInstancePasswordSuccess
|
-Successfully submitting a request to restart an instance
+ | Failing to submit a request to reset instance password
|
-kafka
+ | kafka
|
-restartDMSInstanceSuccess
+ | resetDMSInstancePasswordFailure
|
-Failing to submit a request to restart an instance
+ | Successfully submitting a request to restart an instance
|
-kafka
+ | kafka
|
-restartDMSInstanceFailure
+ | restartDMSInstanceSuccess
|
-Successfully submitting a request to delete multiple instances at a time
+ | Successfully restarting an instance
|
-kafka
+ | kafka
|
-batchDeleteDMSInstanceSuccess
+ | restartDMSInstanceTaskSuccess
|
-Failing to submit a request to delete multiple instances at a time
+ | Failing to submit a request to restart an instance
|
-kafka
+ | kafka
|
-batchDeleteDMSInstanceFailure
+ | restartDMSInstanceFailure
|
-Successfully submitting a request to restart multiple instances at a time
+ | Failing to restart an instance
|
-kafka
+ | kafka
|
-batchRestartDMSInstanceSuccess
+ | restartDMSInstanceTaskFailure
|
-Failing to submit a request to restart multiple instances at a time
+ | Successfully submitting a request to restart multiple instances at a time
|
-kafka
+ | instance
|
-batchRestartDMSInstanceFailure
+ | batchRestartDMSInstanceSuccess
|
-Successfully submitting a request to modify instance information
+ | Successfully restarting multiple instances at a time
|
-kafka
+ | kafka
|
-modifyDMSInstanceInfoSuccess
+ | batchRestartDMSInstanceTaskSuccess
|
-Failing to submit a request to modify instance information
+ | Failing to submit a request to restart multiple instances at a time
|
-kafka
+ | instance
|
-modifyDMSInstanceInfoFailure
+ | batchRestartDMSInstanceFailure
|
-Deleting multiple instance tasks at a time
+ | Failing to restart multiple instances at a time
|
-kafka
+ | kafka
|
-batchDeleteDMSInstanceTask
+ | batchRestartDMSInstanceTaskFailure
|
-Successfully deleting an instance
+ | Successfully submitting a request to modify instance information
|
-kafka
+ | kafka
|
-deleteDMSInstanceTaskSuccess
+ | modifyDMSInstanceInfoSuccess
|
-Failing to delete an instance
+ | Successfully modifying instance information
|
-kafka
+ | kafka
|
-deleteDMSInstanceTaskFailure
+ | modifyDMSInstanceInfoTaskSuccess
|
-Successfully creating an instance
+ | Failing to submit a request to modify instance information
|
-kafka
+ | kafka
|
-createDMSInstanceTaskSuccess
+ | modifyDMSInstanceInfoFailure
|
-Failing to create an instance
+ | Failing to modify instance information
|
-kafka
+ | kafka
|
-createDMSInstanceTaskFailure
+ | modifyDMSInstanceInfoTaskFailure
|
-Successfully scaling up an instance
+ | Successfully deleting a background task
|
-kafka
+ | kafka
|
-extendDMSInstanceTaskSuccess
+ | deleteDMSBackendJobSuccess
|
-Failing to scale up an instance
+ | Failing to delete a background task
|
-kafka
+ | kafka
|
-extendDMSInstanceTaskFailure
+ | deleteDMSBackendJobFailure
|
-Successfully restarting an instance
+ | Successfully creating a topic for a Kafka instance
|
-kafka
+ | kafka
|
-restartDMSInstanceTaskSuccess
+ | Kafka_create_topicSuccess
|
-Failing to restart an instance
+ | Failing to create a topic for a Kafka instance
|
-kafka
+ | kafka
|
-restartDMSInstanceTaskFailure
+ | Kafka_create_topicFailure
|
-Successfully restarting multiple instances at a time
+ | Successfully deleting a topic from a Kafka instance
|
-kafka
+ | kafka
|
-batchRestartDMSInstanceTaskSuccess
+ | Kafka_delete_topicsSuccess
|
-Failing to restart multiple instances at a time
+ | Failing to delete a topic for a Kafka instance
|
-kafka
+ | kafka
|
-batchRestartDMSInstanceTaskFailure
+ | Kafka_delete_topicsFailure
|
-Successfully modifying instance information
+ | Successfully enabling automatic topic creation
|
-kafka
+ | kafka
|
-modifyDMSInstanceInfoTaskSuccess
+ | enable_auto_topicSuccess
|
-Failing to modify instance information
+ | Failing to enable automatic topic creation
|
-kafka
+ | kafka
|
-modifyDMSInstanceInfoTaskFailure
+ | enable_auto_topicFailure
+ |
+
+Successfully modifying a topic
+ |
+kafka
+ |
+Kafka_alter_topicsSuccess
+ |
+
+Failing to modify a topic
+ |
+kafka
+ |
+Kafka_alter_topicsFailure
+ |
+
+Successfully reassigning partitions
+ |
+kafka
+ |
+kafka_reassignmentTaskSuccess
+ |
+
+Failing to reassign partitions
+ |
+kafka
+ |
+kafka_reassignmentTaskFailure
+ |
+
+Successfully submitting a partition reassignment request
+ |
+kafka
+ |
+kafka_reassignmentSuccess
+ |
+
+Failing to submit a partition reassignment request
+ |
+kafka
+ |
+kafka_reassignmentFailure
+ |
+
+Successfully resetting the consumer offset
+ |
+kafka
+ |
+Kafka_reset_consumer_offsetSuccess
+ |
+
+Failing to reset the consumer offset
+ |
+kafka
+ |
+Kafka_reset_consumer_offsetFailure
+ |
+
+Successfully deleting consumer groups in batches
+ |
+kafka
+ |
+Kafka_batch_delete_groupSuccess
+ |
+
+Failing to delete consumer groups in batches
+ |
+kafka
+ |
+Kafka_batch_delete_groupFailure
+ |
+
+Successfully creating a user
+ |
+kafka
+ |
+createUserSuccess
+ |
+
+Failing to create a user
+ |
+kafka
+ |
+createUserFailure
+ |
+
+Successfully deleting a user
+ |
+kafka
+ |
+deleteUserSuccess
+ |
+
+Failing to delete a user
+ |
+kafka
+ |
+deleteUserFailure
+ |
+
+Successfully updating user policies
+ |
+kafka
+ |
+updateUserPoliciesTaskSuccess
+ |
+
+Failing to update user policies
+ |
+kafka
+ |
+updateUserPoliciesTaskFailure
|
-
-
diff --git a/docs/dms/umn/kafka-ug-180418003.html b/docs/dms/umn/kafka-ug-180418003.html
deleted file mode 100644
index 46d2a6d2..00000000
--- a/docs/dms/umn/kafka-ug-180418003.html
+++ /dev/null
@@ -1,20 +0,0 @@
-
-
- Viewing Audit Logs
- ScenarioThis section describes how to view operation records of the last 7 days on the CTS console.
-
- Procedure- Log in to the management console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
-
- - Click Service List and choose Management & Deployment > Cloud Trace Service.
- In the navigation pane, choose Trace List.
- Set filters to search for desired traces. The following filters are available:
- Trace Source: Select DMS.
- Resource Type: Select kafka or Instance.
- Search By: Select an option from the drop-down list.
- When you select Trace name, you also need to select a specific trace name.
- If you select Resource ID for Search By, you need to enter a specific resource ID. The corresponding operation trace can be queried only when the resource ID is completely matched.
- When you select Resource name, you also need to select a specific resource name.
- - Operator: Select a specific operator (a user other than tenant).
- Trace Status: Available options include All trace statuses, normal, warning, and incident. You can only select one of them.
- Time Range: In the upper right corner, choose Last 1 hour, Last 1 day, or Last 1 week, or specify a custom time range. If you select Customize, you also need to select the start time and end time, and then click OK.
- - Click
on the left of a trace to expand its details.Figure 1 Expanding trace details
- - Click View Trace in the Operation column. In the dialog box, the trace details are displayed, as shown in Figure 2.
Figure 2 Viewing a trace
-
-
-
-
-
diff --git a/docs/dms/umn/kafka-ug-180524001.html b/docs/dms/umn/kafka-ug-180524001.html
new file mode 100644
index 00000000..4c0edf43
--- /dev/null
+++ b/docs/dms/umn/kafka-ug-180524001.html
@@ -0,0 +1,143 @@
+
+
+ Configuring a Kafka Alarm Rule
+ This section describes the alarm rules of some metrics and how to configure them. In actual services, you are advised to configure alarm rules for metrics based on the following alarm policies:
+
+ Table 1 Alarm policies and handling of Kafka instancesMetric ID
+ |
+Metric
+ |
+Monitored Object
+ |
+Alarm Policy
+ |
+Description
+ |
+Handling Suggestion
+ |
+
+
+broker_disk_usage
+ |
+Disk Capacity Usage
+ |
+Broker
+ |
+Alarm threshold: original value > 80%
+Number of consecutive periods: 1
+Alarm severity: critical
+ |
+Disk usage of the Kafka VM
+ |
+Modify the instance storage space. For details, see Modifying Instance Specifications.
+ |
+
+broker_cpu_core_load
+ |
+Average Load per CPU Core
+ |
+Broker
+ |
+Alarm threshold: original value > 2
+Number of consecutive periods: 3
+Alarm severity: major
+ |
+Average load of each CPU core of the Kafka VM.
+ |
+Check whether the metric has been approaching or exceeding the alarm threshold for a long time. If yes, modify the number of brokers. For details, see Modifying Instance Specifications.
+ |
+
+broker_memory_usage
+ |
+Memory Usage
+ |
+Broker
+ |
+Alarm threshold: original value > 90%
+Number of consecutive periods: 3
+Alarm severity: critical
+ |
+Memory usage of the Kafka VM.
+ |
+Modify the number of brokers. For details, see Modifying Instance Specifications.
+ |
+
+current_partitions
+ |
+Partitions
+ |
+Instance
+ |
+Alarm threshold: original value > 90% of the maximum allowed number of partitions. The partition limit varies depending on instance specifications. For details, see Specifications.
+Number of consecutive periods: 1
+Alarm severity: major
+ |
+Number of used partitions in the instance.
+ |
+If new topics are required, modify the number of brokers, or split the service to multiple instances. For details about how to modify the number of brokers, see Modifying Instance Specifications.
+ |
+
+broker_cpu_usage
+ |
+CPU Usage
+ |
+Broker
+ |
+Alarm threshold: original value > 90%
+Number of consecutive periods: 3
+Alarm severity: major
+ |
+CPU usage of the Kafka VM.
+ |
+Check whether the metric has been approaching or exceeding the alarm threshold for a long time. If yes, modify the number of brokers. For details, see Modifying Instance Specifications.
+ |
+
+group_msgs
+ |
+Accumulated Messages
+ |
+Instance
+ |
+Alarm threshold: original value > 90% of the upper limit. The upper limit is customized.
+Number of consecutive periods: 1
+Alarm severity: major
+ |
+Total number of accumulated messages in all consumer groups of the instance
+ |
+Delete idle consumer groups, if any. You can also accelerate message retrieval, for example, by increasing the number of consumers.
+ |
+
+topic_messages_remained
+ |
+Topic Available Messages
+ |
+Consumer group
+ |
+Alarm threshold: original value > 90% of the upper limit. The upper limit is customized.
+Number of consecutive periods: 1
+Alarm severity: major
+ |
+Number of remaining messages that can be retrieved from the specified topic in the consumer group.
+ |
+Check whether the consumer code logic is correct, for example, by checking whether the consumer stops consuming messages due to an exception. You can also accelerate message retrieval, for example, by adding topic consumers. Ensure that the number of partitions is greater than or equal to the number of consumers.
+ |
+
+
+
+
+ Configuring a Kafka Alarm Rule- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- In the row containing the desired instance, click View Metric.
You are redirected to the Cloud Eye console page displaying metrics of the selected instance.
+ - Hover the mouse pointer over a metric and click
to create an alarm rule for the metric. - Specify the alarm details.
For more information about creating alarm rules, see Creating an Alarm Rule.
+- Set the alarm name and description.
- Specify the alarm policy and alarm severity.
As shown in the following figure, if the original disk capacity usage is equal to or higher than 85% for three consecutive periods, an alarm is generated. If the alarm is not handled on time, an alarm notification is sent.
+Figure 1 Setting the alarm policy and alarm severity
+ - Set the alarm notification configurations. If you enable Alarm Notification, specify Notification Window, Notification Object, and Trigger Condition.
- Click Create.
+
+
+
+
+
diff --git a/docs/dms/umn/kafka-ug-180604011.html b/docs/dms/umn/kafka-ug-180604011.html
index 6b0ed21d..e04ae183 100644
--- a/docs/dms/umn/kafka-ug-180604011.html
+++ b/docs/dms/umn/kafka-ug-180604011.html
@@ -4,23 +4,23 @@
diff --git a/docs/dms/umn/kafka-ug-180604012.html b/docs/dms/umn/kafka-ug-180604012.html
index 1e98d929..a88004fd 100644
--- a/docs/dms/umn/kafka-ug-180604012.html
+++ b/docs/dms/umn/kafka-ug-180604012.html
@@ -1,139 +1,139 @@
- Preparing Required Resources
- OverviewBefore creating a Kafka instance, ensure the availability of resources, including a virtual private cloud (VPC), subnet, security group, and security group rules. Each Kafka instance is deployed in a VPC and bound to a specific subnet and security group. In this way, Kafka provides an isolated virtual network environment and security protection policies that you can easily configure and manage.
- To access a Kafka instance over a public network, prepare an elastic IP address (EIP) in advance.
- To encrypt the disk, prepare a KMS key in advance.
-
- Required ResourcesTable 1 lists the resources required by a Kafka instance.
+ Kafka Network Connection Conditions
+ A client can connect to a Kafka instance in public or private networks. Notes before using a private network:
+ - By default, a client and a Kafka instance are interconnected when they are deployed in a VPC.
- If they are not, you need to interconnect them because of isolation among VPCs.
+ Table 1 lists how a client can connect to a Kafka instance.
- Table 1 Kafka resourcesResource
+Table 1 Connection modesMode
|
-Requirement
+ | How To Do
|
-Operations
+ | Reference
|
-VPC and subnet
+ | Public access
|
-Different Kafka instances can use the same or different VPCs and subnets based on site requirements. Note the following when creating a VPC and a subnet:
-- The VPC must be created in the same region as the Kafka instance.
- Use the default settings when creating a VPC and subnet.
+ | Enable public access on the Kafka console and configure elastic IPs (EIPs). The client can connect to the Kafka instance through EIPs.
|
-For details about how to create a VPC and subnet, see the Virtual Private Cloud User Guide.
+ | Configuring Kafka Public Access
|
-Security group
+ | Configure port mapping using DNAT. The client can connect to the Kafka instance in a public network.
|
-Different Kafka instances can use the same or different security groups. Note the following when creating a security group:
-- To use Kafka instances, add the security group rules described in Table 2. Other rules can be added based on site requirements.
NOTE: After a security group is created, its default inbound rule allows communication among ECSs within the security group and its default outbound rule allows all outbound traffic. In this case, you can access a Kafka instance within a VPC, and do not need to add rules according to Table 2.
+ Accessing Kafka in a Public Network Using DNAT
+ |
+
|
+Private access
+ |
+A client and a Kafka instance are interconnected when they are deployed in a VPC.
+ |
+-
+ |
+
+When a client and a Kafka instance are deployed in different VPCs of the same region, connect the client and the Kafka instance across VPCs using a VPC endpoint.
+ |
+Accessing Kafka Using a VPC Endpoint Across VPCs
+ |
+
+When a client and a Kafka instance are deployed in different VPCs of the same region, interconnect two VPCs using a VPC peering connection.
+ |
+VPC Peering Connection
+ |
+
+
+
+
+Before connecting a client to a Kafka instance, allow accesses for the following security groups.
+ After a security group is created, its default inbound rule allows communication among ECSs within the security group and its default outbound rule allows all outbound traffic. In this case, you can access a Kafka instance within a VPC, and do not need to add rules according to Table 2.
-
-
- | For details about how to create a security group and configure security group rules, see the Virtual Private Cloud User Guide.
- |
-
-EIP
- |
-Note the following when creating EIPs:
-- The EIPs must be created in the same region as the Kafka instance.
- The number of EIPs must be the same as the number of Kafka instance brokers.
- |
-For details about how to create an EIP, see "Assigning an EIP" in Elastic IP User Guide.
- |
-
-KMS key
- |
-To encrypt the disk for a Kafka instance, prepare a KMS key in advance.
-The KMS key must be created in the same region as the Kafka instance.
- |
-For details about how to create a KMS key, see "Creating a Key" in the Key Management Service User Guide.
- |
-
-
-
-
- Table 2 Security group rulesDirection
+Table 2 Security group rulesDirection
|
-Protocol
+ | Protocol
|
-Port
+ | Port
|
-Source
+ | Source
|
-Description
+ | Description
|
-Inbound
+ | Inbound
|
-TCP
+ | TCP
|
-9094
+ | 9094
|
-0.0.0.0/0
+ | 0.0.0.0/0
|
-Access a Kafka instance through the public network (without SSL encryption).
+ | Accessing a Kafka instance over a public network (in plaintext)
|
-Inbound
+ | Inbound
|
-TCP
+ | TCP
|
-9092
+ | 9092
|
-0.0.0.0/0
+ | 0.0.0.0/0
|
-Access a Kafka instance within a VPC (without SSL encryption).
+ | - Accessing a Kafka instance over a private network within a VPC (in plaintext)
- Accessing a Kafka instance using a peering connection across VPCs (in plaintext)
|
-Inbound
+ | Inbound
|
-TCP
+ | TCP
|
-9095
+ | 9095
|
-0.0.0.0/0
+ | 0.0.0.0/0
|
-Access a Kafka instance through the public network (with SSL encryption).
+ | Accessing a Kafka instance over a public network (in ciphertext)
|
-Inbound
+ | Inbound
|
-TCP
+ | TCP
|
-9093
+ | 9093
|
-0.0.0.0/0
+ | 0.0.0.0/0
|
-Access a Kafka instance within a VPC (with SSL encryption).
+ | - Accessing a Kafka instance over a private network within a VPC (in ciphertext)
- Accessing a Kafka instance using a peering connection across VPCs (in ciphertext)
|
-Inbound
+ | Inbound
|
-TCP
+ | TCP
|
-9011
+ | 9011
|
-198.19.128.0/17
+ | 198.19.128.0/17
|
-Access a Kafka instance using VPC Endpoint (VPCEP).
+ | Accessing a Kafka instance using a VPC endpoint across VPCs (in cipher- or plaintext)
|
-Inbound
+ | Inbound
|
-TCP
+ | TCP
|
-9011
+ | 9011
|
-0.0.0.0/0
+ | 0.0.0.0/0
|
-Access a Kafka instance using DNAT.
+ | Accessing a Kafka instance using DNAT (in cipher- or plaintext)
|
+
diff --git a/docs/dms/umn/kafka-ug-180604013.html b/docs/dms/umn/kafka-ug-180604013.html
new file mode 100644
index 00000000..a6a817ed
--- /dev/null
+++ b/docs/dms/umn/kafka-ug-180604013.html
@@ -0,0 +1,265 @@
+
+
+Creating a Kafka Instance
+ScenarioKafka instances are tenant-exclusive, and physically isolated in deployment. You can customize the computing capabilities and storage space of a Kafka instance as required.
+
+ PrerequisitesBefore creating a Kafka instance, prepare the resources listed in Table 1.
+
+ Table 1 Kafka resourcesResource
+ |
+Requirement
+ |
+Operations
+ |
+
+
+VPC and subnet
+ |
+You need to configure a VPC and subnet for the Kafka instance as required. You can use the current account's existing VPC and subnet, or create new ones.
+Note: VPCs must be created in the same region as the Kafka instance.
+ |
+For details on how to create a VPC and a subnet, see Creating a VPC. If you need to create and use a new subnet in an existing VPC, see Creating a Subnet for the VPC.
+ |
+
+Security group
+ |
+Different Kafka instances can use the same or different security groups.
+Before accessing a Kafka instance, configure security groups based on the access mode. For details, see Table 2.
+ |
+For details on how to create a security group, see Creating a Security Group. For details on how to add rules to a security group, see Adding a Security Group Rule.
+ |
+
+EIP
+ |
+To create a Kafka instance and access it over a public network, create EIPs in advance.
+Note the following when creating EIPs:
+- The EIPs must be created in the same region as the Kafka instance.
- The number of EIPs must be the same as the number of Kafka instance brokers.
- The Kafka console cannot identify IPv6 EIPs.
+ |
+For details about how to create an EIP, see Assigning an EIP.
+ |
+
+KMS key
+ |
+To encrypt the disk for a Kafka instance, prepare a KMS key.
+The KMS key must be created in the same region as the Kafka instance.
+ |
+For details about how to create a KMS key, see Creating a Key.
+ |
+
+
+
+
+
+ Creating a Kafka Instance- Log in to the console.
- Click
in the upper left corner to select a region.Select a region near you to ensure the lowest latency possible.
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click Create Instance in the upper right corner of the page.
- Specify Region, Project, and AZ.
Select one AZ or at least three AZs.
+ - Enter an Instance Name.
You can customize a name that complies with the rules: 4–64 characters; starts with a letter; can contain only letters, digits, hyphens (-), and underscores (_).
+ - Select an Enterprise Project.
This parameter is for enterprise users. An enterprise project manages cloud resources. The enterprise project management service unifies cloud resources in projects, and resources and members in a project. The default project is default.
+ - Configure the following instance parameters:
Specifications: Select Cluster or Single-node.
+- If you select Cluster, specify the version, broker flavor and quantity, disk type, and storage space to be supported by the cluster Kafka instance as required. Cluster instances support Kafka versions 2.3.0, 2.7, and 3.x.
- Single-node: Create a Kafka 2.7 instance with one broker. For details about single-node instances, see Comparing Single-node and Cluster Kafka Instances.
+If you select Cluster, specify the version, broker flavor and quantity, disk type, and storage space to be supported by the Kafka instance as required.
+- Version: Cluster instances support Kafka 2.3.0, 2.7, and 3.x. The version cannot be changed once the instance is created.
- CPU Architecture: The x86 architecture is supported.
- Broker Flavor: Select a broker flavor that best fits your needs.
Maximum number of partitions per broker x Number of brokers = Maximum number of partitions of an instance. If the total number of partitions of all topics exceeds the upper limit of partitions, topic creation fails.
+ - For Brokers, specify the broker quantity. A 3.x instance can only have three brokers (not changeable).
- Storage space per broker: Disk type and size for storing the instance data.
The disk type cannot be changed once the Kafka instance is created.
+The storage space is consumed by message replicas, logs, and metadata. Specify the storage space based on the expected service message size, the number of replicas, and the reserved disk space. Each Kafka broker reserves 33 GB disk space for storing logs and metadata.
+Disks are formatted when an instance is created. As a result, the actual available disk space is 93% to 95% of the total disk space.
+The disk supports high I/O and ultra-high I/O types. For more information, see Disk Types and Performance.
+Each broker of a 3.x instance can only have 100 GB storage space (not changeable).
+ - Disk Encryption: Specify whether to enable disk encryption.
Enabling disk encryption improves data security, but slows down disk read/write. Disk encryption depends on Key Management Service (KMS). If you enable disk encryption, select a KMS key. This parameter cannot be modified once the Kafka instance is created.
+ - Capacity Threshold Policy: Policy used when the disk usage reaches the threshold. The capacity threshold is 95%.
- Automatically delete: Messages can be created and retrieved, but 10% of the earliest messages will be deleted to ensure sufficient disk space. This policy is suitable for scenarios where no service interruption can be tolerated. Data may be lost.
- Stop production: New messages cannot be created, but existing messages can still be retrieved. This policy is suitable for scenarios where no data loss can be tolerated.
+
+If you select Single-node, a v2.7 instance with one broker will be created.
+- Version: Kafka version, which can only be 2.7.
- CPU Architecture: The x86 architecture is supported.
- Broker Flavor: Select a broker flavor that best fits your needs.
- Brokers: The instance can have only one broker.
- Storage space per broker: Disk type and size for storing the instance data.
The disk type cannot be changed once the Kafka instance is created.
+The storage space is consumed by message replicas, logs, and metadata. Specify the storage space based on the expected service message size, the number of replicas, and the reserved disk space. Each Kafka broker reserves 33 GB disk space for storing logs and metadata.
+Disks are formatted when an instance is created. As a result, the actual available disk space is 93% to 95% of the total disk space.
+The disk supports high I/O and ultra-high I/O types. For more information, see Disk Types and Performance.
+ - Disk Encryption: Specify whether to enable disk encryption.
Enabling disk encryption improves data security, but slows down disk read/write. Disk encryption depends on Key Management Service (KMS). If you enable disk encryption, select a KMS key. This parameter cannot be modified once the Kafka instance is created.
+ - Capacity Threshold Policy: Policy used when the disk usage reaches the threshold. The capacity threshold is 95%.
- Automatically delete: Messages can be created and retrieved, but 10% of the earliest messages will be deleted to ensure sufficient disk space. This policy is suitable for scenarios where no service interruption can be tolerated. Data may be lost.
- Stop production: New messages cannot be created, but existing messages can still be retrieved. This policy is suitable for scenarios where no data loss can be tolerated.
+
+Figure 1 Single-node instances
+ - Configure the instance network parameters.
- Select the created VPC and subnet from the VPC drop-down list.
A VPC provides an isolated virtual network for your Kafka instances. You can configure and manage the network as required.
+ After the Kafka instance is created, its VPC and subnet cannot be changed.
+
+ - Select a security group.
A security group is a set of rules for accessing a Kafka instance. You can click Manage Security Group to view or create security groups on the network console.
+Before accessing a Kafka instance on the client, configure security group rules based on the access mode. For details about security group rules, see Table 2.
+
+ - Configure the instance access mode.
+ Table 2 Instance access modesPublic or Private Network
+ |
+Plaintext or Ciphertext
+ |
+Description
+ |
+
+
+Private Network Access
+
+
+ |
+Plaintext Access
+ |
+Clients connect to the Kafka instance without SASL authentication.
+Once enabled, private network access cannot be disabled. Enable plaintext or ciphertext access, or both.
+ |
+
+Ciphertext Access
+ |
+Clients connect to the Kafka instance with SASL authentication.
+Once enabled, private network access cannot be disabled. Enable plaintext or ciphertext access, or both. To disable ciphertext access, contact customer service.
+If you enable Ciphertext Access, specify a security protocol, SASL/PLAIN, username, and password.
+After an instance is created, disabling and re-enabling Ciphertext Access do not affect users.
+ |
+
+Public Network Access
+
+
+ |
+Plaintext Access
+ |
+Clients connect to the Kafka instance without SASL authentication.
+Enable or disable plaintext access, and configure addresses for public network access.
+ |
+
+Ciphertext Access
+ |
+Clients connect to the Kafka instance with SASL authentication.
+Enable or disable ciphertext access, and configure addresses for public network access.
+If you enable Ciphertext Access, specify a security protocol, SASL/PLAIN, username, and password.
+After an instance is created, disabling and re-enabling Ciphertext Access do not affect users.
+ |
+
+Public IP Addresses
+ |
+Select the number of public IP addresses as required.
+If EIPs are insufficient, click Create Elastic IP to create EIPs. Then, return to the Kafka console and click next to Public IP Address to refresh the public IP address list.
+Kafka instances only support IPv4 EIPs.
+ |
+
+
+
+
+ Ciphertext access is unavailable for single-node instances.
+
+The security protocol, SASL/PLAIN mechanism, username, and password are described as follows.
+
+Table 3 Ciphertext access parametersParameter
+ |
+Value
+ |
+Description
+ |
+
+
+Security Protocol
+
+ |
+SASL_SSL
+ |
+SASL is used for authentication. Data is encrypted with SSL certificates for high-security transmission.
+SCRAM-SHA-512 is enabled by default. To use PLAIN, enable SASL/PLAIN.
+What are SCRAM-SHA-512 and PLAIN mechanisms?
+- SCRAM-SHA-512: uses the hash algorithm to generate credentials for usernames and passwords to verify identities. SCRAM-SHA-512 is more secure than PLAIN.
- PLAIN: a simple username and password verification mechanism.
+ |
+
+SASL_PLAINTEXT
+ |
+SASL is used for authentication. Data is transmitted in plaintext for high performance.
+SCRAM-SHA-512 is enabled by default. To use PLAIN, enable SASL/PLAIN. SCRAM-SHA-512 authentication is recommended for plaintext transmission.
+ |
+
+SASL/PLAIN
+ |
+-
+ |
+- If SASL/PLAIN is disabled, the SCRAM-SHA-512 mechanism is used for username and password authentication.
- If SASL/PLAIN is enabled, both the SCRAM-SHA-512 and PLAIN mechanisms are supported. You can select either of them as required.
+The SASL/PLAIN setting cannot be changed once ciphertext access is enabled.
+ |
+
+Username and Password
+ |
+-
+ |
+Username and password used by the client to connect to the Kafka instance.
+A username should contain 4 to 64 characters, start with a letter, and contain only letters, digits, hyphens (-), and underscores (_).
+A password must meet the following requirements:
+- Contains 8 to 32 characters.
- Contains at least three types of the following characters: uppercase letters, lowercase letters, digits, and special characters `~! @#$ %^&*()-_=+\|[{}];:'",<.>? and spaces, and cannot start with a hyphen (-).
- Cannot be the username spelled forwards or backwards.
+The username cannot be changed once ciphertext access is enabled.
+ |
+
+
+
+
+ - Click Advanced Settings to configure more parameters.
- Configure Automatic Topic Creation.
This setting is disabled by default. You can enable or disable it as required.
+If this option is enabled, a topic will be automatically created when a message is produced in or consumed from a topic that does not exist. By default, the topic has parameters listed in Table 4.
+After you change the value of the log.retention.hours, default.replication.factor, or num.partitions parameter, the value will be used in later topics that are automatically created. For example, assume that num.partitions is changed to 5, an automatically created topic has parameters listed in Table 4.
+
+Table 4 Topic parametersParameter
+ |
+Default Value
+ |
+Modified Value
+ |
+
+
+Partitions
+ |
+3
+ |
+5
+ |
+
+Replicas
+ |
+3
+ |
+3
+ |
+
+Aging Time (h)
+ |
+72
+ |
+72
+ |
+
+Synchronous Replication
+ |
+Disabled
+ |
+Disabled
+ |
+
+Synchronous Flushing
+ |
+Disabled
+ |
+Disabled
+ |
+
+Message Timestamp
+ |
+CreateTime
+ |
+CreateTime
+ |
+
+Max. Message Size (bytes)
+ |
+10,485,760
+ |
+10,485,760
+ |
+
+
+
+
+ - Specify Tags.
Tags are used to identify cloud resources. When you have multiple cloud resources of the same type, you can use tags to classify them based on usage, owner, or environment.
+- If you have predefined tags, select a predefined pair of tag key and value. You can click View predefined tags to go to the Tag Management Service (TMS) console and view or create tags.
- You can also create new tags by specifying Tag key and Tag value.
+Up to 20 tags can be added to each Kafka instance. For details about the requirements on tags, see Configuring Kafka Instance Tags.
+ - Enter a Description of the instance for 0–1024 characters.
+ - Click Create.
- Confirm the instance information, and click Submit.
- Return to the instance list and check whether the Kafka instance has been created.
It takes 3 to 15 minutes to create an instance. During this period, the instance status is Creating.
+- If the instance is created successfully, its status changes to Running.
- If the instance is in the Creation failed state, delete it by referring to Deleting Kafka Instances. Then create a new one. If the instance creation fails again, contact customer service.
Instances that fail to be created do not occupy other resources.
+
+
+
+
+
+
diff --git a/docs/dms/umn/kafka-ug-180604014.html b/docs/dms/umn/kafka-ug-180604014.html
index 7e13f30c..6c762356 100644
--- a/docs/dms/umn/kafka-ug-180604014.html
+++ b/docs/dms/umn/kafka-ug-180604014.html
@@ -1,77 +1,264 @@
-Viewing an Instance
-ScenarioView detailed information about a Kafka instance on the DMS console, for example, the IP addresses and port numbers for accessing the instance.
-
- Procedure- Log in to the management console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+ Viewing and Modifying Basic Information of a Kafka Instance
+ ScenarioThis section describes how to view the details, and modify the basic information of a Kafka instance on the console.
+ After creating a Kafka instance, you can modify some of its parameters as required, including the instance name, description, security group, and capacity threshold policy.
+ Single-node instances do not support reconfiguration of private network access.
- - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Search for a Kafka instance by tag, status, name, ID, or connection address. Table 1 describes the various possible statuses of a Kafka instance.
- Table 1 Kafka instance status descriptionStatus
+
+PrerequisiteYou can modify basic information of a Kafka instance when the instance is in the Running state.
+
+Viewing Kafka Instance Details- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Search for a Kafka instance by specifying filters. You can filter instances by tag, status, name, version, flavor, used/available storage space, maximum partitions, billing mode, and enterprise project.Only enterprise users can filter instances by enterprise projects. For Kafka instance statuses, see Table 1.
+ Table 1 Kafka instance status descriptionStatus
|
-Description
+ | Description
|
-Creating
+ | Creating
|
-The instance is being created.
+ | The instance is being created.
|
-Running
+ | Creation failed
|
-The instance is running properly.
+ | The instance failed to be created.
+ |
+
+Running
+ |
+The instance is running properly.
Only instances in the Running state can provide services.
|
-Faulty
+ | Faulty
|
-The instance is not running properly.
+ | The instance is not running properly.
|
-Starting
+ | Restarting
|
-The status between Frozen and Running.
+ | The instance is being restarted.
|
-Restarting
+ | Changing
|
-The instance is being restarted.
+ | The instance specifications or public access configurations are being modified.
|
-Changing
+ | Change failed
|
-The instance specifications or public access configurations are being modified.
- |
-
-Change failed
- |
-The instance specifications or public access configurations failed to be modified.
- |
-
-Frozen
- |
-The instance is frozen.
- |
-
-Freezing
- |
-The status between Running and Frozen.
- |
-
-Upgrading
- |
-The instance is being upgraded.
- |
-
-Rolling back
- |
-The instance is being rolled back.
+ | The instance specifications or public access configurations failed to be modified.
+You cannot restart, delete, or modify an instance in the Change failed state. Contact customer service.
|
- - Click the name of the desired Kafka instance and view detailed information about the instance on the Basic Information tab page.
+ - Click the name of the desired Kafka instance and view detailed information about the instance on the Basic Information tab page.
Table 2 describes the parameters for connecting to a Kafka instance. For details about other parameters, see the Basic Information tab page of the Kafka instance on the console.
+
+Table 2 Connection parametersSection
+ |
+Parameter
+ |
+Sub-Parameter
+ |
+Description
+ |
+
+
+Connection
+
+
+
+
+
+ |
+Username
+ |
+-
+ |
+Username for accessing the instance with ciphertext access enabled.
+ |
+
+Private Network Access
+
+
+
+
+ |
+Plaintext Access
+ |
+Indicates whether plaintext access is enabled.
+ |
+
+Address (Private Network, Plaintext)
+ |
+This parameter is displayed only after you enable Plaintext Access.
+ |
+
+Ciphertext Access
+ |
+Indicates whether ciphertext access is enabled.
+This function is unavailable for single-node instances.
+ |
+
+Address (Private Network, Ciphertext)
+ |
+This parameter is displayed only after you enable Ciphertext Access.
+ |
+
+Security Protocol
+ |
+This parameter is displayed only after you enable Ciphertext Access.
+ |
+
+Public Network Access
+
+
+
+
+
+ |
+Toggle switch
+ |
+Indicates whether public access has been enabled.
+ |
+
+Plaintext Access
+ |
+This parameter is displayed only when Public Access is enabled.
+Indicates whether plaintext access is enabled.
+ |
+
+Address (Public Network, Plaintext)
+ |
+This parameter is displayed only after you enable Plaintext Access.
+ |
+
+Ciphertext Access
+ |
+This parameter is displayed only when Public Access is enabled.
+Indicates whether ciphertext access is enabled.
+This function is unavailable for single-node instances.
+ |
+
+Address (Public Network, Ciphertext)
+ |
+This parameter is displayed only after you enable Ciphertext Access.
+ |
+
+Security Protocol
+ |
+This parameter is displayed only after you enable Ciphertext Access.
+ |
+
+SASL Mechanism
+ |
+-
+ |
+This parameter is displayed only after you enable Ciphertext Access.
+ |
+
+SSL Certificate
+ |
+-
+ |
+This parameter is displayed only when SASL_SSL is enabled.
+Click Download to download the SSL certificate for accessing the instance.
+ |
+
+
+
+
+
+
+Modifying Basic Information of a Kafka Instance- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view its details.
- Modify the following parameters if needed:
+ Table 3 Modifiable Kafka parametersParameter
+ |
+How to Modify
+ |
+Result
+ |
+
+
+Instance Name
+ |
+Click , enter a new name, and click .
+Naming rules: 4–64 characters; starts with a letter; can contain only letters, digits, hyphens (-), and underscores (_).
+ |
+The modification result is displayed in the upper right corner of the page.
+ |
+
+Enterprise Project
+ |
+Click , select a new enterprise project from the drop-down list, and click .
+Only for enterprise users. Modifying this parameter does not restart the instance.
+ |
+The modification result is displayed in the upper right corner of the page.
+ |
+
+Description
+ |
+Click , enter a new description, and click .
+0 to 1024 characters.
+ |
+The modification result is displayed in the upper right corner of the page.
+ |
+
+Security Group
+ |
+Click , select a new security group from the drop-down list, and click .
+Modifying this parameter does not restart the instance.
+ |
+The modification result is displayed in the upper right corner of the page.
+ |
+
+Private Network Access
+ |
+See Configuring Plaintext or Ciphertext Access to Kafka Instances.
+ |
+You will be redirected to the Background Tasks page, which displays the modification progress and result.
+ |
+
+Public Access
+ |
+See Configuring Kafka Public Access.
+ |
+You will be redirected to the Background Tasks page, which displays the modification progress and result.
+ |
+
+Capacity Threshold Policy
+ |
+Click the desired policy. In the displayed Confirm dialog box, click OK.
+Modifying this parameter does not restart the instance.
+ |
+You will be redirected to the Background Tasks page, which displays the modification progress and result.
+ |
+
+Automatic Topic Creation
+ |
Enable or disable Automatic Topic Creation. In the displayed Confirm dialog box, click OK.
+Changing this option may restart the instance.
+ |
+You will be redirected to the Background Tasks page, which displays the modification progress and result.
+ |
+
+Cross-VPC Access
+ |
+See Accessing Kafka Using a VPC Endpoint Across VPCs and Accessing Kafka in a Public Network Using DNAT.
+ |
+The modification result is displayed in the upper right corner of the page.
+ |
+
+
+
+
+
diff --git a/docs/dms/umn/kafka-ug-180604015.html b/docs/dms/umn/kafka-ug-180604015.html
index 20640e21..ee836265 100644
--- a/docs/dms/umn/kafka-ug-180604015.html
+++ b/docs/dms/umn/kafka-ug-180604015.html
@@ -1,16 +1,16 @@
- Restarting an Instance
+ Restarting a Kafka Instance
ScenarioRestart one or more Kafka instances at a time on the DMS console.
When a Kafka instance is being restarted, message retrieval and creation requests of clients will be rejected.
PrerequisitesThe status of the Kafka instance you want to restart is either Running or Faulty.
- Procedure- Log in to the management console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+ Restarting a Kafka Instance- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
- - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Restart Kafka instances using one of the following methods:
- Select one or more Kafka instances and click Restart in the upper left corner.
- In the row containing the desired instance, click Restart.
- - In the Restart Instance dialog box, click Yes to restart the Kafka instance.
It takes 3 to 15 minutes to restart a Kafka instance. After the instance is successfully restarted, its status should be Running.
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Restart Kafka instances using one of the following methods:
- Select one or more Kafka instances and click Restart in the upper left corner.
- In the row containing the desired instance, click Restart.
- Click the desired Kafka instance to view its details. In the upper right corner, click Restart.
+ - In the Restart Instance dialog box, click Yes to restart the Kafka instance.
It takes 3 to 15 minutes to restart a Kafka instance. After the instance is successfully restarted, its status should be Running.
Restarting a Kafka instance only restarts the instance process and does not restart the VM where the instance is located.
diff --git a/docs/dms/umn/kafka-ug-180604016.html b/docs/dms/umn/kafka-ug-180604016.html
index 71495983..77f93ffe 100644
--- a/docs/dms/umn/kafka-ug-180604016.html
+++ b/docs/dms/umn/kafka-ug-180604016.html
@@ -1,25 +1,18 @@
- Deleting an Instance
- ScenarioOn the DMS console, you can delete one or more Kafka instances that have been created or failed to be created.
+ Deleting Kafka Instances
+ ScenarioDelete one or more Kafka instances at a time on the DMS console.
Deleting a Kafka instance will delete the data in the instance without any backup. Exercise caution when performing this operation.
PrerequisitesThe status of the Kafka instance you want to delete is Running or Faulty.
- Deleting Kafka Instances- Log in to the management console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+ Deleting Kafka Instances- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
- - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Delete Kafka instances using one of the following methods:
- Select one or more Kafka instances and click Delete in the upper left corner.
- In the row containing the Kafka instance to be deleted, choose More > Delete.
- Kafka instances in the Creating, Starting, Changing, Change failed, or Restarting state cannot be deleted.
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Delete pay-per-use Kafka instances in either of the following ways:
- Select one or more Kafka instances and click Delete in the upper left corner.
- In the row containing the Kafka instance to be deleted, choose More > Delete.
- Click the desired Kafka instance to view its details. In the upper right corner, choose More > Delete.
+ Kafka instances in the Creating, Changing, Change failed, or Restarting state cannot be deleted.
- - In the Delete Instance dialog box, click Yes to delete the Kafka instance.
It takes 1 to 60 seconds to delete a Kafka instance.
-
-
- Deleting Kafka Instances That Failed to Be Created- Log in to the management console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
-
- - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- If there are Kafka instances that failed to be created, Instance Creation Failures and quantity information will be displayed.
Instances that fail to be created do not occupy other resources.
-
- - Click Instance Creation Failures or the icon or quantity next to it.
- Delete Kafka instances that failed to be created in either of the following ways:
- To delete all Kafka instances that failed to be created at once, click Clear Failed Instance.
- To delete a single Kafka instance that failed to be created, click Delete in the row containing the chosen Kafka instance.
+ - In the Delete Instance dialog box, enter DELETE and click OK to delete the Kafka instance.
It takes 1 to 60 seconds to delete a Kafka instance.
diff --git a/docs/dms/umn/kafka-ug-180604017.html b/docs/dms/umn/kafka-ug-180604017.html
deleted file mode 100644
index fe0359a0..00000000
--- a/docs/dms/umn/kafka-ug-180604017.html
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
-Modifying the Information About an Instance
-After creating a Kafka instance, you can modify some parameters of the instance based on service requirements, including the instance name, description, security group, and capacity threshold policy.
- Procedure- Log in to the management console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
-
- - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- Modify the following parameters if needed:
- Instance Name
- Description
- Security Group
- Public Access (For details about how to change the public access configuration, see Configuring Public Access.)
- Capacity Threshold Policy (Modifying this setting will not restart the instance.)
- Automatic Topic Creation (Modifying this setting will restart the instance.)
-After the parameters are modified, view the modification result in one of the following ways:
-- If Capacity Threshold Policy, Public Access, or Automatic Topic Creation has been modified, you will be redirected to the Background Tasks page, which displays the modification progress and result.
- If Instance Name, Description, or Security Group has been modified, the modification result will be displayed on the upper right corner of the page.
-
-
-
-
-
diff --git a/docs/dms/umn/kafka-ug-180604018.html b/docs/dms/umn/kafka-ug-180604018.html
new file mode 100644
index 00000000..f2f20c35
--- /dev/null
+++ b/docs/dms/umn/kafka-ug-180604018.html
@@ -0,0 +1,160 @@
+
+
+Creating a Kafka Topic
+Topics store messages created by producers and subscribed by consumers. If automatic topic creation is not enabled during Kafka instance creation, you need to manually create topics. If automatic topic creation has been enabled for the instance, this operation is optional.
+ Automatic topic creation: A topic will be automatically created when a message is produced to or consumed from a topic that does not exist. By default, the topic has the parameters listed in Table 1.
+ After you change the value of the log.retention.hours, default.replication.factor, or num.partitions parameter, the new value will be used in topics that are automatically created later. For example, assume that num.partitions is changed to 5. An automatically created topic will then have the parameters listed in Table 1.
+
+ Table 1 Topic parametersParameter
+ |
+Default Value
+ |
+Modified Value
+ |
+
+
+Partitions
+ |
+3
+ |
+5
+ |
+
+Replicas
+ |
+3
+ |
+3
+ |
+
+Aging Time (h)
+ |
+72
+ |
+72
+ |
+
+Synchronous Replication
+ |
+Disabled
+ |
+Disabled
+ |
+
+Synchronous Flushing
+ |
+Disabled
+ |
+Disabled
+ |
+
+Message Timestamp
+ |
+CreateTime
+ |
+CreateTime
+ |
+
+Max. Message Size (bytes)
+ |
+10,485,760
+ |
+10,485,760
+ |
+
+
+
+
+ Methods that can be used to manually create a topic:
+
+ Constraints- The total number of partitions in topics is limited. When the partition quantity limit is reached, you can no longer create topics. The total number of partitions varies by instance specifications. For details, see Specifications.
- If an instance node is faulty, an internal service error may be reported when you query messages in a topic with only one replica. Therefore, you are advised not to use a topic with only one replica.
+
+ Method 1: Creating a Topic on the Console- Log in to the console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
+
+ - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- In the navigation pane, choose Topics. Then click Create Topic.
Figure 1 Creating a topic
+ - Specify the topic parameters listed in the following table.
+ Table 2 Topic parametersParameter
+ |
+Description
+ |
+
+
+Topic Name
+ |
+Customize a name that contains 3 to 200 characters, starts with a letter or underscore (_), and contains only letters, digits, periods (.), hyphens (-), and underscores (_).
+The name must be different from preset topics:
+- _consumer_offsets
- _transaction_state
- _trace
- _connect-status
- _connect-configs
- _connect-offsets
+Once the topic is created, you cannot modify its name.
+ |
+
+Partitions
+ |
+Number of partitions in the topic.
+If the number of partitions is the same as that of consumers, the larger the number of partitions, the higher the consumption concurrency.
+If this parameter is set to 1, messages will be retrieved in the FIFO order.
+Value range: 1 to 200
+Default value: 3
+ |
+
+Replicas
+ |
+A higher number of replicas delivers higher reliability. Data is automatically backed up on each replica. When one Kafka broker becomes faulty, data is still available on other brokers.
+If this parameter is set to 1, only one set of data is available.
+Value range: 1 to number of brokers
+ NOTE: If an instance node is faulty, an internal service error may be reported when you query messages in a topic with only one replica. Therefore, you are advised not to use a topic with only one replica.
+
+ |
+
+Aging Time (h)
+ |
+The period that messages are retained for. Consumers must retrieve messages before this period ends. Otherwise, the messages will be deleted and can no longer be consumed.
+Value range: 1 to 720
+Default value: 72
+ |
+
+Synchronous Replication
+ |
+A message is returned to the client only after the message creation request has been received and the message has been acknowledged by all replicas.
+After enabling this, set the parameter acks to all or -1 in the configuration file or production code on the producer client.
+If there is only one replica, synchronous replication cannot be enabled.
+ |
+
+Synchronous Flushing
+ |
+- Enabled: A message is immediately flushed to disk once it is created, bringing higher reliability.
- Disabled: A message is stored in the memory instead of being immediately flushed to disk once created.
+ |
+
+Message Timestamp
+ |
+Timestamp type of a message. Options:
+- CreateTime: time when the producer created the message.
- LogAppendTime: time when the broker appended the message to the log.
+ |
+
+Max. Message Size
+ |
+Maximum batch processing size allowed by Kafka. If message compression is enabled in the client configuration file or code of producers, this parameter indicates the size after compression.
+If this is increased and there are consumers older than 0.10.2, the consumers' fetch size must also be increased so that they can fetch record batches this large.
+Value range: 0 to 10,485,760
+ |
+
+
+
+
+ - Click OK.
+
+ Method 2: Creating a Topic by Using Kafka CLIIf your client is v2.2 or later, you can use kafka-topics.sh to create topics and manage topic parameters.
+ - If a topic name starts with a special character, for example, an underscore (_) or a number sign (#), monitoring data cannot be displayed.
- For an instance with ciphertext access enabled, if allow.everyone.if.no.acl.found is set to false, topics cannot be created through the client.
+
+
+
+
+
+
diff --git a/docs/dms/umn/kafka-ug-180604019.html b/docs/dms/umn/kafka-ug-180604019.html
index 75800fc9..dcbc91cc 100644
--- a/docs/dms/umn/kafka-ug-180604019.html
+++ b/docs/dms/umn/kafka-ug-180604019.html
@@ -1,26 +1,30 @@
-Deleting a Topic
+Deleting a Kafka Topic
Delete a topic using either of the following methods:
-
+
Prerequisites- A Kafka instance has been created, and a topic has been created in this instance.
- The Kafka instance is in the Running state.
- Deleting a Topic on the Console- Log in to the management console.
- Click
in the upper left corner to select a region. Select the region where your Kafka instance is located.
-
- - Click Service List and choose Application > Distributed Message Service. The Kafka instance list is displayed.
- Click the desired Kafka instance to view the instance details.
- Click the Topics tab.
- Delete topics using either of the following methods:
- Select one or more topics and click Delete Topic in the upper left corner.
- In the row containing the topic you want to delete, click Delete.
- - In the Delete Topic dialog box that is displayed, click Yes to delete the topic.
+ ConstraintIf your Kafka instances are connected using Logstash, stop Logstash before deleting topics. Otherwise, services may crash.
- Deleting a Topic with the Kafka CLIIf your Kafka client version is later than 2.2, you can use kafka-topics.sh to delete topics.
-
diff --git a/docs/dms/umn/kafka-ug-180604020.html b/docs/dms/umn/kafka-ug-180604020.html
index f94200c3..f2216070 100644
--- a/docs/dms/umn/kafka-ug-180604020.html
+++ b/docs/dms/umn/kafka-ug-180604020.html
@@ -1,15 +1,15 @@
- Accessing a Kafka Instance Without SASL
- This section describes how to use an open-source Kafka client to access a Kafka instance if SASL access is not enabled for the instance. There are two scenarios. For cross-VPC access, see Cross-VPC Access to a Kafka Instance. For DNAT-based access, see Using DNAT to Access a Kafka Instance.
- For details on how to use Kafka clients in different languages, visit https://cwiki.apache.org/confluence/display/KAFKA/Clients.
-
|
---|
|
---|
|
---|
|
---|
|
---|
|
---|
|
---|
|
---|
| |
---|
|
---|