From 3de68429940a9342620262799bff7a0cfa199d01 Mon Sep 17 00:00:00 2001 From: "Chen, Junjie" Date: Fri, 5 Jul 2024 08:59:46 +0000 Subject: [PATCH] DCS UMN 20240521 version MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Mützel, Andrea Co-authored-by: Chen, Junjie Co-committed-by: Chen, Junjie --- docs/dcs/umn/ALL_META.TXT.json | 4 +- docs/dcs/umn/CLASS.TXT.json | 4 +- docs/dcs/umn/dcs-faq-022025.html | 4 +- docs/dcs/umn/dcs-faq-0521005.html | 2 +- docs/dcs/umn/dcs-faq-0730047.html | 101 ++++++++++++++++++++++- docs/dcs/umn/dcs-faq-0730050.html | 2 +- docs/dcs/umn/dcs-faq-210721002.html | 5 +- docs/dcs/umn/dcs-ug-0326008.html | 3 +- docs/dcs/umn/dcs-ug-0326011.html | 120 +++++++++++++++++++++++++--- docs/dcs/umn/dcs-ug-0326019.html | 38 ++++----- docs/dcs/umn/dcs-ug-0326026.html | 7 +- docs/dcs/umn/dcs-ug-190812001.html | 2 +- docs/dcs/umn/dcs-ug-210330002.html | 5 +- 13 files changed, 250 insertions(+), 47 deletions(-) diff --git a/docs/dcs/umn/ALL_META.TXT.json b/docs/dcs/umn/ALL_META.TXT.json index 2fa3a692..93394609 100644 --- a/docs/dcs/umn/ALL_META.TXT.json +++ b/docs/dcs/umn/ALL_META.TXT.json @@ -2953,7 +2953,7 @@ "node_id":"dcs-faq-022025.xml", "product_code":"dcs", "code":"144", - "des":"Single-node and Proxy Cluster:Each instance has only one IP address and one domain name address. The addresses remain unchanged before and after master/standby switchover", + "des":"Single-node, and Proxy Cluster:Each instance has only one IP address and one domain name address. The addresses remain unchanged before and after master/standby switchove", "doc_type":"usermanual", "kw":"Should I Use a Domain Name or an IP Address to Connect to a DCS Redis Instance?,Client and Network C", "search_title":"", @@ -3730,7 +3730,7 @@ "node_id":"dcs-faq-0730047.xml", "product_code":"dcs", "code":"181", - "des":"You are advised to change the instance specifications during off-peak hours because specification modification has the following impacts:Impact of scaling:Single-node and", + "des":"You are advised to change the instance specifications during off-peak hours because specification modification has the following impacts:Supported instance type changes:F", "doc_type":"usermanual", "kw":"Are Services Interrupted During Specification Modification?,Instance Scaling and Upgrade,User Guide", "search_title":"", diff --git a/docs/dcs/umn/CLASS.TXT.json b/docs/dcs/umn/CLASS.TXT.json index 0e0126c0..be635a08 100644 --- a/docs/dcs/umn/CLASS.TXT.json +++ b/docs/dcs/umn/CLASS.TXT.json @@ -1287,7 +1287,7 @@ "code":"143" }, { - "desc":"Single-node and Proxy Cluster:Each instance has only one IP address and one domain name address. The addresses remain unchanged before and after master/standby switchover", + "desc":"Single-node, and Proxy Cluster:Each instance has only one IP address and one domain name address. 
The addresses remain unchanged before and after master/standby switchove", "product_code":"dcs", "title":"Should I Use a Domain Name or an IP Address to Connect to a DCS Redis Instance?", "uri":"dcs-faq-022025.html", @@ -1620,7 +1620,7 @@ "code":"180" }, { - "desc":"You are advised to change the instance specifications during off-peak hours because specification modification has the following impacts:Impact of scaling:Single-node and", + "desc":"You are advised to change the instance specifications during off-peak hours because specification modification has the following impacts:Supported instance type changes:F", "product_code":"dcs", "title":"Are Services Interrupted During Specification Modification?", "uri":"dcs-faq-0730047.html", diff --git a/docs/dcs/umn/dcs-faq-022025.html b/docs/dcs/umn/dcs-faq-022025.html index e1b5c659..186df6f0 100644 --- a/docs/dcs/umn/dcs-faq-022025.html +++ b/docs/dcs/umn/dcs-faq-022025.html @@ -1,8 +1,8 @@

Should I Use a Domain Name or an IP Address to Connect to a DCS Redis Instance?

-
  • Single-node and Proxy Cluster:

    Each instance has only one IP address and one domain name address. The addresses remain unchanged before and after master/standby switchover. You can use either address to connect to the instance.

    -
  • Master/standby:

    Each instance has one IP address and two domain name addresses. One of the domain name addresses is used only for processing read requests. The addresses remain unchanged after master/standby switchover. You can use any address to connect to the instance.

    +
  • Single-node and Proxy Cluster:

      Each instance has only one IP address and one domain name address. The addresses remain unchanged before and after master/standby switchover. You can use either address to connect to the instance.

      +
    • Master/Standby:

      Each instance has one IP address and two domain name addresses. One of the domain name addresses is used only for processing read requests. The addresses remain unchanged after master/standby switchover. You can use any address to connect to the instance.

      When you use a domain name address, distinguish between read and write requests. If you use the Connection Address or IP Address, both read and write requests are processed. If you use the Read-only Address, only read requests are processed (see the sketch after this list).

    • Redis Cluster:

      A Redis Cluster instance has multiple pairs of master and replica IP addresses and one domain name address. You can use any address to connect to the instance.

      Any node in the cluster can receive requests, and the connected node routes each request to the correct node. Configure multiple or all of the IP addresses on the client to prevent a single point of failure.
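      For master/standby instances, the read/write split between the two domain addresses can be illustrated with a minimal Lettuce (Java) sketch. This is only an illustration: the two addresses are placeholders for the Connection Address and Read-only Address shown on the instance details page, not real endpoints.

```java
import io.lettuce.core.RedisClient;
import io.lettuce.core.api.sync.RedisCommands;

public class ReadWriteSplit {
    public static void main(String[] args) {
        // Placeholder addresses: substitute the Connection Address and
        // Read-only Address from your instance's details page.
        RedisClient rwClient = RedisClient.create("redis://<connection-address>:6379");
        RedisClient roClient = RedisClient.create("redis://<read-only-address>:6379");

        RedisCommands<String, String> rw = rwClient.connect().sync();
        RedisCommands<String, String> ro = roClient.connect().sync();

        rw.set("greeting", "hello");            // writes must use the read/write address
        System.out.println(ro.get("greeting")); // reads may use the read-only address

        rwClient.shutdown();
        roClient.shutdown();
    }
}
```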

diff --git a/docs/dcs/umn/dcs-faq-0521005.html b/docs/dcs/umn/dcs-faq-0521005.html
index f965a681..9eb062cf 100644
--- a/docs/dcs/umn/dcs-faq-0521005.html
+++ b/docs/dcs/umn/dcs-faq-0521005.html
@@ -13,7 +13,7 @@

      Split big keys.

      Scenarios:

      -
      • If the big key is a String, you can split it into several key-value pairs and use MGET or a pipeline consisting of multiple GET operations to obtain the values. In this way, the pressure of a single operation can be split. For a cluster instance, the operation pressure can be evenly distributed to multiple shards, reducing the impact on a single shard.
      • If the big key contains multiple elements, and the elements must be operated together, the big key cannot be split. You can remove the big key from Redis and store it on other storage media instead. This scenario should be avoided by design.
      • If the big key contains multiple elements, and only some elements need to be operated each time, separate the elements. Take a Hash key as an example. Each time you run the HGET or HSET command, the result of the hash value modulo N (customized on the client) determines which key the field falls on. This algorithm is similar to that used for calculating slots in Redis Cluster.
      +
      • If the big key is a String, you can split it into several key-value pairs and use MGET or a pipeline consisting of multiple GET operations to obtain the values. In this way, the pressure of a single operation can be split. For a cluster instance, key-value pairs can be automatically distributed to multiple shards, reducing the impact on a single shard.
      • If the big key contains multiple elements, and the elements must be operated together, the big key cannot be split. You can remove the big key from Redis and store it on other storage media instead. This scenario should be avoided by design.
      • If the big key contains multiple elements, and only some elements need to be operated each time, separate the elements. Take a Hash key as an example. Each time you run the HGET or HSET command, the result of the hash value modulo N (customized on the client) determines which key the field falls on. This algorithm is similar to that used for calculating slots in Redis Cluster.
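      A minimal sketch of the modulo-N separation described in the last item, assuming the Lettuce (Java) client. The key names, the bucket count N = 16, and the subKey helper are illustrative assumptions, not part of any DCS API.

```java
import io.lettuce.core.RedisClient;
import io.lettuce.core.api.sync.RedisCommands;

public class BigHashSplit {
    private static final int N = 16; // bucket count, customized on the client

    // The field's hash modulo N decides which sub-key the field falls on,
    // similar to the slot calculation in Redis Cluster.
    static String subKey(String bigKey, String field) {
        return bigKey + ":" + Math.floorMod(field.hashCode(), N);
    }

    public static void main(String[] args) {
        RedisClient client = RedisClient.create("redis://<instance-address>:6379");
        RedisCommands<String, String> redis = client.connect().sync();

        // HGET/HSET now touch one small sub-key instead of one big key.
        redis.hset(subKey("user:profile", "field42"), "field42", "value");
        System.out.println(redis.hget(subKey("user:profile", "field42"), "field42"));

        client.shutdown();
    }
}
```

      On a cluster instance, the sub-keys also hash to different slots, so the load spreads across shards.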

      Store big keys on other storage media.

diff --git a/docs/dcs/umn/dcs-faq-0730047.html b/docs/dcs/umn/dcs-faq-0730047.html
index e239a13c..4a65b263 100644
--- a/docs/dcs/umn/dcs-faq-0730047.html
+++ b/docs/dcs/umn/dcs-faq-0730047.html
@@ -2,13 +2,108 @@

      Are Services Interrupted During Specification Modification?

      You are advised to change the instance specifications during off-peak hours because specification modification has the following impacts:

      -
      • Impact of scaling:
        • Single-node and master/standby
          • A DCS Redis 4.0/5.0/6.0 instance will be disconnected for several seconds and remain read-only for about 1 minute.
          • A DCS Redis 3.0 instance will be disconnected and remain read-only for 5 to 30 minutes.
          • For scaling up, only the memory of the instance is expanded. The CPU processing capability is not improved.
          • Data of single-node instances may be lost because they do not support data persistence. After scaling, check whether the data is complete and import data if required.
          • Backup records of master/standby instances cannot be used after scaling down.
          -
        • Cluster
          • If the shard quantity is not decreased, the instance can always be connected, but the CPU usage will increase, compromising performance by up to 20%, and the latency will increase during data migration.
          • During scaling up, new Redis Server nodes are added, and data is automatically balanced to the new nodes.
          • Nodes will be deleted if the shard quantity decreases. To prevent disconnection, ensure that the deleted nodes are not directly referenced in your application.
          • Ensure that the used memory of each node is less than 70% of the maximum memory per node of the new flavor. Otherwise, you cannot perform the scale-in.
          • If the memory becomes full during scaling due to a large amount of data being written, scaling will fail. Modify specifications during off-peak hours.
          • Scaling involves data migration. The latency for accessing the key being migrated increases. For a Redis Cluster instance, ensure that the client can properly process the MOVED and ASK commands. Otherwise, requests will fail.
          • Before scaling, perform cache analysis to ensure that no big keys (≥ 512 MB) exist in the instance. Otherwise, scaling may fail.
          • Backup records created before scaling cannot be restored.
          +

          Change of the Instance Type

          • Supported instance type changes:
            • From single-node to master/standby: Supported by Redis 3.0 but not by Redis 4.0/5.0/6.0.
            • From master/standby to Proxy Cluster: Supported by Redis 3.0 but not by Redis 4.0/5.0/6.0.

              If the data of a master/standby DCS Redis 3.0 instance is stored in multiple databases, or in non-DB0 databases, the instance cannot be changed to the Proxy Cluster type. A master/standby instance can be changed to the Proxy Cluster type only if its data is stored only on DB0.

              +
            • From cluster types to other types: Not supported.
            +
          • Impact of instance type changes:
            • From single-node to master/standby for a DCS Redis 3.0 instance:

              The instance cannot be connected for several seconds and remains read-only for about 1 minute.

              +
            • From master/standby to Proxy Cluster for a DCS Redis 3.0 instance:

              The instance cannot be connected and remains read-only for 5 to 30 minutes.

          -
          • Notes on changing the number of replicas of a DCS Redis instance:

            Deleting replicas interrupts connections. If your application cannot reconnect to Redis or handle exceptions, you need to restart the application after scaling.

            +
          +

          Scaling

          • The following table lists the scaling options supported by different DCS instances.

            Table 1 Scaling options supported by different DCS instances

            Cache Engine | Single-Node     | Master/Standby  | Redis Cluster                                        | Proxy Cluster
            Redis 3.0    | Scaling up/down | Scaling up/down | N/A                                                  | Scaling out
            Redis 4.0    | Scaling up/down | Scaling up/down | Scaling up/out, down/in, and replica quantity change | N/A
            Redis 5.0    | Scaling up/down | Scaling up/down | Scaling up/out, down/in, and replica quantity change | N/A
            Redis 6.0    | Scaling up/down | Scaling up/down | Scaling up/down, out/in, and replica quantity change | N/A

            If the reserved memory of a DCS Redis 3.0 instance is insufficient, the scaling may fail when the memory is used up.

            Change the replica quantity and capacity separately.
          • Impact of scaling

            Table 2 Impact of scaling

            Single-node and master/standby (scaling up/down):
            • During scaling up, a DCS Redis 4.0/5.0/6.0 instance will be disconnected for several seconds and remain read-only for about 1 minute. During scaling down, connections will not be interrupted.
            • A DCS Redis 3.0 instance will be disconnected for several seconds and remain read-only for 5 to 30 minutes.
            • For scaling up, only the memory of the instance is expanded. The CPU processing capability is not improved.
            • Single-node DCS instances do not support data persistence, so scaling may compromise data reliability. After scaling, check whether the data is complete and import data if required. If there is important data, use a migration tool to migrate the data to other instances for backup.
            • For master/standby instances, backup records created before a scale-down cannot be used afterwards. If necessary, download the backup file in advance or back up the data again after the scale-down.

            Proxy Cluster and Redis Cluster (scaling up/down, out/in):
            • Scaling out by adding shards:
              • Scaling out does not interrupt connections but occupies CPU resources, decreasing performance by up to 20%.
              • When the shard quantity increases, new Redis Server nodes are added, and data is automatically balanced to the new nodes, which increases access latency.
            • Scaling in by reducing shards:
              • When the shard quantity decreases, nodes are deleted. Before scaling in a Redis Cluster instance, ensure that your application does not directly reference the nodes to be deleted; otherwise, service access exceptions may occur.
              • Deleting nodes interrupts connections. If your application cannot reconnect to Redis or handle exceptions, you may need to restart the application after scaling.
            • Scaling up by increasing the shard size without changing the shard quantity:
              • If the VM hosting a node does not have enough memory, the node is migrated to another VM. During the migration, service connections may be intermittently interrupted and the instance may become read-only.
              • If the VM memory is sufficient, increasing the node capacity does not affect services.
            • Scaling down by reducing the shard size without changing the shard quantity has no impact.
            • To scale down an instance, ensure that the used memory of each node is less than 70% of the maximum memory per node of the new flavor.
            • The flavor change may involve data migration, during which the latency for accessing the keys being migrated increases. For a Redis Cluster instance, ensure that the client can process the MOVED and ASK commands; otherwise, requests will fail.
            • If the memory becomes full during scaling due to a large amount of data being written, scaling will fail.
            • Before scaling, check for big keys through Cache Analysis. Redis limits key migration: if the instance has any single key greater than 512 MB, scaling will fail when big key migration between nodes times out. The bigger the key, the more likely the migration is to fail.
            • Before scaling a Redis Cluster instance, ensure that automated cluster topology refresh is enabled. If it is disabled, you will need to restart the client after scaling. If you use Lettuce, see the sketch after this table for how to enable automated refresh.
            • Backup records created before scaling cannot be used. If necessary, download the backup file in advance or back up the data again after scaling.
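            The following is a minimal sketch of enabling automated cluster topology refresh with Lettuce, as recommended in the table above. The address and the 30-second refresh period are illustrative assumptions; tune them for your environment.

```java
import io.lettuce.core.RedisURI;
import io.lettuce.core.cluster.ClusterClientOptions;
import io.lettuce.core.cluster.ClusterTopologyRefreshOptions;
import io.lettuce.core.cluster.RedisClusterClient;

import java.time.Duration;

public class TopologyRefresh {
    public static void main(String[] args) {
        RedisClusterClient client = RedisClusterClient.create(
                RedisURI.create("redis://<cluster-address>:6379"));

        // Refresh the slot-to-node map periodically and on redirects, so the
        // client follows the new topology after shards are added or removed.
        ClusterTopologyRefreshOptions refresh = ClusterTopologyRefreshOptions.builder()
                .enablePeriodicRefresh(Duration.ofSeconds(30))
                .enableAllAdaptiveRefreshTriggers() // includes MOVED and ASK redirects
                .build();

        client.setOptions(ClusterClientOptions.builder()
                .topologyRefreshOptions(refresh)
                .build());

        System.out.println(client.connect().sync().ping());
        client.shutdown();
    }
}
```

            With adaptive triggers enabled, the client refreshes the topology as soon as it sees MOVED or ASK redirects during scaling, instead of waiting for the next periodic refresh.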
    • Set Instance Type to Single-node, Master/Standby, Proxy Cluster, or Redis Cluster.
    • Set CPU Architecture to x86.
    • Set Replicas. The default value is 2 (including the master).

      This parameter is displayed only when you select Redis 4.0/5.0/6.0 and the instance type is master/standby or Redis Cluster.

      +
    • If Redis 4.0 or later and the Proxy Cluster or Redis Cluster instance type are selected, the Sharding parameter is displayed. Options:
      • Use default: Use the default sharding specifications.
      • Customize: Customize the size of each shard and then select corresponding instance specifications.
    • Select an AZ.

      If the instance type is master/standby, Proxy Cluster, or Redis Cluster, Standby AZ is displayed. Select a standby AZ for the standby node of the instance.

      • To accelerate access, deploy your instance and your application in the same AZ.
      • There are multiple AZs in each region. If resources are insufficient in an AZ, the AZ will be unavailable. In this case, select another AZ.
      @@ -31,7 +32,7 @@
      • On the instance creation page, the default parameter templates are used by default.
      • If you use a custom template, the selected cache engine version and instance type must match those of the template. For details about using custom templates, see Creating a Custom Parameter Template.

    • Choose whether to enable Auto Backup.

      This parameter is displayed only when the instance type is master/standby or cluster. For more information on how to configure a backup policy, see Overview.

    • Specify the number of instances to create.
    • Enter an instance name.

      The value of Name contains at least 4 characters. When you create multiple instances at a time, the instances are named in the format custom name-n, where n starts from 000 and is incremented by 1. For example, if you create two instances and set the name to dcs_demo, the two instances are named dcs_demo-000 and dcs_demo-001, respectively.

    • Click More Settings to configure more parameters.

      1. Enter a description of the instance.
      2. Rename critical commands.

        Command Renaming is displayed for Redis 4.0 and later. Currently, you can only rename the COMMAND, KEYS, FLUSHDB, FLUSHALL, HGETALL, SCAN, HSCAN, SSCAN, and ZSCAN commands.

      3. Specify the maintenance window.

        Choose a window for DCS O&M personnel to perform maintenance on your instance. You will be contacted before any maintenance activities are performed.

diff --git a/docs/dcs/umn/dcs-ug-0326011.html b/docs/dcs/umn/dcs-ug-0326011.html
index 2988a990..9db1c0a9 100644
--- a/docs/dcs/umn/dcs-ug-0326011.html
+++ b/docs/dcs/umn/dcs-ug-0326011.html
@@ -32,7 +32,7 @@

      N/A

      -

      Scaling up

      +

      Scaling out

      Redis 4.0

      @@ -41,7 +41,7 @@

      Scaling up/down

      -

      Scaling up

      +

      Scaling up/out, down/in, and replica quantity change

      N/A

      @@ -52,7 +52,7 @@

      Scaling up/down

      -

      Scaling up

      +

      Scaling up/out, down/in, and replica quantity change

      N/A

      @@ -63,7 +63,7 @@

      Scaling up/down

      -

      Scaling up/down, out/in

      +

      Scaling up/down, out/in, and replica quantity change

      N/A

      @@ -72,14 +72,116 @@

    If the reserved memory of a DCS Redis 3.0 instance is insufficient, the scaling may fail when the memory is used up.

    +

    Change the replica quantity and capacity separately.

    -
  • Impact of scaling:
    • Single-node and master/standby
      • A DCS Redis 4.0/5.0/6.0 instance will be disconnected for several seconds and remain read-only for about 1 minute.
      • A DCS Redis 3.0 instance will be disconnected and remain read-only for 5 to 30 minutes.
      • For scaling up, only the memory of the instance is expanded. The CPU processing capability is not improved.
      • Data of single-node instances may be lost because they do not support data persistence. After scaling, check whether the data is complete and import data if required.
      • Backup records of master/standby instances cannot be used after scaling down.
      -
    • Cluster
      • If the shard quantity is not decreased, the instance can always be connected, but the CPU usage will increase, compromising performance by up to 20%, and the latency will increase during data migration.
      • During scaling up, new Redis Server nodes are added, and data is automatically balanced to the new nodes.
      • Nodes will be deleted if the shard quantity decreases. To prevent disconnection, ensure that the deleted nodes are not directly referenced in your application.
      • Ensure that the used memory of each node is less than 70% of the maximum memory per node of the new flavor. Otherwise, you cannot perform the scale-in.
      • If the memory becomes full during scaling due to a large amount of data being written, scaling will fail. Modify specifications during off-peak hours.
      • Scaling involves data migration. The latency for accessing the key being migrated increases. For a Redis Cluster instance, ensure that the client can properly process the MOVED and ASK commands. Otherwise, requests will fail.
      • Before scaling, perform cache analysis to ensure that no big keys (≥ 512 MB) exist in the instance. Otherwise, scaling may fail.
      • Backup records created before scaling cannot be restored.
      -
    -
  • Notes on changing the number of replicas of a DCS Redis instance:

    Deleting replicas interrupts connections. If your application cannot reconnect to Redis or handle exceptions, you need to restart the application after scaling.

    +
  • Impact of scaling

    Table 2 Impact of scaling

    Single-node and master/standby (scaling up/down):
    • During scaling up, a DCS Redis 4.0/5.0/6.0 instance will be disconnected for several seconds and remain read-only for about 1 minute. During scaling down, connections will not be interrupted.
    • A DCS Redis 3.0 instance will be disconnected for several seconds and remain read-only for 5 to 30 minutes.
    • For scaling up, only the memory of the instance is expanded. The CPU processing capability is not improved.
    • Single-node DCS instances do not support data persistence, so scaling may compromise data reliability. After scaling, check whether the data is complete and import data if required. If there is important data, use a migration tool to migrate the data to other instances for backup.
    • For master/standby instances, backup records created before a scale-down cannot be used afterwards. If necessary, download the backup file in advance or back up the data again after the scale-down.

    Proxy Cluster and Redis Cluster (scaling up/down, out/in):
    • Scaling out by adding shards:
      • Scaling out does not interrupt connections but occupies CPU resources, decreasing performance by up to 20%.
      • When the shard quantity increases, new Redis Server nodes are added, and data is automatically balanced to the new nodes, which increases access latency.
    • Scaling in by reducing shards:
      • When the shard quantity decreases, nodes are deleted. Before scaling in a Redis Cluster instance, ensure that your application does not directly reference the nodes to be deleted; otherwise, service access exceptions may occur.
      • Deleting nodes interrupts connections. If your application cannot reconnect to Redis or handle exceptions, you may need to restart the application after scaling.
    • Scaling up by increasing the shard size without changing the shard quantity:
      • If the VM hosting a node does not have enough memory, the node is migrated to another VM. During the migration, service connections may be intermittently interrupted and the instance may become read-only.
      • If the VM memory is sufficient, increasing the node capacity does not affect services.
    • Scaling down by reducing the shard size without changing the shard quantity has no impact.
    • To scale down an instance, ensure that the used memory of each node is less than 70% of the maximum memory per node of the new flavor.
    • The flavor change may involve data migration, during which the latency for accessing the keys being migrated increases. For a Redis Cluster instance, ensure that the client can process the MOVED and ASK commands; otherwise, requests will fail.
    • If the memory becomes full during scaling due to a large amount of data being written, scaling will fail.
    • Before scaling, check for big keys through Cache Analysis. Redis limits key migration: if the instance has any single key greater than 512 MB, scaling will fail when big key migration between nodes times out. The bigger the key, the more likely the migration is to fail.
    • Before scaling a Redis Cluster instance, ensure that automated cluster topology refresh is enabled. If it is disabled, you will need to restart the client after scaling. For details about how to enable automated refresh if you use Lettuce, see an example of using Lettuce to connect to a Redis Cluster instance.
    • Backup records created before scaling cannot be used. If necessary, download the backup file in advance or back up the data again after scaling.
-

Procedure

  1. Log in to the DCS console.
  2. Click in the upper left corner and select a region and a project.
  3. In the navigation pane, choose Cache Manager.
  4. Choose More > Modify Specifications in the row containing the DCS instance.
  5. On the Modify Specifications page, select the desired specification.
  6. Click Submit to start modifying the DCS instance.

    You can go to the Background Tasks page to view the modification status. For more information, see Viewing Background Tasks.

    +

    Procedure

    1. Log in to the DCS console.
    2. Click in the upper left corner and select a region and a project.
    3. In the navigation pane, choose Cache Manager.
    4. Choose More > Modify Specifications in the row containing the DCS instance.
    5. On the Modify Specifications page, select the desired specification.

      For a master/standby DCS Redis 4.0/5.0 instance or a Redis Cluster DCS Redis 4.0/5.0/6.0 instance, you can choose to change by specification or replica quantity.

      +
      +

    6. Set Apply Change to Now or During maintenance.

      Select During maintenance if the modification interrupts connections.

      Table 3 Scenarios where specification modification interrupts connections

      Change                                                 | When Connections Are Interrupted
      Scaling up a single-node or master/standby instance    | Memory is increased from a size smaller than 8 GB to 8 GB or larger.
      Scaling down a Proxy Cluster or Redis Cluster instance | The number of shards is decreased.
      Deleting replicas                                      | Replicas are deleted from a master/standby or Redis Cluster instance.
      • If the modification does not interrupt connections, it will be applied immediately even if you select During maintenance.
      • The modification cannot be withdrawn once submitted. To reschedule a modification, you can change the maintenance window. The maintenance window can be changed up to three times.
      • Modifications on DCS Redis 3.0 instances can only be applied immediately.
      • If you apply the change during maintenance, the change starts at any time within the maintenance window, rather than at the start time of the window.
      • If a large amount of data needs to be migrated when you scale down a cluster instance, the operation may not be completed within the maintenance window.
      +
      +

    7. Click Next. In the dialog box that is displayed, click Yes.
    8. Confirm the change details and view the risk check result.

      If any risk is found in the check, the instance may fail to be modified. For details, see Table 4.

      Table 4 Risk check items

      • Dataset memory distribution check (applies only to Proxy Cluster and Redis Cluster instances)
        Reason for check: Specification modification of a cluster instance involves data migration between nodes. If an instance has any key bigger than 512 MB, the modification will fail when big key migration between nodes times out. If the instance dataset memory is unevenly distributed among nodes and the difference is greater than 512 MB, the instance has a big key and the change may fail.
        Solution: Analyze and handle big keys before proceeding with the change.
      • Memory usage check
        Reason for check: If the memory usage of a node is greater than 90%, keys may be evicted or the change may fail.
        Solution: If the memory usage is too high, optimize the memory by optimizing big keys, scanning for expired keys, or deleting some keys.
      • Network input traffic check (applies only to single-node and master/standby instances)
        Reason for check: The change may fail if the network input traffic is too heavy and the write buffer overflows.
        Solution: Perform the change during off-peak hours.
      • CPU usage check
        Reason for check: If the node CPU usage within 5 minutes is greater than 90%, the change may fail.
        Solution: Perform the change during off-peak hours.
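      As a rough pre-check against the memory usage item above, the following hedged Lettuce (Java) sketch reads INFO memory on a node and compares used_memory with an assumed per-node maximum of the new flavor. The address and the 4 GB figure are placeholders, not values from the DCS console.

```java
import io.lettuce.core.RedisClient;
import io.lettuce.core.api.sync.RedisCommands;

public class ScaleDownPreCheck {
    public static void main(String[] args) {
        // Assumed per-node maximum memory of the target flavor (placeholder).
        long targetNodeMaxBytes = 4L * 1024 * 1024 * 1024;

        RedisClient client = RedisClient.create("redis://<node-address>:6379");
        RedisCommands<String, String> redis = client.connect().sync();

        // INFO memory returns lines such as "used_memory:123456".
        long usedMemory = 0;
        for (String line : redis.info("memory").split("\r\n")) {
            if (line.startsWith("used_memory:")) {
                usedMemory = Long.parseLong(line.substring("used_memory:".length()).trim());
            }
        }

        double ratio = (double) usedMemory / targetNodeMaxBytes;
        System.out.printf("used_memory is %.0f%% of the new per-node maximum%n", ratio * 100);
        if (ratio >= 0.7) {
            System.out.println("Above 70%: the scale-down is likely to be rejected.");
        }
        client.shutdown();
    }
}
```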

    9. Click Submit to start modifying the DCS instance.

      You can go to the Background Tasks page to view the modification status. For more information, see Viewing Background Tasks.

      Specification modification of a single-node or master/standby DCS instance takes about 5 to 30 minutes to complete, while that of a cluster DCS instance takes a longer time. After an instance is successfully modified, it changes to the Running state.
      • If the specification modification of a single-node DCS instance fails, the instance is temporarily unavailable for use. The specification remains unchanged. Some management operations (such as parameter configuration and specification modification) are temporarily not supported. After the specification modification is completed in the backend, the instance changes to the new specification and becomes available for use again.
      • If the specification modification of a master/standby or cluster DCS instance fails, the instance is still available for use with its original specifications. Some management operations (such as parameter configuration, backup, restoration, and specification modification) are temporarily not supported. Remember not to read or write more data than allowed by the original specifications; otherwise, data loss may occur.
      • After the specification modification is successful, the new specification of the instance takes effect.
diff --git a/docs/dcs/umn/dcs-ug-0326019.html b/docs/dcs/umn/dcs-ug-0326019.html
index 23020452..ee761c85 100644
--- a/docs/dcs/umn/dcs-ug-0326019.html
+++ b/docs/dcs/umn/dcs-ug-0326019.html
@@ -596,7 +596,7 @@
      NOTE:

      Slow queries caused by the MIGRATE, SLAVEOF, CONFIG, BGSAVE, and BGREWRITEAOF commands are not counted.

      • 1: yes
      • 0: no

      DCS Redis instance

      @@ -784,7 +784,7 @@

      Instance Node Status

      Status of instance nodes. If the status is normal, the value is 0. If the status is abnormal, the value is 1.

      -

      @@ -1210,7 +1210,7 @@
Redis Server Metrics of DCS Redis Instances

  • The Monitored Object column lists instances that support the corresponding metrics.
@@ -1378,7 +1378,7 @@

Memory Fragmentation Ratio

Current memory fragmentation, which is the ratio of used_memory_rss to used_memory.

≥ 0

@@ -1598,7 +1598,7 @@
NOTE:

Slow queries caused by the MIGRATE, SLAVEOF, CONFIG, BGSAVE, and BGREWRITEAOF commands are not counted.

  • 1: yes
  • 0: no

Redis Server of a cluster instance

Redis Server of a master/standby DCS Redis 4.0/5.0/6.0 instance

@@ -1624,7 +1624,7 @@

SADD

Number of SADD commands processed per second

Unit: count/s

0–500,000

@@ -1639,7 +1639,7 @@

SMEMBERS

Number of SMEMBERS commands processed per second

Unit: count/s

0–500,000

@@ -1667,7 +1667,7 @@

DEL

Number of DEL commands processed per second

Unit: count/s

0–500,000

@@ -1682,7 +1682,7 @@

EXPIRE

Number of EXPIRE commands processed per second

Unit: count/s

0–500,000

@@ -1697,7 +1697,7 @@

GET

Number of GET commands processed per second

Unit: count/s

0–500,000

@@ -1712,7 +1712,7 @@

HDEL

Number of HDEL commands processed per second

Unit: count/s

0–500,000

@@ -1727,7 +1727,7 @@

HGET

Number of HGET commands processed per second

Unit: count/s

0–500,000

@@ -1742,7 +1742,7 @@

HMGET

Number of HMGET commands processed per second

Unit: count/s

0–500,000

@@ -1757,7 +1757,7 @@

HMSET

Number of HMSET commands processed per second

Unit: count/s

0–500,000

@@ -1772,7 +1772,7 @@

HSET

Number of HSET commands processed per second

Unit: count/s

0–500,000

@@ -1787,7 +1787,7 @@

MGET

Number of MGET commands processed per second

Unit: count/s

0–500,000

@@ -1802,7 +1802,7 @@

MSET

Number of MSET commands processed per second

Unit: count/s

0–500,000

@@ -1817,7 +1817,7 @@

SET

Number of SET commands processed per second

Unit: count/s

0–500,000

@@ -1861,7 +1861,7 @@
Proxy Metrics

  • The Monitored Object column lists instances that support the corresponding metrics.
  • Dimensions lists the metric dimensions.
-
+
+
+
diff --git a/docs/dcs/umn/dcs-ug-190812001.html b/docs/dcs/umn/dcs-ug-190812001.html
index 4fab9ae2..e2de2950 100644
--- a/docs/dcs/umn/dcs-ug-190812001.html
+++ b/docs/dcs/umn/dcs-ug-190812001.html
@@ -24,7 +24,7 @@
diff --git a/docs/dcs/umn/dcs-ug-210330002.html b/docs/dcs/umn/dcs-ug-210330002.html
index c48af4b1..c48a0d9a 100644
--- a/docs/dcs/umn/dcs-ug-210330002.html
+++ b/docs/dcs/umn/dcs-ug-210330002.html
@@ -8,9 +8,8 @@
-

Expired Key Scan

DCS integrates these strategies and allows you to periodically release the memory used by expired keys. You can configure scheduled scans on the master nodes of your instances. The entire keyspace is traversed during the scans, triggering Redis to check whether the keys have expired and to remove expired keys if any.

-

This function is supported only by DCS Redis 4.0, 5.0, and 6.0 instances.

-

Perform expired key scans during off-peak hours to avoid 100% CPU usage.

+

DCS Expired Key Scan

DCS integrates these strategies and allows you to periodically release the memory used by expired keys. You can configure scheduled scans on the master nodes of your instances. The entire keyspace is traversed during the scans, triggering Redis to check whether the keys have expired and to remove expired keys if any.

+
  • This function is supported only by DCS Redis 4.0, 5.0, and 6.0 instances.
  • Perform expired key scans during off-peak hours to avoid 100% CPU usage.
  • Released expired keys cannot be queried.
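
The scheduled scan described above can be approximated on the client side with a Lettuce (Java) sketch that traverses the keyspace with SCAN, prompting Redis to check TTLs and drop expired keys as they are visited. The address and the COUNT hint are illustrative assumptions.

```java
import io.lettuce.core.KeyScanCursor;
import io.lettuce.core.RedisClient;
import io.lettuce.core.ScanArgs;
import io.lettuce.core.ScanCursor;
import io.lettuce.core.api.sync.RedisCommands;

public class ExpiredKeyScan {
    public static void main(String[] args) {
        RedisClient client = RedisClient.create("redis://<instance-address>:6379");
        RedisCommands<String, String> redis = client.connect().sync();

        ScanCursor cursor = ScanCursor.INITIAL;
        long live = 0;
        do {
            // COUNT is only a hint; keep it moderate to limit CPU pressure.
            KeyScanCursor<String> page = redis.scan(cursor, ScanArgs.Builder.limit(100));
            live += page.getKeys().size();
            cursor = page;
        } while (!cursor.isFinished());

        System.out.println("Traversed about " + live + " live keys");
        client.shutdown();
    }
}
```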

Procedure

  1. Log in to the DCS console.
  2. Click in the upper left corner of the management console and select the region where your instance is located.
  3. In the navigation pane, choose Cache Manager.
  4. Click the name of a DCS Redis instance.
  5. Choose Analysis and Diagnosis > Cache Analysis.
  6. On the Expired Key Scan tab page, scan for expired keys and release them.

    • Click Start Scanning to scan for expired keys immediately.
    • Enable Scheduled to schedule automatic scans at a specified time. For details about how to configure automatic scans, see Scheduling Automatic Scans.
Table 5 Proxy metrics of Proxy Cluster DCS Redis 3.0 instances

Metric ID

diff --git a/docs/dcs/umn/dcs-ug-0326026.html b/docs/dcs/umn/dcs-ug-0326026.html
index 3667d075..cdfd101c 100644
--- a/docs/dcs/umn/dcs-ug-0326026.html
+++ b/docs/dcs/umn/dcs-ug-0326026.html
@@ -8,7 +8,12 @@

2024-02-17

+

2024-05-10

+

Updated Creating a DCS Redis Instance and Modifying DCS Instance Specifications. Shard size can be customized for Redis Cluster instances.

+

2024-02-17

Updated Exporting DCS Instance List and How Do I Access a DCS Redis Instance Through Redis Desktop Manager?.

IP Address/Range

A maximum of 20 IP addresses or IP address ranges can be added to an instance. Separate multiple IP addresses or IP address ranges with commas.

-

Unsupported IP address and IP address range: 0.0.0.0 and 0.0.0/0.

+

Unsupported IP address and IP address range: 0.0.0.0 and 0.0.0.0/0.

10.10.10.1,10.10.10.10