diff --git a/umn/source/_static/images/en-us_image_0000001424936648.png b/umn/source/_static/images/en-us_image_0000001424936648.png deleted file mode 100644 index 1997ee7..0000000 Binary files a/umn/source/_static/images/en-us_image_0000001424936648.png and /dev/null differ diff --git a/umn/source/_static/images/en-us_image_0000001424936724.png b/umn/source/_static/images/en-us_image_0000001424936724.png deleted file mode 100644 index 3268ede..0000000 Binary files a/umn/source/_static/images/en-us_image_0000001424936724.png and /dev/null differ diff --git a/umn/source/_static/images/en-us_image_0000001425096592.png b/umn/source/_static/images/en-us_image_0000001425096592.png new file mode 100644 index 0000000..440b60b Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001425096592.png differ diff --git a/umn/source/_static/images/en-us_image_0000001425096656.png b/umn/source/_static/images/en-us_image_0000001425096656.png deleted file mode 100644 index 69d2a00..0000000 Binary files a/umn/source/_static/images/en-us_image_0000001425096656.png and /dev/null differ diff --git a/umn/source/_static/images/en-us_image_0000001425254180.png b/umn/source/_static/images/en-us_image_0000001425254180.png deleted file mode 100644 index 8622ce3..0000000 Binary files a/umn/source/_static/images/en-us_image_0000001425254180.png and /dev/null differ diff --git a/umn/source/_static/images/en-us_image_0000001425254200.png b/umn/source/_static/images/en-us_image_0000001425254200.png deleted file mode 100644 index f704ef9..0000000 Binary files a/umn/source/_static/images/en-us_image_0000001425254200.png and /dev/null differ diff --git a/umn/source/_static/images/en-us_image_0000001425254296.png b/umn/source/_static/images/en-us_image_0000001425254296.png deleted file mode 100644 index 5975e3c..0000000 Binary files a/umn/source/_static/images/en-us_image_0000001425254296.png and /dev/null differ diff --git a/umn/source/_static/images/en-us_image_0000001425413568.png b/umn/source/_static/images/en-us_image_0000001425413568.png deleted file mode 100644 index 1335cb7..0000000 Binary files a/umn/source/_static/images/en-us_image_0000001425413568.png and /dev/null differ diff --git a/umn/source/_static/images/en-us_image_0000001474893253.png b/umn/source/_static/images/en-us_image_0000001474893253.png deleted file mode 100644 index fcce958..0000000 Binary files a/umn/source/_static/images/en-us_image_0000001474893253.png and /dev/null differ diff --git a/umn/source/_static/images/en-us_image_0000001475012977.png b/umn/source/_static/images/en-us_image_0000001475012977.png deleted file mode 100644 index f9b1b69..0000000 Binary files a/umn/source/_static/images/en-us_image_0000001475012977.png and /dev/null differ diff --git a/umn/source/_static/images/en-us_image_0000001475013001.png b/umn/source/_static/images/en-us_image_0000001475013001.png deleted file mode 100644 index f17d858..0000000 Binary files a/umn/source/_static/images/en-us_image_0000001475013001.png and /dev/null differ diff --git a/umn/source/_static/images/en-us_image_0000001475132933.png b/umn/source/_static/images/en-us_image_0000001475132933.png deleted file mode 100644 index f469e3c..0000000 Binary files a/umn/source/_static/images/en-us_image_0000001475132933.png and /dev/null differ diff --git a/umn/source/_static/images/en-us_image_0000001475132957.png b/umn/source/_static/images/en-us_image_0000001475132957.png deleted file mode 100644 index 9899d92..0000000 Binary files 
a/umn/source/_static/images/en-us_image_0000001475132957.png and /dev/null differ diff --git a/umn/source/_static/images/en-us_image_0000001475132965.png b/umn/source/_static/images/en-us_image_0000001475132965.png index b8e1047..6edc879 100644 Binary files a/umn/source/_static/images/en-us_image_0000001475132965.png and b/umn/source/_static/images/en-us_image_0000001475132965.png differ diff --git a/umn/source/_static/images/en-us_image_0000001475133049.png b/umn/source/_static/images/en-us_image_0000001475133049.png deleted file mode 100644 index deefe45..0000000 Binary files a/umn/source/_static/images/en-us_image_0000001475133049.png and /dev/null differ diff --git a/umn/source/_static/images/en-us_image_0000001475213493.png b/umn/source/_static/images/en-us_image_0000001475213493.png deleted file mode 100644 index ace09f6..0000000 Binary files a/umn/source/_static/images/en-us_image_0000001475213493.png and /dev/null differ diff --git a/umn/source/_static/images/en-us_image_0000001475213513.png b/umn/source/_static/images/en-us_image_0000001475213513.png deleted file mode 100644 index d921a6e..0000000 Binary files a/umn/source/_static/images/en-us_image_0000001475213513.png and /dev/null differ diff --git a/umn/source/_static/images/en-us_image_0000001475213541.png b/umn/source/_static/images/en-us_image_0000001475213541.png deleted file mode 100644 index ea7c577..0000000 Binary files a/umn/source/_static/images/en-us_image_0000001475213541.png and /dev/null differ diff --git a/umn/source/_static/images/en-us_image_0000001475213569.png b/umn/source/_static/images/en-us_image_0000001475213569.png deleted file mode 100644 index c8bede7..0000000 Binary files a/umn/source/_static/images/en-us_image_0000001475213569.png and /dev/null differ diff --git a/umn/source/_static/images/en-us_image_0000001475213609.png b/umn/source/_static/images/en-us_image_0000001475213609.png deleted file mode 100644 index 3112e4e..0000000 Binary files a/umn/source/_static/images/en-us_image_0000001475213609.png and /dev/null differ diff --git a/umn/source/_static/images/en-us_image_0000001580548376.png b/umn/source/_static/images/en-us_image_0000001580548376.png new file mode 100644 index 0000000..e96c092 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001580548376.png differ diff --git a/umn/source/_static/images/en-us_image_0000001581027344.png b/umn/source/_static/images/en-us_image_0000001581027344.png new file mode 100644 index 0000000..8106e7a Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001581027344.png differ diff --git a/umn/source/_static/images/en-us_image_0000001583192212.png b/umn/source/_static/images/en-us_image_0000001583192212.png new file mode 100644 index 0000000..dd36d9b Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001583192212.png differ diff --git a/umn/source/_static/images/en-us_image_0000001628646381.png b/umn/source/_static/images/en-us_image_0000001628646381.png new file mode 100644 index 0000000..b0aa425 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001628646381.png differ diff --git a/umn/source/_static/images/en-us_image_0000001631228369.png b/umn/source/_static/images/en-us_image_0000001631228369.png new file mode 100644 index 0000000..a82c89e Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001631228369.png differ diff --git a/umn/source/_static/images/en-us_image_0000001631956069.png b/umn/source/_static/images/en-us_image_0000001631956069.png new file mode 
100644 index 0000000..65cb357 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001631956069.png differ diff --git a/umn/source/account_management/creating_an_account.rst b/umn/source/account_management/creating_an_account.rst index f2c4d6c..aaadd16 100644 --- a/umn/source/account_management/creating_an_account.rst +++ b/umn/source/account_management/creating_an_account.rst @@ -31,10 +31,10 @@ Procedure | | | | | - Must be case-sensitive. | | | - Can include 8 to 32 characters. | - | | - The password must contain at least three types of lowercase letters, uppercase letters, digits, and special characters ~! ``@#%^*-_=+?`` | + | | - Must contain at least three of the following character types: letters, digits, and special characters ``~!@#%^*-_=+?`` | | | - Do not use weak or easy-to-guess passwords. | +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Confirm Password | None | + | Confirm Password | The confirm password must be the same as the entered password. | +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------+ | Schema | Schema to be associated with the account. You can select an existing schema from the drop-down list. | | | | diff --git a/umn/source/backups_and_restorations/consistent_backups.rst b/umn/source/backups_and_restorations/consistent_backups.rst deleted file mode 100644 index 5132d53..0000000 --- a/umn/source/backups_and_restorations/consistent_backups.rst +++ /dev/null @@ -1,8 +0,0 @@ -:original_name: ddm_12_0005.html - -.. _ddm_12_0005: - -Consistent Backups -================== - -DDM replaces its consistent backup function with a new feature called "metadata restore" under Data Restoration. After the change, existing consistency backups cannot be used for restoration, and operations related to consistency backups are hidden You can restore data by referring to section "Restoring Metadata." If you have any questions, contact DDM technical support. diff --git a/umn/source/backups_and_restorations/index.rst b/umn/source/backups_and_restorations/index.rst index 4138547..6315e6c 100644 --- a/umn/source/backups_and_restorations/index.rst +++ b/umn/source/backups_and_restorations/index.rst @@ -5,7 +5,6 @@ Backups and Restorations ======================== -- :ref:`Consistent Backups ` - :ref:`Restoring Data to a New Instance ` - :ref:`Restoring Metadata ` @@ -13,6 +12,5 @@ Backups and Restorations :maxdepth: 1 :hidden: - consistent_backups restoring_data_to_a_new_instance restoring_metadata diff --git a/umn/source/backups_and_restorations/restoring_data_to_a_new_instance.rst b/umn/source/backups_and_restorations/restoring_data_to_a_new_instance.rst index 13ade00..bf07600 100644 --- a/umn/source/backups_and_restorations/restoring_data_to_a_new_instance.rst +++ b/umn/source/backups_and_restorations/restoring_data_to_a_new_instance.rst @@ -19,8 +19,6 @@ Constraints - Restoring data to a new DDM instance will overwrite data on it and cause the instance to be unavailable during restoration. - The new RDS for MySQL instances must have the same or later versions than the original ones, and their storage space must be greater than or equal to that of the original ones. -- Data cannot be restored to a local RDS for MySQL instance. 
-- Data cannot be restored to an RDS for MySQL instance that uses SSDs for storage. - Restoration is not supported if the DDM instance is in the primary network and the destination RDS for MySQL instance is in the extended network. - The source DDM instance must be of the version 2.3.2.11 or later, and the destination DDM instance must be of the version 3.0.8 or later. - Time points that data can be restored to depend on the backup policy set on original data nodes. @@ -58,4 +56,4 @@ Procedure #. On the displayed **Restore to New Instance** page, specify a time range and a point in time and select the DDM instance created in :ref:`1 ` as the destination DDM instance. -#. Select the RDS for MySQL instances created in :ref:`2 ` as destination DB instances and check the confirmation box. Click **OK**. Wait for 1 to 3 minutes for the data restoration to complete. +#. Select the RDS for MySQL instances created in :ref:`2 ` as destination instances and check the confirmation box. Click **OK**. Wait for 1 to 3 minutes for the data restoration to complete. diff --git a/umn/source/backups_and_restorations/restoring_metadata.rst b/umn/source/backups_and_restorations/restoring_metadata.rst index 9c5afbe..eeec871 100644 --- a/umn/source/backups_and_restorations/restoring_metadata.rst +++ b/umn/source/backups_and_restorations/restoring_metadata.rst @@ -38,13 +38,13 @@ Restoring Metadata to a Point in Time Create a DDM instance. For details, see :ref:`Creating a DDM Instance `. -#. In the DDM instance list, locate the newly-created instance and click its name. +#. In the DDM instance list, locate the instance that needs to be restored and click its name. #. In the navigation pane on the left, choose **Backups & Restorations**. Click **Restore Metadata**. #. Specify a time point. DDM will select an appropriate DDM metadata backup closest to the time point. -#. Select the destination instance created in :ref:`1 `. If there are no instances available, create an instance by referring to :ref:`Creating a DDM Instance `. +#. Select the destination DDM instance created in :ref:`1 `. #. Select the RDS for MySQL instance that has PITR completed and click **OK**. diff --git a/umn/source/conf.py b/umn/source/conf.py index 86ecdee..35bfb96 100644 --- a/umn/source/conf.py +++ b/umn/source/conf.py @@ -18,7 +18,7 @@ import os import sys extensions = [ - 'otcdocstheme' + 'otcdocstheme', ] otcdocs_auto_name = False diff --git a/umn/source/connection_management/changing_a_database_port.rst b/umn/source/connection_management/changing_a_database_port.rst index 8e0ade1..08e41b0 100644 --- a/umn/source/connection_management/changing_a_database_port.rst +++ b/umn/source/connection_management/changing_a_database_port.rst @@ -19,13 +19,9 @@ Procedure For DDM instances, the database port number ranges from 1025 to 65534 except for ports 1033, 7009, 8888, and 12017 because they are in use by DDM. The default value is **5066**. - - Specify a new port number and click |image2|. + - Click |image2|. - - In the dialog box, click **Yes**. - - Changing the database port requires a restart of the DDM instance. - - - In the dialog box, click **No**. + Changing the database port requires a restart of the DDM instance. To continue the change, click **Yes** in the displayed dialog box. To cancel the change, click **No**. - To cancel the change, click |image3|. 
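After the port change and restart described above, it can be useful to confirm that the instance accepts connections on the new port. The following is a minimal sketch using the MySQL CLI as shown elsewhere in this guide; the address 192.168.0.200, port 5067, and user ddmuser are placeholder values, not taken from this change:

.. code-block::

   # Connect to the DDM instance on the new port; replace the address, port, and user with your own values.
   mysql -h 192.168.0.200 -P 5067 -u ddmuser -p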
diff --git a/umn/source/connection_management/configuring_access_control.rst b/umn/source/connection_management/configuring_access_control.rst index a06087a..6cd9b0e 100644 --- a/umn/source/connection_management/configuring_access_control.rst +++ b/umn/source/connection_management/configuring_access_control.rst @@ -13,13 +13,10 @@ DDM supports load balancing by default, but some regions may not support. If an Procedure --------- -.. important:: +#. Log in to the DDM console and choose **Instances** in the navigation pane. In the instance list, locate the DDM instance that you want to configure access control for, and click its name. - After a read-only group is created, the entry for configuring access control will be moved to the operation column of the group. +#. On the displayed **Basic Information** page, in the **Network Information** area, enable **Access Control**. -#. On the DDM console, choose **Instances** in the navigation pane. In the instance list, locate the DDM instance that you want to scale out, and click its name. On the displayed **Basic Information** page, enable **Access Control** in the **Network Information** area. -#. Click **Configure**. In the **Configure Access Control** dialog box, specify **Access Policy**, enter the required IP addresses, and click **OK**. + After a read-only group is created for a DDM instance, you can enable **Access Control** in the **Group Information** area on the **Basic Information** page. - .. note:: - - If read/write splitting is enabled, access control only takes effect for groups. +#. Click **Configure** on the right of **Access Control**. In the **Configure Access Control** dialog box, specify **Access Policy**, enter the required IP addresses, and click **OK**. diff --git a/umn/source/data_node_management/configuring_read_weights.rst b/umn/source/data_node_management/configuring_read_weights.rst index f089c7c..c50e586 100644 --- a/umn/source/data_node_management/configuring_read_weights.rst +++ b/umn/source/data_node_management/configuring_read_weights.rst @@ -5,21 +5,35 @@ Configuring Read Weights ======================== +If one DDM instance is associated with multiple data nodes, you can synchronize read weight settings of the first data node to other data nodes. + Prerequisites ------------- You have logged in to the DDM console. -Scenarios ---------- +Precautions +----------- -If one DDM instance is associated with multiple data nodes, you can synchronize read weight settings of the first data node to other data nodes. +The read weight can be 0 to 100. Procedure --------- -#. In the instance list, locate the DDM instance whose data nodes you want to configure read weights for, and click its name. -#. Choose **Data Nodes** in the left navigation pane and click **Configure Read Weight**. -#. In the displayed **Configure Read Weight** dialog box, set the required parameters and click **OK**. -#. Wait the request to configure read weights is submitted. -#. Check whether read weights of read replicas are updated. +#. In the instance list, locate the DDM instance whose data nodes you want to configure read weights for. +#. Click the instance name to enter the **Basic Information** page. +#. In the navigation pane, choose **Data Nodes**. +#. Set read weights for associated instances. + + - Set read weights for multiple instances. + + If you want to set read weights for multiple instances at a time, click **Configure Read Weight** on the **Data Nodes** page. 
+ + In the displayed dialog box, click **Synchronize** to apply the read weight of the first instance to other instances. This operation requires that read weights of all instances should be the same. Otherwise, you need to manually configure a read weight for each instance. + + - Set a read weight for an instance. + + If you want to set the read weight of an instance, locate the target instance and click **Configure Read Weight** in the **Operation** column. + +#. Click **Yes**. +#. After the read weight is configured successfully, you can view the updated read weight on the **Data Nodes** page. diff --git a/umn/source/data_node_management/index.rst b/umn/source/data_node_management/index.rst index c7f94e0..a261568 100644 --- a/umn/source/data_node_management/index.rst +++ b/umn/source/data_node_management/index.rst @@ -6,8 +6,9 @@ Data Node Management ==================== - :ref:`Overview ` -- :ref:`Configuring Read Weights ` - :ref:`Synchronizing Data Node Information ` +- :ref:`Configuring Read Weights ` +- :ref:`Splitting Read and Write Requests ` - :ref:`Reloading Table Data ` .. toctree:: @@ -15,6 +16,7 @@ Data Node Management :hidden: overview - configuring_read_weights synchronizing_data_node_information + configuring_read_weights + splitting_read_and_write_requests reloading_table_data diff --git a/umn/source/data_node_management/reloading_table_data.rst b/umn/source/data_node_management/reloading_table_data.rst index d0c0995..1495cca 100644 --- a/umn/source/data_node_management/reloading_table_data.rst +++ b/umn/source/data_node_management/reloading_table_data.rst @@ -19,7 +19,4 @@ Procedure --------- #. Choose **Instances** on the left navigation pane, in the instance list, locate the instance whose information is changed and click the instance name. - #. Choose **More** > **Reload Table Data** in the **Operation** column. - - A message is returned, indicating that table data of instance *XXX* has been reloaded. diff --git a/umn/source/data_node_management/splitting_read_and_write_requests.rst b/umn/source/data_node_management/splitting_read_and_write_requests.rst new file mode 100644 index 0000000..ae0450c --- /dev/null +++ b/umn/source/data_node_management/splitting_read_and_write_requests.rst @@ -0,0 +1,39 @@ +:original_name: ddm_06_0012.html + +.. _ddm_06_0012: + +Splitting Read and Write Requests +================================= + +Read/write splitting offloads read requests from primary instances to read replicas on a data node at a ratio, improving processing of read/write transactions. This function is transparent to applications, and you do not need to modify service code. Configure read weights of primary instances and their read replicas on the DDM console, and read traffic will be distributed at the preset ratio and write traffic will be forwarded to the primary instances by default. The ratio is generally based on service requirements and loads of associated data nodes. + +Data is asynchronously replicated from the primary instance to read replicas, and there is a delay between them in milliseconds. Set weights of the primary instance and its read replicas to 0 and 100, respectively, that is, distribute all read requests to read replicas if sub-second latency is allowed for read requests and these requests require high query costs that may impact read/write transactions. In other scenarios, adjust the ratio based on service requirements. 
+ +Precautions +----------- + +- The SELECT statements that contain hints or modify data in transactions are all executed by the primary instances. +- If the associated primary instance becomes faulty and parameter **Seconds_Behind_Master** on its read replicas is set to **NULL**, read-only requests are still forwarded to the primary instance. Recover the faulty instance as soon as possible. + +Prerequisites +------------- + +- You have created a DDM instance and a data node with read replicas. +- You have created a schema. + +Procedure +--------- + +#. On the **Instances** page, locate the required DDM instance and click its name. +#. In the navigation pane, choose **Data Nodes**. +#. On the displayed page, locate the target instance and click **Configure Read Weight** in the **Operation** column. + + - The read weight ranges from 0 to 100. + + - If you create a read replica for the associated instance, the read replica will handle all separated read requests by default. To re-assign read/write requests, you can configure read weights of the associated instance and its read replica. + + - After the read weights are configured, the primary instance and its read replica will handle read requests according to the ratio: (Read weight of primary instance/Total read weights of primary instance and read replica) : (Read weight of read replica/Total read weights of primary instance and read replica). + + For example, if an RDS for MySQL instance contains one primary instance and one read replica and the read weights of the primary instance and its read replica are 20 and 80, respectively, they will process read requests in the ratio of 1:4. In other words, the primary instance processes 1/5 of the read requests and the read replica processes 4/5. Write requests are automatically routed to the primary instance. + +#. After the read weights are configured successfully, you can view the weights on the **Data Nodes** page. diff --git a/umn/source/faqs/connection_management/how_can_i_handle_garbled_characters_generated_when_i_connect_a_mysql_instance_to_a_ddm_instance.rst b/umn/source/faqs/connection_management/how_can_i_handle_garbled_characters_generated_when_i_connect_a_mysql_instance_to_a_ddm_instance.rst index d2c3c41..6722795 100644 --- a/umn/source/faqs/connection_management/how_can_i_handle_garbled_characters_generated_when_i_connect_a_mysql_instance_to_a_ddm_instance.rst +++ b/umn/source/faqs/connection_management/how_can_i_handle_garbled_characters_generated_when_i_connect_a_mysql_instance_to_a_ddm_instance.rst @@ -13,4 +13,4 @@ Example: .. code-block:: - mysql -h 127.0.0.1 -P 5066 -D database --default-character-set=utf8 -u ddmuser -p password + mysql -h 127.0.0.1 -P 5066 -D database --default-character-set=utf8 -u ddmuser diff --git a/umn/source/faqs/ddm_usage/what_should_i_do_if_an_error_message_is_returned_when_i_specify_an_auto-increment_primary_key_during_migration.rst b/umn/source/faqs/ddm_usage/what_should_i_do_if_an_error_message_is_returned_when_i_specify_an_auto-increment_primary_key_during_migration.rst index 7d13b01..4f1476d 100644 --- a/umn/source/faqs/ddm_usage/what_should_i_do_if_an_error_message_is_returned_when_i_specify_an_auto-increment_primary_key_during_migration.rst +++ b/umn/source/faqs/ddm_usage/what_should_i_do_if_an_error_message_is_returned_when_i_specify_an_auto-increment_primary_key_during_migration.rst @@ -9,4 +9,4 @@ Execute the following SQL statement to modify the start value of the auto-increm ..
code-block:: - ALTER SEQUENCE Database name.SEQ name START WITH New start value + ALTER SEQUENCE . START WITH diff --git a/umn/source/faqs/rds-related_questions/what_risky_operations_on_rds_for_mysql_will_affect_ddm.rst b/umn/source/faqs/rds-related_questions/what_risky_operations_on_rds_for_mysql_will_affect_ddm.rst index 2830cfa..3a0c1fa 100644 --- a/umn/source/faqs/rds-related_questions/what_risky_operations_on_rds_for_mysql_will_affect_ddm.rst +++ b/umn/source/faqs/rds-related_questions/what_risky_operations_on_rds_for_mysql_will_affect_ddm.rst @@ -16,6 +16,8 @@ What Risky Operations on RDS for MySQL Will Affect DDM? +============================================+========================================================================+==================================================================================================================================================================================================+ | Operations on the RDS for MySQL console | Deleting an RDS for MySQL instance | After an RDS for MySQL instance is deleted, all schemas and logical tables of the DDM instance associated with the RDS instance become unavailable. | +--------------------------------------------+------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | Stopping an RDS for MySQL instance | After an RDS for MySQL instance is stopped, all schemas and logical tables of the DDM instance associated with the RDS instance become unavailable. | + +--------------------------------------------+------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | Performing the primary/standby switchover of an RDS for MySQL instance | RDS for MySQL may be intermittently interrupted during the primary/standby switchover. In addition, a small amount of data may be lost in case of long delay in primary/standby synchronization. | | | | | | | | - Creating schemas or logical tables is not allowed on DDM during the primary/standby switchover of the RDS for MySQL instance. | @@ -23,7 +25,7 @@ What Risky Operations on RDS for MySQL Will Affect DDM? +--------------------------------------------+------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | Restarting an RDS for MySQL instance | The restart of an RDS for MySQL instance makes itself unavailable and will also affect the associated DDM instance. | +--------------------------------------------+------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | Resetting a password | After the RDS for MySQL instance password is reset, enter the new password on DDM when you create a schema. 
| + | | Resetting a password | After the password of an RDS for MySQL instance is reset, enter the new password on the **DB Instance Connection** page when creating a DDM schema. | +--------------------------------------------+------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | Modifying a parameter template | The following parameters are set to fixed values. If their values are modified, DDM will not function properly. | | | | | diff --git a/umn/source/getting_started/overview.rst b/umn/source/getting_started/overview.rst index 7d96558..2b359f0 100644 --- a/umn/source/getting_started/overview.rst +++ b/umn/source/getting_started/overview.rst @@ -22,7 +22,7 @@ Process of Using DDM :ref:`Step 4: Log In to the DDM Schema ` -.. figure:: /_static/images/en-us_image_0000001475213541.png +.. figure:: /_static/images/en-us_image_0000001628646381.png :alt: **Figure 1** Flowchart for using DDM **Figure 1** Flowchart for using DDM diff --git a/umn/source/getting_started/step_1_create_a_ddm_instance_and_an_rds_for_mysql_instance.rst b/umn/source/getting_started/step_1_create_a_ddm_instance_and_an_rds_for_mysql_instance.rst index 1d42683..f72d67f 100644 --- a/umn/source/getting_started/step_1_create_a_ddm_instance_and_an_rds_for_mysql_instance.rst +++ b/umn/source/getting_started/step_1_create_a_ddm_instance_and_an_rds_for_mysql_instance.rst @@ -41,7 +41,6 @@ Procedure | | - Must start with a letter. | | | - Must be 4 to 64 characters long. | | | - Can contain only letters, digits, and hyphens (-). | - | | - Cannot contain other special characters. | +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Node Class | Class of the DDM instance node. You can select **General-enhanced** or **Kunpeng general computing-plus** and then specify a node class. | | | | @@ -80,15 +79,17 @@ Procedure #. To view and manage the instance, go to the **Instances** page. - The default database port is **5066** and cab be changed after a DDM instance is created. + The default database port is **5066** and can be changed after a DDM instance is created. For details, see :ref:`Changing a Database Port `. #. Switch to the RDS console, click **Create** **DB Instance** in the upper right corner, specify the required information, and click **Next**. + For details about how to create an RDS for MySQL instance, see `Create a DB Instance `__. + .. caution:: - The RDS for MySQL instance you will buy must be in the same VPC and subnet as your DDM instance. If they are not in the same subnet, configure routes to ensure network connectivity. + The RDS for MySQL instance must be in the same VPC and subnet as your DDM instance. If they are not in the same subnet, configure routes to ensure network connectivity. #. After confirming the settings, click **Submit**. Wait 1 to 3 minutes till the instance is created. 
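Before moving on to schema creation, you may want to confirm that the newly created DDM instance is reachable from a client in the same VPC. A minimal sketch, assuming a placeholder instance address of 192.168.0.200, the default port 5066, and a placeholder account ddmuser; mysqladmin is part of the standard MySQL client tools:

.. code-block::

   # Prints "mysqld is alive" if the DDM instance accepts connections on the default port.
   mysqladmin -h 192.168.0.200 -P 5066 -u ddmuser -p ping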
diff --git a/umn/source/getting_started/step_2_create_a_schema_and_associate_it_with_an_rds_for_mysql_instance.rst b/umn/source/getting_started/step_2_create_a_schema_and_associate_it_with_an_rds_for_mysql_instance.rst index 3d90901..fbc30ec 100644 --- a/umn/source/getting_started/step_2_create_a_schema_and_associate_it_with_an_rds_for_mysql_instance.rst +++ b/umn/source/getting_started/step_2_create_a_schema_and_associate_it_with_an_rds_for_mysql_instance.rst @@ -5,55 +5,35 @@ Step 2: Create a Schema and Associate It with an RDS for MySQL Instance ======================================================================= -You can create a schema on the **Instances** or **Schemas** page. This section uses the **Instances** page as an example to describe how to create a schema. - - -.. figure:: /_static/images/en-us_image_0000001475213493.png - :alt: **Figure 1** Instances page - - **Figure 1** Instances page - - -.. figure:: /_static/images/en-us_image_0000001475132933.png - :alt: **Figure 2** Schemas page - - **Figure 2** Schemas page - Procedure --------- #. Log in to the DDM console, and in the navigation pane, choose **Instances**. In the instance list, locate the required DDM instance and click **Create Schema** in the **Operation** column. - #. On the displayed page, specify a sharding mode, enter a schema name, set the number of shards, select the required DDM accounts, and click **Next**. + .. table:: **Table 1** Parameter description - .. figure:: /_static/images/en-us_image_0000001475012977.png - :alt: **Figure 3** Creating a schema + +-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+==========================================================================================================================================================================================================================+ + | Sharding | - **Sharded**: indicates that one schema can be associated with multiple data nodes, and all shards will be evenly distributed across the nodes. | + | | - **Unsharded**: indicates that one schema can be associated with only one data node, and only one shard can be created on the data node. | + +-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Schema | The name contains 2 to 48 characters and must start with a lowercase letter. Only lowercase letters, digits, and underscores (_) are allowed. | + +-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Account | The DDM account that needs to be associated with the schema. 
| + +-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Data Nodes | Select only the data nodes that are in the same VPC as your DDM instance and not in use by other data nodes. DDM will create databases on the selected data nodes without affecting their existing databases and tables. | + +-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Shards | The total shards are the shards on all data nodes. There cannot be more data nodes than there are shards in the schema. Each data node must have at least one shard assigned. Recommended shards per data node: 8 to 64. | + +-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - **Figure 3** Creating a schema - - .. note:: - - - DDM supports two sharding options: - - - **Sharded**: indicates that one schema can be associated with multiple data nodes, and all shards will be evenly distributed across the nodes. - - **Unsharded**: indicates that one schema can be associated with only one data node, and only one shard can be created on that instance. - - - **Schema Name**: indicates the name of the schema. The name contains 2 to 48 characters and must start with a lowercase letter. Only lowercase letters, digits, and underscores (_) are allowed. - - **Shards**: indicates the total number of shards that can be created in the schema. The number of shards on a data node cannot exceed 64. If more than 64 shards are required, contact DDM technical support. - -#. Enter a database account with the required permissions and click **Test Connection**. +#. On the **DB Instance Connection** page, enter a database account with the required permissions and click **Test Connection**. .. note:: Required permissions: SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, PROCESS, REFERENCES, INDEX, ALTER, SHOW DATABASES, CREATE TEMPORARY TABLES, LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER WITH GRANT OPTION - Create a special account and assign it the above permissions as required. Then use the account to create a schema for later operations. - - - .. figure:: /_static/images/en-us_image_0000001425254180.png - :alt: **Figure 4** Testing the connection - - **Figure 4** Testing the connection + You can create a database account for the RDS for MySQL instance and assign it the above permissions in advance. #. After the test becomes successful, click **Finish**. diff --git a/umn/source/getting_started/step_3_create_a_ddm_account.rst b/umn/source/getting_started/step_3_create_a_ddm_account.rst index 1691adf..307a234 100644 --- a/umn/source/getting_started/step_3_create_a_ddm_account.rst +++ b/umn/source/getting_started/step_3_create_a_ddm_account.rst @@ -24,10 +24,10 @@ Procedure | Password | Password of the account. The password: | | | | | | - Can include 8 to 32 characters. 
| - | | - Must contain at least three of the following character types: letters, digits, swung dashes (~), and exclamation marks (!). @ # % ^ \* - \_ = + ? | + | | - Must contain at least three of the following character types: letters, digits, and special characters ``~!@#%^*-_=+?`` | | | - Cannot be a weak password. It cannot be overly simple and easily guessed. | +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Confirm Password | ``-`` | + | Confirm Password | The confirm password must be the same as the entered password. | +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------+ | Schema | Schema to be associated with the DDM account. You can select an existing schema from the drop-down list. | | | | diff --git a/umn/source/getting_started/step_4_log_in_to_the_ddm_schema.rst b/umn/source/getting_started/step_4_log_in_to_the_ddm_schema.rst index e13135a..c38b3b8 100644 --- a/umn/source/getting_started/step_4_log_in_to_the_ddm_schema.rst +++ b/umn/source/getting_started/step_4_log_in_to_the_ddm_schema.rst @@ -82,12 +82,12 @@ Using the MySQL CLI to Log In to a Schema .. code-block:: - mysql -h ${DDM_SERVER_ADDRESS} -P${DDM_SERVER_PORT} -u${DDM_USER} -p [-D${DDM_DBNAME}] [--default-character-set=utf8][--default_auth=mysql_native_password] + mysql -h ${DDM_SERVER_ADDRESS} -P ${DDM_SERVER_PORT} -u ${DDM_USER} -p [-D ${DDM_DBNAME}] [--default -character -set=utf8][--default_auth=mysql_native_password] .. table:: **Table 1** Parameter description +------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------+ - | Example Parameter | Description | Example Value | + | Parameter | Description | Example Value | +====================================+========================================================================================================================================================+=======================+ | DDM_SERVER_ADDRESS | IP address of the DDM instance | 192.168.0.200 | +------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------+ @@ -101,18 +101,16 @@ Using the MySQL CLI to Log In to a Schema | | | | | | Configure this parameter if garbled characters are displayed during parsing due to inconsistency between MySQL connection code and actually used code. | | +------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------+ - | default_auth=mysql_native_password | The password authentication plug-in is used by default. | ``-`` | + | default_auth=mysql_native_password | (Optional) The password authentication plug-in is used by default. | ``-`` | + | | | | + | | If you use the MySQL 8.0 client, this parameter is required. | | +------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------+ - .. 
note:: - - If you use the MySQL 8.0 client, set **default_auth** to **mysql_native_password**. - #. View the command output. The following is an example output of running a MySQL command in the Windows CLI. .. code-block:: - C:\Users\testDDM>mysql -h192.168.0.200 -P5066 -Ddb_5133 -udbuser01 -p + C:\Users\testDDM>mysql -h 192.168.0.200 -P 5066 -D db_5133 -u dbuser01 -p Enter password: Reading table information for completion of table and column names You can turn off this feature to get a quicker startup with -A @@ -144,7 +142,7 @@ Using a JDBC Driver to Log In to a Schema .. note:: - JDBC drivers 5.1.35 to 5.1.45 are recommended. + JDBC drivers 5.1.49 or later are recommended. #. Create a database connection. @@ -174,7 +172,7 @@ Using a JDBC Driver to Log In to a Schema .. code-block:: - loadBalanceAutoCommitStatementThreshold=5&loadBalanceHostRemovalGracePeriod=15000&loadBalanceBlacklistTimeout=60000&loadBalancePingTimeout=5000&retriesAllDown=10&connectTimeout=10000"; + loadBalanceAutoCommitStatementThreshold=5&loadBalanceHostRemovalGracePeriod=15000&loadBalanceBlacklistTimeout=60000&loadBalancePingTimeout=5000&retriesAllDown=10&connectTimeout=10000 .. note:: @@ -184,7 +182,7 @@ Using a JDBC Driver to Log In to a Schema - **loadBalanceBlacklistTimeout**: indicates the time in milliseconds between checks of servers which are unavailable, by controlling how long a server lives in the global blacklist. - **loadBalancePingTimeout**: indicates the time in milliseconds that the connection will wait for a response to a ping operation when you set **loadBalanceValidateConnectionOnSwapServer** to **true**. - **retriesAllDown**: indicates the maximum number of connection attempts before an exception is thrown when a valid host is searched. SQLException will be returned if the threshold of retries is reached with no valid connections obtained. - - **connectTimeout**: indicates the maximum amount of time in milliseconds that the JDBC driver is willing to wait to set up a socket connection. **0** indicates that the connection does not time out. Only JDK-1.4 or later is supported. The default value **0**. + - **connectTimeout**: indicates the maximum amount of time in milliseconds that the JDBC driver is willing to wait to set up a socket connection. **0** indicates that the connection does not time out. This parameter is available to JDK-1.4 or later versions. The default value is **0**. .. _ddm_02_0005__section144072633313: diff --git a/umn/source/instance_management/deleting_a_ddm_instance.rst b/umn/source/instance_management/deleting_a_ddm_instance.rst index e1b74cf..b09a80f 100644 --- a/umn/source/instance_management/deleting_a_ddm_instance.rst +++ b/umn/source/instance_management/deleting_a_ddm_instance.rst @@ -5,21 +5,21 @@ Deleting a DDM Instance ======================= -Prerequisites -------------- +You can delete instances that are no longer needed. -You have logged in to the DDM console. +Precautions +----------- -.. important:: - - Deleted DDM instances cannot be recovered. Exercise caution when performing this operation. +- Deleted instances cannot be recovered. Exercise caution when performing this operation. +- Deleting a DDM instance will not affect its associated RDS instances. +- Deleting a DDM instance involves deleting its associated schemas and DDM accounts. +- If you need to delete data stored on the associated data nodes when deleting a DDM instance, select **Delete data on data nodes**. Procedure --------- #. 
In the instance list, locate the DDM instance that you want to delete and choose **More** > **Delete** in the **Operation** column. + #. In the displayed dialog box, click **Yes**. - .. note:: - - - To delete data stored on data nodes, select **Delete data on data nodes**. + To delete data stored on the associated data nodes, select **Delete data on data nodes**. diff --git a/umn/source/instance_management/index.rst b/umn/source/instance_management/index.rst index f0dfb9f..d750c53 100644 --- a/umn/source/instance_management/index.rst +++ b/umn/source/instance_management/index.rst @@ -13,7 +13,6 @@ Instance Management - :ref:`Restarting a DDM Instance ` - :ref:`Deleting a DDM Instance ` - :ref:`Modifying Parameters of a DDM Instance ` -- :ref:`Splitting Read and Write Requests ` - :ref:`Configuring a Parameter Template ` - :ref:`Administrator Account ` @@ -29,6 +28,5 @@ Instance Management restarting_a_ddm_instance/index deleting_a_ddm_instance modifying_parameters_of_a_ddm_instance - splitting_read_and_write_requests configuring_a_parameter_template administrator_account diff --git a/umn/source/instance_management/modifying_parameters_of_a_ddm_instance.rst b/umn/source/instance_management/modifying_parameters_of_a_ddm_instance.rst index 00afe35..49389a0 100644 --- a/umn/source/instance_management/modifying_parameters_of_a_ddm_instance.rst +++ b/umn/source/instance_management/modifying_parameters_of_a_ddm_instance.rst @@ -28,74 +28,80 @@ Procedure .. table:: **Table 1** Parameters of a DDM instance - +----------------------------------+--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Parameter | Default Value | Value Range | Description | - +==================================+====================+======================================================================================================================================================================================+==========================================================================================================================================================================================================================================================================================================================================================================================================+ - | bind_table | ``-`` | The value should be in format **[{tb.col1,tb2.col2},{tb.col2,tb3.col1},...]**. *tb.col1,tb2.col2* indicates a table name.column name pair, and the value may contain multiple pairs. | Data association among multiple sharded tables. The optimizer processes JOIN operations at the MySQL layer based on these associations. For details about parameter examples, see the description below the table. | - | | | | | - | | | The version should be: | | - | | | | | - | | | DDM 2.3.2.7 or later. 
| | - +----------------------------------+--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | character_set_server | utf8mb4 | gbk, utf8, utf8mb4 | DDM server's character set. To store emoticons, set both this parameter and the character set on RDS to **utf8mb4**. | - | | | | | - | | | | For a DDM instance 3.0.9 or later, you can execute **show variables like '%char%'** to query its character set. You will find that **character_set_client**, **character_set_results**, and **character_set_connection** in the command output all have a fixed value, **utf8mb4**. | - +----------------------------------+--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | collation_server | utf8mb4_unicode_ci | utf8mb4_unicode_ci, utf8mb4_bin, utf8mb4_general_ci | Collation on the DDM server. | - +----------------------------------+--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | concurrent_execution_level | DATA_NODE | RDS_INSTANCE, DATA_NODE, PHY_TABLE | Concurrency level of scanning table shards in a logical table. **DATA_NODE**: indicates that database shards are scanned in parallel and table shards in each database shard are scanned in serial. **RDS_INSTANCE**: indicates that RDS instances are scanned in parallel and shards in each DB instance are scanned in serial. **PHY_TABLE**: indicates that all table shards are scanned in parallel. 
| - +----------------------------------+--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | connection_idle_timeout | 28800 | 60—86400 | Number of seconds the server waits for activity on a connection before closing it. The default value is **28800**, indicating that the server waits for 28800 seconds before closing a connection. | - +----------------------------------+--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | contains_shard_key | OFF | OFF or ON | Whether the SELECT, UPDATE, and DELETE statements must contain sharding keys in filter conditions. | - +----------------------------------+--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | ddl_precheck_mdl_threshold_time | 120 | 0-3600 | Threshold of the MDL duration in DDL pre-check. The unit is second. The default value is **120**. | - +----------------------------------+--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | enable_table_recycle | OFF | OFF or ON | **ON**: indicates that the table recycle bin is enabled. | - | | | | | - | | | | **OFF**: indicates that the table recycle bin is disabled. | - | | | | | - | | | | After the table recycle bin is enabled, deleted tables are moved to the recycle bin and can be recovered by running the RESTORE command within seven days. 
| - +----------------------------------+--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | long_query_time | 1 | 0.01-10 | Minimum duration of a query to be logged as slow, in seconds. The default value is **1**, indicating that the query is considered as a slow query if its execution duration is greater than or equal to 1 second. | - +----------------------------------+--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | max_allowed_packet | 1073741824 | 1024-1073741824 | Maximum size of one packet or any generated intermediate string. The packet message buffer is initialized to **net_buffer_length** bytes, but can grow up to **max_allowed_packet** bytes when needed. This value is small by default, to catch large (and possibly incorrect) packets. The value must be a multiple of **1024**. | - +----------------------------------+--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | max_backend_connections | 0 | 0—10000000 | Maximum of concurrent client connections allowed per DDM instance. When this parameter is set to **0** (default), the maximum concurrent connections from a DDM node to an RDS instance is: (RDS instance's maximum connections - 20)/DDM nodes. This parameter does not take effect only after maximum connections are set on RDS. 
| - +----------------------------------+--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | max_connections | 20000 | 10-40000 | Minimum concurrent connections from a DDM instance node to the client. | - | | | | | - | | | | This value depends on specifications and processing capabilities of the target data node. Too many connections may cause connection waiting, affecting performance. The consumption of DDM connections varies with the number of shards and SQL design. | - | | | | | - | | | | For example, If a SQL statement contains a sharding key, each DDM connection consumes one data node connection. If the SQL statement contains no sharding keys and the number of shards is N, N data node connections are consumed. | - | | | | | - | | | | If SQL design is appropriate and processing capabilities of DDM and its data nodes are good enough, you can set this parameter to a value slightly smaller than the product of backend data nodes x maximum connections supported by each data node. | - | | | | | - | | | | Carry out pressure tests on your services and then select a proper value. | - +----------------------------------+--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | min_backend_connections | 10 | 0—10000000 | Minimum concurrent connections from a DDM node to an RDS instance. The default value is **10**. | - +----------------------------------+--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | seconds_behind_master | 30 | 0—7200 | Threshold in seconds of the replication lag between a primary RDS instance to its read replica. The default value is **30**, indicating that the time for data replication between the primary RDS instance and its read replicas cannot exceed 30 seconds. If the time exceeds 30 seconds, the data read requests are no longer forwarded to the read replicas. 
| - +----------------------------------+--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | sql_execute_timeout | 28800 | 100—28800 | Number of seconds to wait for a SQL statement to execute before it times out. The default value is **28800**, indicating that the SQL statement times out if its execution time is greater than or equal to 28800 seconds. | - | | | | | - | | | | For data nodes, ensure that **net_write_timeout** has a greater value than **sql_execute_timeout**. | - +----------------------------------+--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | temp_table_size_limit | 1000000 | 500000-2000000000 | Size of a temporary table. | - +----------------------------------+--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | transaction_policy | XA | XA, FREE, NO_DTX | Transactions supported by DDM. XA transaction, which attempts to ensure atomicity and isolation. FREE transaction, which is a best-effort commit transaction that allows data to be written to multiple shards, without impacting performance. FREE transactions do not ensure atomicity. NO_DTX transaction, which is a single-shard transaction. | - +----------------------------------+--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | transfer_hash_to_mod_hash | OFF | OFF or ON | Whether the hash algorithm must be converted into mod_hash during table creation. 
| - +----------------------------------+--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | ultimate_optimize | ON | OFF or ON | Whether the SQL execution plan is optimized based on parameter values. | - +----------------------------------+--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | force_read_master_in_transaction | OFF | OFF or ON | Whether SQL statements involved in each transaction are read from the master node. | - | | | | | - | | | | .. caution:: | - | | | | | - | | | | CAUTION: | - | | | | This parameter is available in version 3.0.9 or later. If this feature is enabled in version 3.0.9 but the version is downgraded to 3.0.9 below, the feature keeps enabled when the version returns to 3.0.9 or later. 
| - +----------------------------------+--------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------+ + | Parameter | Description | Value Range | Default Value | + +==================================+=======================================================================================================================================================================================================================================================================================================================================================================================================+=========================================================================================================================================================================================+====================+ + | bind_table | Data association among multiple sharded tables. The optimizer processes JOIN operations at the MySQL layer based on these associations. For details about parameter examples, see the description below the table. | The value should be in format **[{tb1.col1,tb2.col2},{tb1.col2,tb3.col1},...]**. *tb1.col1,tb2.col2* indicates a table name.column name pair, and the value may contain multiple pairs. | ``-`` | + | | | | | + | | | The version should be: | | + | | | | | + | | | DDM 2.3.2.7 or later. | | + +----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------+ + | character_set_server | DDM server's character set. To store emoticons, set both this parameter and the character set on RDS to **utf8mb4**. | gbk, utf8, utf8mb4 | utf8mb4 | + | | | | | + | | For a DDM instance 3.0.9 or later, you can execute **show variables like '%char%'** to query its character set. 
You will find that **character_set_client**, **character_set_results**, and **character_set_connection** in the command output all have a fixed value, **utf8mb4**. | | | + +----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------+ + | collation_server | Collation on the DDM server. | utf8mb4_unicode_ci, utf8mb4_bin, utf8mb4_general_ci | utf8mb4_unicode_ci | + +----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------+ + | concurrent_execution_level | Concurrency level of scanning table shards in a logical table. **DATA_NODE**: indicates that database shards are scanned in parallel and table shards in each database shard are scanned in serial. **RDS_INSTANCE**: indicates that RDS instances are scanned in parallel and shards in each instance are scanned in serial. **PHY_TABLE**: indicates that all table shards are scanned in parallel. | RDS_INSTANCE, DATA_NODE, PHY_TABLE | DATA_NODE | + +----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------+ + | connection_idle_timeout | Number of seconds the server waits for activity on a connection before closing it. The default value is **28800**, indicating that the server waits for 28,800 seconds before closing a connection. 
| 60-86400 | 28800 | + +----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------+ + | contains_shard_key | Whether the SELECT, UPDATE, and DELETE statements must contain sharding keys in filter conditions. | OFF or ON | OFF | + +----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------+ + | ddl_precheck_mdl_threshold_time | Threshold of the MDL duration in DDL pre-check. The unit is second. The default value is **120**. | 1-3600 | 120 | + +----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------+ + | enable_table_recycle | **ON**: indicates that the table recycle bin is enabled. | OFF or ON | OFF | + | | | | | + | | **OFF**: indicates that the table recycle bin is disabled. | | | + | | | | | + | | After the table recycle bin is enabled, deleted tables are moved to the recycle bin and can be recovered by running the RESTORE command within seven days. | | | + +----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------+ + | long_query_time | Minimum duration of a query to be logged as slow, in seconds. The default value is **1**, indicating that the query is considered as a slow query if its execution duration is greater than or equal to 1 second. 
| 0.01-10 | 1 | + +----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------+ + | max_allowed_packet | Maximum size of one packet or any generated intermediate string. The packet message buffer is initialized to **net_buffer_length** bytes, but can grow up to **max_allowed_packet** bytes when needed. This value is small by default, to catch large (and possibly incorrect) packets. The value must be a multiple of **1024**. | 1024-1073741824 | 1073741824 | + +----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------+ + | max_backend_connections | Maximum of concurrent client connections allowed per DDM instance. | 0-10000000 | 0 | + | | | | | + | | The default value is **0**. | | | + | | | | | + | | Actual value: (Maximum connections of RDS - 20)/DDM nodes | | | + | | | | | + | | This parameter does not take effect only after maximum connections are set on RDS. | | | + +----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------+ + | max_connections | Minimum concurrent connections from a DDM instance node to the client. | 10-40000 | 20000 | + | | | | | + | | This value depends on specifications and processing capabilities of the target data node. Too many connections may cause connection waiting, affecting performance. The consumption of DDM connections varies with the number of shards and SQL design. | | | + | | | | | + | | For example, If a SQL statement contains a sharding key, each DDM connection consumes one data node connection. If the SQL statement contains no sharding keys and the number of shards is N, N data node connections are consumed. 
| | | + | | | | | + | | If SQL design is appropriate and processing capabilities of DDM and its data nodes are good enough, you can set this parameter to a value slightly smaller than the product of backend data nodes x maximum connections supported by each data node. | | | + | | | | | + | | Carry out pressure tests on your services and then select a proper value. | | | + +----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------+ + | min_backend_connections | Minimum concurrent connections from a DDM node to an RDS instance. The default value is **10**. | 0-10000000 | 10 | + +----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------+ + | seconds_behind_master | Threshold in seconds of the replication lag between a primary RDS instance to its read replica. The default value is **30**, indicating that the time for data replication between the primary RDS instance and its read replicas cannot exceed 30 seconds. If the time exceeds 30 seconds, the data read requests are no longer forwarded to the read replicas. | 0-7200 | 30 | + +----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------+ + | sql_execute_timeout | Number of seconds to wait for a SQL statement to execute before it times out. The default value is **28800**, indicating that the SQL statement times out if its execution time is greater than or equal to 28800 seconds. | 100-28800 | 28800 | + | | | | | + | | For data nodes, ensure that **net_write_timeout** has a greater value than **sql_execute_timeout**. 
| | | + +----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------+ + | temp_table_size_limit | Size of a temporary table. | 500000-2000000000 | 1000000 | + +----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------+ + | transaction_policy | Transactions supported by DDM. XA transaction, which attempts to ensure atomicity and isolation. FREE transaction, which is a best-effort commit transaction that allows data to be written to multiple shards, without impacting performance. FREE transactions do not ensure atomicity. NO_DTX transaction, which is a single-shard transaction. | XA, FREE, NO_DTX | XA | + +----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------+ + | transfer_hash_to_mod_hash | Whether the hash algorithm must be converted into mod_hash during table creation. | OFF or ON | OFF | + +----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------+ + | ultimate_optimize | Whether the SQL execution plan is optimized based on parameter values. 
| OFF or ON | ON | + +----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------+ + | force_read_master_in_transaction | Whether SQL statements involved in each transaction are read from the master node. | OFF or ON | OFF | + | | | | | + | | .. caution:: | | | + | | | | | + | | CAUTION: | | | + | | This parameter is available in version 3.0.9 or later. If this feature is enabled in version 3.0.9 but the version is downgraded to 3.0.9 below, the feature keeps enabled when the version returns to 3.0.9 or later. | | | + +----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------+ By default, DDM allows you to modify only the preceding instance parameters. If you need to modify other parameters in some special scenarios such as data migration, contact technical support. diff --git a/umn/source/instance_management/restarting_a_ddm_instance/restarting_a_ddm_instance.rst b/umn/source/instance_management/restarting_a_ddm_instance/restarting_a_ddm_instance.rst index 86df63a..7e3117d 100644 --- a/umn/source/instance_management/restarting_a_ddm_instance/restarting_a_ddm_instance.rst +++ b/umn/source/instance_management/restarting_a_ddm_instance/restarting_a_ddm_instance.rst @@ -5,15 +5,15 @@ Restarting a DDM Instance ========================= +You may need to restart an instance to perform maintenance. + +The DDM instance is not available during restart, and the restart operation cannot be undone. Exercise caution when performing this operation. + Prerequisites ------------- - You have logged in to the DDM console. -- The DDM instance that you want to restart is in the **Running** state. - - .. important:: - - The DDM instance is not available during restart, and the restart operation cannot be undone. Exercise caution when performing this operation. +- The instance is in the **Available** status. Procedure --------- diff --git a/umn/source/instance_management/restarting_a_ddm_instance/restarting_a_node.rst b/umn/source/instance_management/restarting_a_ddm_instance/restarting_a_node.rst index 2542087..8efbe97 100644 --- a/umn/source/instance_management/restarting_a_ddm_instance/restarting_a_node.rst +++ b/umn/source/instance_management/restarting_a_ddm_instance/restarting_a_node.rst @@ -7,6 +7,17 @@ Restarting a Node You can restart a single node of your DDM instance. +An instance is not available when one of its nodes is being restarted. 
The restart operation cannot be undone. Exercise caution when you restart an instance node. + +Prerequisites +------------- + +- You have logged in to the DDM console. +- There is a DDM instance available, and its nodes are normal. + +Procedure +--------- + #. In the instance list, locate the DDM instance whose node you want to restart and click its name. #. In the **Node Information** area, locate the target node and click **Restart** in the **Operation** column. #. In the displayed dialog box, click **Yes**. diff --git a/umn/source/instance_management/splitting_read-only_and_read-write_services/how_are_read-only_services_split_from_read-write_services.rst b/umn/source/instance_management/splitting_read-only_and_read-write_services/how_are_read-only_services_split_from_read-write_services.rst index c3a7cf8..851002f 100644 --- a/umn/source/instance_management/splitting_read-only_and_read-write_services/how_are_read-only_services_split_from_read-write_services.rst +++ b/umn/source/instance_management/splitting_read-only_and_read-write_services/how_are_read-only_services_split_from_read-write_services.rst @@ -17,7 +17,7 @@ Procedure .. note:: - One DDM instance supports multiple read-only groups. Each group contains at least 2 nodes, and each instance contains up to 32 nodes. - - One node belongs to only one group, and its group cannot be changed once determined. Nodes in the same group must be of the same class. + - One node belongs to only one group, and its group cannot be changed once determined. Nodes in the same group must be of the same node class. #. On the **Create Group** page, select the required role, VPC, and node class, specify the quantity of new nodes, and click **Next**. @@ -25,16 +25,4 @@ Procedure #. After the creation is complete, check whether the original **Node Information** area becomes the **Group Information** area. Then you can manage nodes in the group. - - .. figure:: /_static/images/en-us_image_0000001424936724.png - :alt: **Figure 1** Node groups - - **Figure 1** Node groups - - - .. figure:: /_static/images/en-us_image_0000001425413568.png - :alt: **Figure 2** Group and node information - - **Figure 2** Group and node information - To delete a group of a DDM instance, locate the group that you want to delete and click **Delete**. The corresponding floating IP address becomes invalid once the group is deleted. This may affect your services. Retain at least one read/write group. diff --git a/umn/source/instance_management/splitting_read-only_and_read-write_services/what_is_read-only_service_isolation.rst b/umn/source/instance_management/splitting_read-only_and_read-write_services/what_is_read-only_service_isolation.rst index e4fbb9b..12d7ab3 100644 --- a/umn/source/instance_management/splitting_read-only_and_read-write_services/what_is_read-only_service_isolation.rst +++ b/umn/source/instance_management/splitting_read-only_and_read-write_services/what_is_read-only_service_isolation.rst @@ -14,10 +14,8 @@ DDM provides two types of node groups, read-only and read/write, which handle re .. note:: - - The node group function is only available for whitelisted users. To enable this function, contact technical support. - - The kernel version must be 2.4.1.2 or later. - - If you want read-only groups to handle SQL queries, make sure that the associated data node has available read replicas. 
If there are no available read replicas, the following error messages may be returned: + If you want read-only groups to handle SQL queries, make sure that the associated data node has available read replicas. If there are no available read replicas, the following error messages may be returned: - - backend database connection error; - - query has been canceled - - execute error: No read-only node + - backend database connection error; + - query has been canceled + - execute error: No read-only node diff --git a/umn/source/instance_management/splitting_read_and_write_requests.rst b/umn/source/instance_management/splitting_read_and_write_requests.rst deleted file mode 100644 index 99bcfca..0000000 --- a/umn/source/instance_management/splitting_read_and_write_requests.rst +++ /dev/null @@ -1,76 +0,0 @@ -:original_name: ddm_06_0012.html - -.. _ddm_06_0012: - -Splitting Read and Write Requests -================================= - -Overview --------- - -Read/write splitting offloads read requests from primary DB instances to read replicas on a data node at a ratio, improving processing of read/write transactions. This function is transparent to applications, and you do not need to modify service code. Configure read weights of primary instances and their read replicas on the DDM console, and read traffic will be distributed at the preset ratio and write traffic will be forwarded to the primary instances by default. The ratio is generally based on service requirements and loads of associated data nodes. - -Data is asynchronously replicated from the primary instance to read replicas, and there is a delay between them in milliseconds. Set weights of the primary instance and its read replicas to 0 and 100, respectively, that is, distribute all read requests to read replicas if sub-second latency is allowed for read requests and these requests require high query costs that may impact read/write transactions. In other scenarios, adjust the ratio based on service requirements. - -The SELECT statements that contain hints or modify data in transactions are all executed by the primary DB instances. - -If the associated primary DB instance becomes faulty and parameter **Seconds_Behind_Master** on its read replicas is set to **NULL**, read-only requests are still forwarded to the primary DB instance. Recover the faulty instance as soon as possible. - -Prerequisites -------------- - -- You have created a DDM instance and a data node with read replicas. -- You have created a schema. - -Procedure ---------- - -#. On the **Instances** page, locate the required DDM instance and click its name. - -#. Choose **Data Nodes**. - -#. Set weights of the associated instance. - - .. note:: - - If the associated instance is a read replica, it handles all separated read requests by default. To configure read/write weights of the read replica, perform the following operations: - - - If you want to set read weights of multiple instances, click **Configure Read Weight**. :ref:`Figure 1 ` shows an example. - - In the displayed dialog box, you can click **Synchronize** to apply the read weight of the first instance to other instances, as shown in :ref:`Figure 2 `. This operation requires that all involved instances should have the same number of read replicas. Otherwise, you need to manually configure the read weight for each instance. - - .. _ddm_06_0012__fig195175335516: - - .. 
figure:: /_static/images/en-us_image_0000001424936648.png - :alt: **Figure 1** Configuring read weights for multiple instances - - **Figure 1** Configuring read weights for multiple instances - - .. _ddm_06_0012__fig195318811264: - - .. figure:: /_static/images/en-us_image_0000001475013001.png - :alt: **Figure 2** Synchronizing the read weight of the first instance to other instances - - **Figure 2** Synchronizing the read weight of the first instance to other instances - - - .. figure:: /_static/images/en-us_image_0000001425254200.png - :alt: **Figure 3** Manually configuring the read weight for each instance - - **Figure 3** Manually configuring the read weight for each instance - - - If you want to set the read weight of an instance, locate the target instance and click **Configure Read Weight** in the **Operation** column. - - - .. figure:: /_static/images/en-us_image_0000001475213513.png - :alt: **Figure 4** Configuring the read weight for a single instance - - **Figure 4** Configuring the read weight for a single instance - -#. Wait the request is submitted. - - - .. figure:: /_static/images/en-us_image_0000001475132957.png - :alt: **Figure 5** Request submitted - - **Figure 5** Request submitted diff --git a/umn/source/parameter_template_management/applying_a_parameter_template.rst b/umn/source/parameter_template_management/applying_a_parameter_template.rst index 2dd648d..f92e61b 100644 --- a/umn/source/parameter_template_management/applying_a_parameter_template.rst +++ b/umn/source/parameter_template_management/applying_a_parameter_template.rst @@ -26,7 +26,7 @@ Procedure A parameter template can be applied to one or more DDM instances. -#. In the displayed dialog box, select one or more DDM instances to which the parameter template will be applied and click **OK**. +#. In the displayed dialog box, select one or more DDM instances that you want to apply the parameter template to and click **OK**. After the parameter template is applied to DDM instances successfully, you can view its application history by referring to :ref:`Viewing Application Records of a Parameter Template `. diff --git a/umn/source/parameter_template_management/comparing_two_parameter_templates.rst b/umn/source/parameter_template_management/comparing_two_parameter_templates.rst index 8f18157..29dbced 100644 --- a/umn/source/parameter_template_management/comparing_two_parameter_templates.rst +++ b/umn/source/parameter_template_management/comparing_two_parameter_templates.rst @@ -10,20 +10,20 @@ Scenarios You can apply different parameter templates to the same DDM instance to view impacts on parameter settings of the instance. -You can also apply the same parameter template to different DDM instances to learn about the impacts. - Procedure --------- #. Log in to the management console. + #. Click |image1| in the upper left corner and select a region and a project. + #. Click |image2| in the upper left corner of the page and choose **Databases** > **Distributed Database Middleware**. -#. Choose **Parameter Templates**, click the **Custom Templates** tab, locate the required parameter template, and click **Compare** in the **Operation** column. + +#. On the **Parameter Templates** page, locate the required parameter template and click **Compare** in the **Operation** column. + #. In the displayed dialog box, select a parameter template and click **OK**. - .. note:: - - You can also click the **Default Templates** tab and compare a default template with a customer template. 
The procedure is the same as comparing parameter templates on the **Custom Templates** page. + You can compare different custom parameter templates, or a default parameter template with a custom parameter template. - If their settings are different, the parameter names and values of both parameter templates are displayed. - If their settings are the same, no data is displayed. diff --git a/umn/source/parameter_template_management/creating_a_parameter_template.rst b/umn/source/parameter_template_management/creating_a_parameter_template.rst index 5bea289..8f35a02 100644 --- a/umn/source/parameter_template_management/creating_a_parameter_template.rst +++ b/umn/source/parameter_template_management/creating_a_parameter_template.rst @@ -15,7 +15,7 @@ When you have already created a parameter template and want to provide most of i The following are the key points you should know when using parameters from a parameter template: -- When you change a parameter value in a parameter template that has been applied to a DB instance, the change applies only to the current DB instance and does not affect other DB instances. +- Changing a parameter value in a parameter template does not change any parameter in a DDM instance where it has been applied before. - When you change a parameter value in a parameter template and save the change, the change will take effect only after you apply the parameter template to a DDM instance and manually restart the instance. - Improper parameter settings may have unintended adverse effects, including degraded performance and system instability. Exercise caution when modifying parameters and you need to back up data before modifying parameters in a parameter template. Before applying parameter template changes to a production DDM instance, you should try out these changes on a test DDM instance. @@ -33,7 +33,8 @@ Procedure .. note:: - Each user can create up to 100 parameter templates. + - Each user can create up to 100 parameter templates. + - The parameter template quota is shared by all DDM instances in a project. .. |image1| image:: /_static/images/en-us_image_0000001425413508.png .. |image2| image:: /_static/images/en-us_image_0000001425254220.png diff --git a/umn/source/parameter_template_management/editing_a_parameter_template.rst b/umn/source/parameter_template_management/editing_a_parameter_template.rst index 607d3b1..b592514 100644 --- a/umn/source/parameter_template_management/editing_a_parameter_template.rst +++ b/umn/source/parameter_template_management/editing_a_parameter_template.rst @@ -11,12 +11,9 @@ You cannot change parameter values in default parameter templates. When you chan The following are the key points you should know when using parameters from a parameter template: -- After you change a parameter value and save the change, the change will take effect only after you apply the parameter template to a DDM instance and manually restart the instance. For details, see :ref:`Applying a Parameter Template `. +- When you modify a custom parameter template, the modifications take effect only after you apply the parameter template to DDM instances. For details, see :ref:`Applying a Parameter Template `. - The time when the modification takes effect is determined by the type of the parameter. - -.. note:: - - Parameters in default parameter templates cannot be modified. You can view these parameters by clicking template names. 
If a custom parameter template is set incorrectly and causes an instance restart to fail, you can re-configure the custom parameter template according to configurations of the default parameter template. +- Parameters in default parameter templates cannot be modified. You can view these parameters by clicking template names. If a custom parameter template is set incorrectly and causes an instance restart to fail, you can re-configure the custom parameter template according to configurations of the default parameter template. Procedure --------- @@ -33,10 +30,6 @@ Procedure Available operations are as follows: - .. important:: - - After you modify parameters in a parameter template, some modifications immediately take effect for the DDM instance to which the parameter template applies. Exercise caution when performing this operation. - - To save the modifications, click **Save**. - To cancel the modifications, click **Cancel**. @@ -44,9 +37,8 @@ Procedure .. important:: - The modifications take effect only after you apply the parameter template to DDM instances. For details, see :ref:`Applying a Parameter Template `. - - If you have modified certain parameters or collations, you need to manually restart the DDM instance for the modifications to take effect. However, the restart caused by node class changes (if any) does not make these modifications take effect. + - The modifications take effect only after you apply the parameter template to DDM instances. For details, see :ref:`Applying a Parameter Template `. + - The instance restart caused by node class changes will not put parameter modifications into effect. .. |image1| image:: /_static/images/en-us_image_0000001425096704.png .. |image2| image:: /_static/images/en-us_image_0000001425413588.png diff --git a/umn/source/parameter_template_management/replicating_a_parameter_template.rst b/umn/source/parameter_template_management/replicating_a_parameter_template.rst index 0b6b04b..73fb1ad 100644 --- a/umn/source/parameter_template_management/replicating_a_parameter_template.rst +++ b/umn/source/parameter_template_management/replicating_a_parameter_template.rst @@ -10,8 +10,6 @@ Scenarios You can replicate a parameter template you have created. When you have already created a parameter template and want to provide most of its custom parameters and values in a new parameter template, you can replicate the template you created. -After a parameter template is replicated, the new template may be displayed about 5 minutes later. - Default parameter templates cannot be replicated. You can create parameter templates based on the default ones. Procedure diff --git a/umn/source/schema_management/configuring_the_sql_blacklist.rst b/umn/source/schema_management/configuring_the_sql_blacklist.rst index 534924f..d214806 100644 --- a/umn/source/schema_management/configuring_the_sql_blacklist.rst +++ b/umn/source/schema_management/configuring_the_sql_blacklist.rst @@ -26,8 +26,8 @@ Procedure .. note:: - - **Prefix Match**: Enter SQL statements that contain keywords such as DROP XXXX or DELETE XXX and are not allowed by the current schema. + - **Prefix Match**: Enter SQL statements that contain keywords such as DROP or DELETE and are not allowed by the current schema. - **Full-text Match**: Enter full-text SQL statements that are not allowed by the current schema. - - **Regular Expression Match**: Enter regular expressions that are not allowed by the current schema. - - Separate SQL statements in the blacklist with commas (,). 
The size of SQL statements for prefix match, full-text match, and regular expression match cannot exceed 1 KB, respectively. + - **Regular Expression Match**: Enter specific regular expressions that are not allowed by the current schema. + - Separate SQL statements in the blacklist with semicolons (;). The size of SQL statements for prefix match, full-text match, and regular expression match cannot exceed 1 KB, respectively. - If you want to clear all the SQL statements in prefix match and full-text match areas, clear them separately and click **OK**. diff --git a/umn/source/schema_management/creating_a_schema.rst b/umn/source/schema_management/creating_a_schema.rst index 5386760..c69c4e3 100644 --- a/umn/source/schema_management/creating_a_schema.rst +++ b/umn/source/schema_management/creating_a_schema.rst @@ -16,25 +16,44 @@ Prerequisites - The internal account name is in the format: Fixed prefix (such as DDMRW, DDMR, or DDMREP) + Hash value of the data node ID. - A random password is generated, which contains 16 to 32 characters. - - All DB instances associated with one schema must have the same major MySQL version. + - All instances associated with one schema must have the same major MySQL version. - Multiple schemas can be created in a DDM instance and associated with the same data node. One DDM instance can be associated with either RDS for MySQL or GaussDB(for MySQL) instances, but not both. - One data node cannot be associated with schemas in different DDM instances. - - If you select **Sharded** for **Sharding** when you create a schema, the shard name follows the rule: schema+xxxx. *xxxx* indicates the digit increased from 0000. For example, if a schema name is **db_cbb5** and the total number of shards is 2, the shard names are **db_cbb5_0000** and **db_cbb5_0001**. - - Read-only DB instances cannot be associated with the schema as data nodes. - -You can create a schema in two ways: on the **Instances** page or on the **Schemas** page. This section uses the **Instances** page as an example to describe how to create a schema. + - If you create a sharded schema, more than one shard will be generated in the schema. Shard names follow the rule: **<schema name>_<number>**. <number> here indicates a four-digit number starting from 0000. This number will be incremented by one. For example, if a schema name is **db_cbb5** and there are 2 shards, the shard names are **db_cbb5_0000** and **db_cbb5_0001**. + - Read-only instances cannot be associated with the schema as data nodes. Procedure --------- #. In the navigation pane, choose **Instances**. In the instance list, locate the DDM instance that you want to create a schema for and click **Create Schema** in the **Operation** column. -#. On the displayed page, specify a sharding mode, enter a schema name, set the number of shards, select the required DDM accounts, and click **Next**. + +#. On the **Create Schema** page, set required parameters by referring to :ref:`Table 1 <ddm_06_0006__table5532135017574>`, and click **Next**. + + .. _ddm_06_0006__table5532135017574: + + ..
table:: **Table 1** Parameter description + + +-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+=============================================================================================================================================================================================================================+ + | Sharding | - **Sharded**: indicates that one schema can be associated with multiple data nodes, and all shards will be evenly distributed across the nodes. | + | | - **Unsharded**: indicates that one schema can be associated with only one data node, and only one shard can be created on the RDS instance. | + +-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Schema | The name contains 2 to 48 characters and must start with a lowercase letter. Only lowercase letters, digits, and underscores (_) are allowed. | + +-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Account | The DDM account that needs to be associated with the schema. | + +-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Data Nodes | Select only the data nodes that are in the same VPC as your DDM instance and not in use by other DDM instances. DDM will create databases on the selected data nodes without affecting their existing databases and tables. | + +-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Shards | The total shards are the shards on all data nodes. There cannot be more data nodes than there are shards in the schema. Each data node has to have at least one shard assigned. Recommended shards per data node: 8 to 64. | + +-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + #. Enter a database account with the required permissions and click **Test Connection**. .. note:: Required permissions: SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, PROCESS, REFERENCES, INDEX, ALTER, SHOW DATABASES, CREATE TEMPORARY TABLES, LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER WITH GRANT OPTION - Create a special account and assign it the above permissions as required. Then use the account to create a schema for later operations. 
+ You can create a database account for the RDS for MySQL instance and assign it the above permissions in advance. #. After the test becomes successful, click **Finish**. diff --git a/umn/source/schema_management/deleting_a_schema.rst b/umn/source/schema_management/deleting_a_schema.rst index 675496e..629c828 100644 --- a/umn/source/schema_management/deleting_a_schema.rst +++ b/umn/source/schema_management/deleting_a_schema.rst @@ -27,5 +27,3 @@ Procedure - Your schema will become faulty if you delete its associated data nodes by clicking the **Delete** button in the schema list. - To delete data stored on the associated data nodes, select **Delete data on data nodes** in the displayed dialog box. - - If you want to delete a schema, check whether there are data nodes associated with this schema. If the associated DB instances have been deleted, click **Synchronize DB Instance Data** and delete the schema. - - If the associated data nodes are not deleted but their information is modified, such as the instance name, engine, engine version, maximum connections, port number, or IP address, click **Synchronize DB Instance Data** and delete the schema. diff --git a/umn/source/schema_management/importing_schema_information.rst b/umn/source/schema_management/importing_schema_information.rst index 23f1539..2b817e7 100644 --- a/umn/source/schema_management/importing_schema_information.rst +++ b/umn/source/schema_management/importing_schema_information.rst @@ -10,25 +10,24 @@ Scenarios When you deploy DR or migrate data across regions, you can import schema information in destination DDM instances. The imported information includes schema information and shard information, excluding service data and index data. -Prerequisites -------------- +Precautions +----------- -The DDM instance has no schemas. +The destination DDM instance has no schemas with the same name. Procedure --------- -#. Log in to the DDM console; in the instance list, locate the DDM instance that you want to import schema information into and click its name. +#. Log in to the DDM console, in the instance list, locate the DDM instance that you want to import schema information into and click its name. #. On the displayed page, in the navigation pane, choose **Schemas**. #. On the displayed page, click **Import Schema Information**. - - .. note:: - - More than one JSON file can be imported into a DDM instance in the premise that the DDM instance does not have schemas with the same names as those in the JSON file. - -#. On the displayed page, click **Select File** to select the JSON file exported in :ref:`Exporting Schema Information `, choose the required data nodes, enter a database account with the required permissions, and click **Finish**. +#. On the displayed page, click **Select File** to select the required JSON file which has been exported in :ref:`Exporting Schema Information `. +#. Select the required data nodes, enter a database account with required permissions, and click **Finish**. .. note:: - The number of selected data nodes is the same as the number of data nodes imported into the DDM instance. 
+ - Required permissions: SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, PROCESS, REFERENCES, INDEX, ALTER, SHOW DATABASES, CREATE TEMPORARY TABLES, LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER WITH GRANT OPTION + + You can create a database account for the RDS for MySQL instance and assign it the above permissions in advance. diff --git a/umn/source/service_overview/regions_and_azs.rst b/umn/source/service_overview/regions_and_azs.rst index 6e15805..48c5dd0 100644 --- a/umn/source/service_overview/regions_and_azs.rst +++ b/umn/source/service_overview/regions_and_azs.rst @@ -13,7 +13,14 @@ The combination of a region and an availability zone (AZ) identifies the locatio - A region is a geographic area where physical data centers are located. Each region is completely independent, improving fault tolerance and stability. After a resource is created, its region cannot be changed. - An AZ is a physical location using independent power supplies and networks. Faults in an AZ do not affect other AZs. A region can contain multiple AZs, which are physically isolated but interconnected through internal networks. This ensures the independence of AZs and provides low-cost and low-latency network connections. -shows the relationship between regions and AZs. +:ref:`Figure 1 ` shows the relationship between regions and AZs. + +.. _ddm_01_0007__fig18764197715: + +.. figure:: /_static/images/en-us_image_0000001425096592.png + :alt: **Figure 1** Regions and AZs + + **Figure 1** Regions and AZs Selecting a Region ------------------ diff --git a/umn/source/service_overview/usage_constraints/data_nodes.rst b/umn/source/service_overview/usage_constraints/data_nodes.rst index 75288d8..3fd3294 100644 --- a/umn/source/service_overview/usage_constraints/data_nodes.rst +++ b/umn/source/service_overview/usage_constraints/data_nodes.rst @@ -8,9 +8,7 @@ Data Nodes Constraints on data nodes are as follows: - Data nodes can be only RDS for MySQL and GaussDB(for MySQL) instances of versions 5.7 and 8.0. - - DDM cannot connect to MySQL instances using SSL connections. - - Case sensitivity support cannot be enabled for MySQL instances. .. note:: @@ -19,9 +17,4 @@ Constraints on data nodes are as follows: - If you are using MySQL 8.0, select **Case insensitive** for **Table Name** when you create a MySQL instance. - Modifying configurations of a data node may result in an exception in using your DDM instance. After the modification, click **Synchronize Data Node Information** on the **Data Nodes** page to synchronize changes from the data node to DDM. - - |image1| - - Character set GBK is not allowed for data nodes. - -.. |image1| image:: /_static/images/en-us_image_0000001425096656.png diff --git a/umn/source/service_overview/usage_constraints/index.rst b/umn/source/service_overview/usage_constraints/index.rst index 052f6c6..57dcf53 100644 --- a/umn/source/service_overview/usage_constraints/index.rst +++ b/umn/source/service_overview/usage_constraints/index.rst @@ -7,7 +7,7 @@ Usage Constraints - :ref:`Network Access ` - :ref:`Data Nodes ` -- :ref:`Unavailable Features and Limitations ` +- :ref:`Unsupported Features and Limitations ` - :ref:`High-risk Operations ` .. 
toctree:: @@ -16,5 +16,5 @@ Usage Constraints network_access data_nodes - unavailable_features_and_limitations + unsupported_features_and_limitations high-risk_operations diff --git a/umn/source/service_overview/usage_constraints/unavailable_features_and_limitations.rst b/umn/source/service_overview/usage_constraints/unsupported_features_and_limitations.rst similarity index 87% rename from umn/source/service_overview/usage_constraints/unavailable_features_and_limitations.rst rename to umn/source/service_overview/usage_constraints/unsupported_features_and_limitations.rst index d3c4ac9..25395e3 100644 --- a/umn/source/service_overview/usage_constraints/unavailable_features_and_limitations.rst +++ b/umn/source/service_overview/usage_constraints/unsupported_features_and_limitations.rst @@ -2,7 +2,7 @@ .. _ddm_01_0174: -Unavailable Features and Limitations +Unsupported Features and Limitations ==================================== Unsupported Features @@ -36,11 +36,18 @@ Unsupported Features - CHECKSUM TABLE syntax - Table maintenance statements, including ANALYZE, CHECK, CHECKSUM, OPTIMIZE, and REPAIR TABLE -- Statements for assigning a value to or querying variable **session**, for example, set @rowid=0;select @rowid:=@rowid+1,id from user +- Statements for assigning a value to or querying variable **session** + + For example: + + .. code-block:: + + set @rowid=0;select @rowid:=@rowid+1,id from user; + - SQL statements that use -- or /.../ to comment out a single line or multiple lines of code - DDM provides incomplete support for system variable queries. The returned values are variable values of RDS instances, instead of DDM kernel variable values. For example, the returned values of SELECT @@autocommit do not indicate the current transaction status. -- Executing SET Syntax to modify global variables +- Executing SET syntax to modify global variables - PARTITION syntax. Partitioned tables are not recommended. - LOAD XML statement @@ -48,7 +55,7 @@ Unsupported Features Unsupported Operators --------------------- -- Assignment operator (:=) is not supported. After this operator is expected, it does not take effect even if no errors are reported. +- Assignment operator (:=) is not supported. - Operator (->) is not supported. This operator can be executed successfully in a single table. An error is reported when this operator is executed in other types of tables. - Operator (->>) is not supported. This operator can be executed successfully in a single table. An error is reported when this operator is executed in other types of tables. - Expression IS UNKNOWN @@ -56,7 +63,7 @@ Unsupported Operators Unsupported Functions --------------------- -Do not use a function if you are not sure whether it can be pushed down to RDS. The compute layer of DDM does not support the following functions: +The compute layer of DDM does not support the following functions: - XML functions - Function **ANY_VALUE()** @@ -77,8 +84,8 @@ Do not use a function if you are not sure whether it can be pushed down to RDS. - Aggregate function **VAR_SAMP()** - Aggregate function **VARIANCE()** -SQL Syntaxes ------------- +SQL Syntax +---------- **SELECT** @@ -108,11 +115,12 @@ SQL Syntaxes **Aggregation** -- Function **asc/desc** cannot be used in the GROUP BY statement to sort out results. +Function **asc** or **desc** cannot be used in the GROUP BY statement to sort out results. - .. note:: +.. note:: - DDM automatically ignores keyword **asc/desc** after GROUP BY. 
In MySQL versions earlier than 8.0.13, function **asc/desc** can be used in the GROUP BY statement to sort out results. In MySQL 8.0.13 or later, a syntax error is reported if you use function **asc/desc** this way. ORDER BY is recommended for sorting. + - DDM automatically ignores keyword **asc** or **desc** after GROUP BY. + - In MySQL versions earlier than 8.0.13, function **asc** or **desc** can be used in the GROUP BY statement to sort out results. In MySQL 8.0.13 or later, a syntax error is reported if you use function **asc** or **desc** this way. ORDER BY is recommended for sorting. **Subqueries** @@ -141,7 +149,7 @@ SQL Syntaxes - PARTITION syntax is not supported. Partitioned tables are not recommended. -- Setting **datetime** to **1582** or any value smaller in INSERT statements is not supported. +- Setting **YYYY** of **datetime** (in the format of **YYYY-MM-DD HH:MM:SS**) to **1582** or any value smaller in INSERT statements is not supported. - Nesting a subquery in ON DUPLICATE KEY UPDATE of an INSERT statement is not supported. The following is an example: @@ -165,7 +173,7 @@ SQL Syntaxes .. code-block:: - UPDATE tbl_1 a,tbl_2 b set a.name=b.name where a.id=b.id; + UPDATE tbl_1 a, tbl_2 b set a.name=b.name where a.id=b.id; **name** indicates the sharding key of table **tbl_1**. @@ -175,19 +183,21 @@ SQL Syntaxes .. code-block:: - UPDATE tbl_1 a,tbl_1 b set a.tinyblob_col=concat(b.tinyblob_col,'aaabbb'); + UPDATE tbl_1 a, tbl_1 b set a.tinyblob_col=concat(b.tinyblob_col, 'aaabbb'); -- UPDATE JOIN supports only joins with WHERE conditions. The following is an example: +- UPDATE JOIN supports only joins with WHERE conditions. + + The following is an example: .. code-block:: - UPDATE tbl_3,tbl_4 SET tbl_3.varchar_col='dsgfdg'; + UPDATE tbl_3, tbl_4 SET tbl_3.varchar_col='dsgfdg'; - Referencing other object columns in assignment statements or expressions is not supported when UPDATE JOIN syntax is used. The following is an example: .. code-block:: - UPDATE tbl_1 a,tbl_2 b SET a.name=concat(b.name,'aaaa'),b.name=concat(a.name,'bbbb') ON a.id=b.id; + UPDATE tbl_1 a, tbl_2 b SET a.name=concat(b.name, 'aaaa'),b.name=concat(a.name, 'bbbb') ON a.id=b.id; - You can update a sharding field by two steps: delete the original sharding field and then insert a new field. During this process, the results of querying the sharding fields involved in the target table may be inconsistent. diff --git a/umn/source/shard_configuration/assessment.rst b/umn/source/shard_configuration/assessment.rst index a7ab02a..0e1ae2b 100644 --- a/umn/source/shard_configuration/assessment.rst +++ b/umn/source/shard_configuration/assessment.rst @@ -11,9 +11,3 @@ Before changing shards, you need to carry out a preliminary evaluation and deter - DDM node class: Determine nodes of the DDM instance and vCPUs and memory size of each node. - Data node class: Determine the number of data nodes and vCPUs and memory size of each node. - Business scale: Analyze current service scale and growth trend. - -Customer stories: - -A customer has a four-node DDM instance. Each node has 8 vCPUs and 16 GB memory. The instance is associated with 6 data nodes, containing 73,000 physical tables in total where 100 billion data records are stored, with the data volume up to about 12 TB. - -Changing shards will definitely cause the migration of all schema data. Each data record must be rerouted, so the computing speed is obviously slower than the computing speed when shards are unchanged. 
Considering service requirements, change the DDM node class to 32 vCPUs \| 64 GB, increase data nodes to 12, and upgrade the DDM kernel version to the latest. Then you can restore the node class to the original one as required. The shards are not changed, so only half of the shards are migrated from original data nodes to new data nodes, with no route redistribution involved. Keep shards unchanged and increase data nodes unless there is a single physical table whose storage has reached the upper limit. diff --git a/umn/source/shard_configuration/operation_guide.rst b/umn/source/shard_configuration/operation_guide.rst index d484260..72dab7c 100644 --- a/umn/source/shard_configuration/operation_guide.rst +++ b/umn/source/shard_configuration/operation_guide.rst @@ -13,7 +13,7 @@ Prerequisites - There is a DDM instance with available schemas. - There is an RDS for MySQL instance in the same VPC as the DDM instance, and is not associated with any other DDM instances. If adding data nodes is required, ensure that the new data nodes are in the same VPC as the DDM instance. - The kernel version of the DDM instance must be 3.0.8.3 or later. The latest kernel version is recommended. -- Ensure that the DB instances to be associated with your schema cannot be in read-only states. +- Ensure that the instances to be associated with your schema cannot be in read-only states. Procedure --------- @@ -47,7 +47,7 @@ Procedure #. View progress at the Task Center or run command **show migrate status** on your SQL client to view progress. A shard configuration task consists of two phases: full migration and incremental migration. - .. figure:: /_static/images/en-us_image_0000001475213569.png + .. figure:: /_static/images/en-us_image_0000001631956069.png :alt: **Figure 1** Run the required command to view task progress **Figure 1** Run the required command to view task progress @@ -74,7 +74,7 @@ Procedure **FULL_SUCCEED_COUNT**: indicates the objects that have finished a full migration in the current scale-out subtask. - **FULL_TOTAL_COUNT**: indicates the all objects that need to be migrated by a full migration in the current scale-out subtask. + **FULL_TOTAL_COUNT**: indicates all objects that need to be migrated by a full migration in the current scale-out subtask. **FULL_PERCENTAGE**: indicates the percentage of migrated objects in the full migration in the current scale-out subtask. diff --git a/umn/source/shard_configuration/overview_and_application_scenarios.rst b/umn/source/shard_configuration/overview_and_application_scenarios.rst index 249cec5..fadb243 100644 --- a/umn/source/shard_configuration/overview_and_application_scenarios.rst +++ b/umn/source/shard_configuration/overview_and_application_scenarios.rst @@ -13,40 +13,40 @@ Shard configuration is a core function of DDM. With this function, you can incre Application Scenarios --------------------- -1. Keep shards unchanged and increase data nodes. +DDM provides the following methods of configuring shards to meet different service needs. -This method does not change the number of shards and only increases the number of data nodes. Some shards are migrated from original data nodes to new data nodes. The shard data is not redistributed, so this method is the fastest one among all three methods is recommended. +**Method 1: Keep shards unchanged and increase data nodes** -This method meets rapid service growth after horizontal sharding and can reduce costs at the beginning stage of services. 
+This method does not change the number of shards and only increases the number of data nodes. Some shards are migrated from original data nodes to new data nodes. The shard data is not redistributed, so this method is the fastest one among all three methods and is recommended. -It is also suitable if RDS for MySQL instances cannot meet storage space and read/write performance requirements. +This method underpins rapid service growth after horizontal sharding and can reduce costs at the early stage of services. It is also suitable if RDS for MySQL instances cannot meet storage space and read/write performance requirements. -.. figure:: /_static/images/en-us_image_0000001475213609.png +.. figure:: /_static/images/en-us_image_0000001581027344.png :alt: **Figure 1** Adding RDS for MySQL instances with shards unchanged **Figure 1** Adding RDS for MySQL instances with shards unchanged -2. Increase shards with data nodes unchanged. +**Method 2: Add shards with data nodes unchanged** -This method increases the shards, but not data nodes. It changes total shards, total table shards, and table sharding rules. Data is redistributed to different shards, and broadcast tables are increased. +This method adds shards, but not data nodes. It changes total shards, total table shards, and table sharding rules. Data is redistributed to all shards. Old tables in original shards will be deleted, and broadcast tables are increased. This method is suitable if the associated RDS for MySQL instance has sufficient storage space but one of its tables contains a large amount of data, with query performance limited. -.. figure:: /_static/images/en-us_image_0000001425254296.png - :alt: **Figure 2** Increasing shards with RDS for MySQL instances unchanged +.. figure:: /_static/images/en-us_image_0000001580548376.png + :alt: **Figure 2** Adding shards with RDS for MySQL instances unchanged - **Figure 2** Increasing shards with RDS for MySQL instances unchanged + **Figure 2** Adding shards with RDS for MySQL instances unchanged -3. Increase both shards and data nodes. +**Method 3: Add both shards and data nodes** -This method increases both shards and data nodes. It changes total shards, total table shards, and table sharding rules. Data is redistributed to different shards, and broadcast tables are increased. +This method increases both shards and data nodes. It changes total shards, total table shards, and table sharding rules. Data is redistributed to different shards. Old tables on the original shards will be deleted, and broadcast tables are increased. This method is suitable if RDS for MySQL instances cannot meet storage space and read/write requirements and there is a physical table containing a large amount of data with query performance limited. -.. figure:: /_static/images/en-us_image_0000001475133049.png - :alt: **Figure 3** Increasing shards and RDS for MySQL instances +.. figure:: /_static/images/en-us_image_0000001631228369.png + :alt: **Figure 3** Adding shards and RDS for MySQL instances - **Figure 3** Increasing shards and RDS for MySQL instances + **Figure 3** Adding shards and RDS for MySQL instances diff --git a/umn/source/sql_syntax/ddl/creating_a_table.rst b/umn/source/sql_syntax/ddl/creating_a_table.rst index 8c59ee3..3d53aea 100644 --- a/umn/source/sql_syntax/ddl/creating_a_table.rst +++ b/umn/source/sql_syntax/ddl/creating_a_table.rst @@ -9,7 +9,7 @@ Creating a Table - Do not create tables whose names start with **\_ddm**. 
DDM manages such tables as internal tables by default - Sharded tables do not support globally unique indexes. If the unique key is different from the sharding key, data uniqueness cannot be ensured. - - The auto-increment key should be of the BIGINT data type. To avoid duplicate values, do not use TINYINT, SMALLINT, MEDIUMINT, INTEGER, or INT as the auto-increment key. + - The auto-increment key should be a BIGINT value. To avoid duplicate values, do not use TINYINT, SMALLINT, MEDIUMINT, INTEGER, or INT as the auto-increment key. Database and Table Sharding --------------------------- @@ -24,7 +24,7 @@ The following is an example statement when HASH is used for database sharding an PRIMARY KEY(id) ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci DBPARTITION BY HASH(id) - TBPARTITION BY mod_hash(name) tbpartitions 8; + TBPARTITION BY MOD_HASH(name) tbpartitions 8; Database Sharding ----------------- @@ -63,8 +63,8 @@ The following is an example statement: .. code-block:: - CREATE TABLE single( + CREATE TABLE single_tb1 ( id bigint NOT NULL AUTO_INCREMENT COMMENT 'Primary key id', name varchar(128), PRIMARY KEY(id) - ); + ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci; diff --git a/umn/source/sql_syntax/ddl/overview.rst b/umn/source/sql_syntax/ddl/overview.rst index 7781909..3a9a8c5 100644 --- a/umn/source/sql_syntax/ddl/overview.rst +++ b/umn/source/sql_syntax/ddl/overview.rst @@ -12,36 +12,46 @@ DDL Statements that Can Be Executed on a MySQL Client - TRUNCATE - .. code-block:: + Example: + + .. code-block:: text - Example: TRUNCATE TABLE t1 - Deletes all data from table t1. - TRUNCATE TABLE is used to delete all data from a table and has the DROP permission. In logic, TRUNCATE TABLE is similar to the DELETE statement for deleting all data from a table. + + Deletes all data from table t1. + + TRUNCATE TABLE deletes all data from a table and has the DROP permission. In logic, TRUNCATE TABLE is similar to the DELETE statement for deleting all rows from a table. - ALTER TABLE - .. code-block:: + Example: + + .. code-block:: text - Example: ALTER TABLE t2 DROP COLUMN c, DROP COLUMN d; - Deletes columns c and d fom table t2. - ALTER can add or delete a column, create or drop an index, change the type of an existing column, rename columns or tables, or change the storage engine or comments of a table. + + Deletes columns c and d fom table t2. + + ALTER can add or delete a column, create or drop an index, change the type of an existing column, rename columns or tables, or change the storage engine or comments of a table. - DROP INDEX - .. code-block:: + Example: + + .. code-block:: text - Example: DROP INDEX `PRIMARY` ON t; - Deletes primary key from table t. - DROP INDEX can delete index index_name from table tbl_name. + + Deletes primary key from table t. - CREATE INDEX - .. code-block:: + Example: + + .. code-block:: text - Example: CREATE INDEX part_of_name ON customer (name(10)); - Creates an index using the first 10 characters in column name (assuming that there are non-binary character strings in column name). - CREATE INDEX can add an index to an existing table. + + Creates an index using the first 10 characters in column name (assuming that there are non-binary character strings in column name). + + CREATE INDEX can add an index to an existing table. 
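As a quick cross-check (assuming the **customer** table and the **part_of_name** index from the example above), the index created with CREATE INDEX can be listed and later removed with the matching DROP INDEX statement:

.. code-block::

   SHOW INDEX FROM customer;             -- lists part_of_name among the table's indexes
   DROP INDEX part_of_name ON customer;  -- removes the index created above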
diff --git a/umn/source/sql_syntax/ddl/sharding_algorithm_overview.rst b/umn/source/sql_syntax/ddl/sharding_algorithm_overview.rst index db5dbfc..ebb26c6 100644 --- a/umn/source/sql_syntax/ddl/sharding_algorithm_overview.rst +++ b/umn/source/sql_syntax/ddl/sharding_algorithm_overview.rst @@ -54,33 +54,37 @@ Different sharding algorithms support different data types. The following table .. table:: **Table 2** Supported data types +--------------------+---------+----------+-----------+---------+-----+--------+------+---------+------+----------+-----------+--------+ - | Sharding Algorithm | TINYINT | SMALLINT | MEDIUMINT | INTEGER | INT | BIGINT | CHAR | VARCHAR | DATE | DATETIME | TIMESTAMP | OTHERS | + | Sharding Algorithm | TINYINT | SMALLINT | MEDIUMINT | INTEGER | INT | BIGINT | CHAR | VARCHAR | DATE | DATETIME | TIMESTAMP | Others | +====================+=========+==========+===========+=========+=====+========+======+=========+======+==========+===========+========+ - | MOD_HASH | Y | Y | Y | Y | Y | Y | Y | Y | ╳ | ╳ | ╳ | ╳ | + | MOD_HASH | Y | Y | Y | Y | Y | Y | Y | Y | N | N | N | N | +--------------------+---------+----------+-----------+---------+-----+--------+------+---------+------+----------+-----------+--------+ - | MOD_HASH_CI | Y | Y | Y | Y | Y | Y | Y | Y | ╳ | ╳ | ╳ | ╳ | + | MOD_HASH_CI | Y | Y | Y | Y | Y | Y | Y | Y | N | N | N | N | +--------------------+---------+----------+-----------+---------+-----+--------+------+---------+------+----------+-----------+--------+ - | HASH | Y | Y | Y | Y | Y | Y | Y | Y | ╳ | ╳ | ╳ | ╳ | + | HASH | Y | Y | Y | Y | Y | Y | Y | Y | N | N | N | N | +--------------------+---------+----------+-----------+---------+-----+--------+------+---------+------+----------+-----------+--------+ - | RANGE | Y | Y | Y | Y | Y | Y | ╳ | ╳ | ╳ | ╳ | ╳ | ╳ | + | RANGE | Y | Y | Y | Y | Y | Y | N | N | N | N | N | N | +--------------------+---------+----------+-----------+---------+-----+--------+------+---------+------+----------+-----------+--------+ - | RIGHT_SHIFT | Y | Y | Y | Y | Y | Y | ╳ | ╳ | ╳ | ╳ | ╳ | ╳ | + | RIGHT_SHIFT | Y | Y | Y | Y | Y | Y | N | N | N | N | N | N | +--------------------+---------+----------+-----------+---------+-----+--------+------+---------+------+----------+-----------+--------+ - | YYYYMM | ╳ | ╳ | ╳ | ╳ | ╳ | ╳ | ╳ | ╳ | Y | Y | Y | ╳ | + | YYYYMM | N | N | N | N | N | N | N | N | Y | Y | Y | N | +--------------------+---------+----------+-----------+---------+-----+--------+------+---------+------+----------+-----------+--------+ - | YYYYDD | ╳ | ╳ | ╳ | ╳ | ╳ | ╳ | ╳ | ╳ | Y | Y | Y | ╳ | + | YYYYDD | N | N | N | N | N | N | N | N | Y | Y | Y | N | +--------------------+---------+----------+-----------+---------+-----+--------+------+---------+------+----------+-----------+--------+ - | YYYYWEEK | ╳ | ╳ | ╳ | ╳ | ╳ | ╳ | ╳ | ╳ | Y | Y | Y | ╳ | + | YYYYWEEK | N | N | N | N | N | N | N | N | Y | Y | Y | N | +--------------------+---------+----------+-----------+---------+-----+--------+------+---------+------+----------+-----------+--------+ - | MM | ╳ | ╳ | ╳ | ╳ | ╳ | ╳ | ╳ | ╳ | Y | Y | Y | ╳ | + | MM | N | N | N | N | N | N | N | N | Y | Y | Y | N | +--------------------+---------+----------+-----------+---------+-----+--------+------+---------+------+----------+-----------+--------+ - | DD | ╳ | ╳ | ╳ | ╳ | ╳ | ╳ | ╳ | ╳ | Y | Y | Y | ╳ | + | DD | N | N | N | N | N | N | N | N | Y | Y | Y | N | 
+--------------------+---------+----------+-----------+---------+-----+--------+------+---------+------+----------+-----------+--------+ - | MMDD | ╳ | ╳ | ╳ | ╳ | ╳ | ╳ | ╳ | ╳ | Y | Y | Y | ╳ | + | MMDD | N | N | N | N | N | N | N | N | Y | Y | Y | N | +--------------------+---------+----------+-----------+---------+-----+--------+------+---------+------+----------+-----------+--------+ - | WEEK | ╳ | ╳ | ╳ | ╳ | ╳ | ╳ | ╳ | ╳ | Y | Y | Y | ╳ | + | WEEK | N | N | N | N | N | N | N | N | Y | Y | Y | N | +--------------------+---------+----------+-----------+---------+-----+--------+------+---------+------+----------+-----------+--------+ +.. note:: + + **Y** indicates that the data type is supported, and **N** indicates that the data type is not supported. + Table Creation Syntax of Sharding Algorithms -------------------------------------------- diff --git a/umn/source/sql_syntax/ddl/sharding_algorithms/dd.rst b/umn/source/sql_syntax/ddl/sharding_algorithms/dd.rst index 63c2dfa..e1710e1 100644 --- a/umn/source/sql_syntax/ddl/sharding_algorithms/dd.rst +++ b/umn/source/sql_syntax/ddl/sharding_algorithms/dd.rst @@ -14,7 +14,7 @@ Instructions ------------ - The sharding key must be DATE, DATETIME, or TIMESTAMP. -- This algorithm can be used only for table sharding, instead of database sharding. +- This algorithm can be used only for table sharding. It cannot be used for database sharding. Data Routing ------------ diff --git a/umn/source/sql_syntax/ddl/sharding_algorithms/hash.rst b/umn/source/sql_syntax/ddl/sharding_algorithms/hash.rst index 14c1ff3..655fb34 100644 --- a/umn/source/sql_syntax/ddl/sharding_algorithms/hash.rst +++ b/umn/source/sql_syntax/ddl/sharding_algorithms/hash.rst @@ -8,7 +8,7 @@ HASH Application Scenarios --------------------- -This algorithm features even distribution of data or sharding tables by year, month, week, day, or a combination of them. Arithmetic operators such as equality (=) and IN operators are often used in SQL queries. +This algorithm features even distribution of data and sharding tables. Arithmetic operators such as equality (=) and IN operators are often used in SQL queries. Instructions ------------ @@ -20,7 +20,7 @@ Data Routing Determine the range of each database or table shard using 102400. -For example, if there are 8 shards in each schema, use formula 102400/8=12800 to calculate the range of each shard as follows: 0=0-12799, 1=12800-25599, 2=25600-38399, 3=38400-51199, 4=51200-63999, 5=64000-76799, 6=76800-89599, and 7=89600-102399 +For example, if there are 8 shards in each schema, use formula 102400/8 = 12800 to calculate the range of each shard as follows: 0=[0,12799], 1=[12800,25599], 2=[25600,38399], 3=[38400,51199], 4=[51200,63999], 5=[64000,76799], 6=[76800,89599], and 7=[89600,102399]. To determine the route, calculate CRC32 value based on the sharding key value and divide the CRC value by 102400. Then check which range the remainder belongs to. 
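As an illustration only (assuming the key is hashed as a string exactly the way MySQL's built-in CRC32() function would, and using the 8-shard ranges above), the slot and the resulting shard index can be checked directly on a MySQL client:

.. code-block::

   SELECT CRC32('abc') % 102400 AS slot,                                -- 74178 with the standard CRC32
          FLOOR((CRC32('abc') % 102400) / (102400 / 8)) AS shard_index; -- 74178 falls in [64000,76799], that is, shard 5

Whether DDM serializes a given key value in the same way before hashing is an assumption here; the query only demonstrates the range check described above.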
@@ -52,7 +52,7 @@ Calculation Method +---------------+--------------------------------------------------------+------------------------------+ | weekofyear() | weekofyear(yyyy-MM-dd)=Week number of the current year | weekofyear ('2019-10-11')=41 | +---------------+--------------------------------------------------------+------------------------------+ - | day() | day(yyyy-MM-dd)=Day number of the current year | day ('2019-10-11')=11 | + | day() | day(yyyy-MM-dd)=dd | day ('2019-10-11')=11 | +---------------+--------------------------------------------------------+------------------------------+ .. table:: **Table 3** Required calculation methods when the sharding key is the DATE type @@ -68,29 +68,29 @@ Calculation Method Syntax for Creating Tables -------------------------- -**Assume that you use field** **ID** **as the sharding key and the HASH algorithm to shard databases:** +- Assume that you use field ID as the sharding key and the HASH algorithm to shard databases: -.. code-block:: + .. code-block:: - create table hash_tb ( - id int, - name varchar(30) DEFAULT NULL, - create_time datetime DEFAULT NULL, - primary key(id) - ) ENGINE=InnoDB DEFAULT CHARSET=utf8 dbpartition by hash (ID); + create table hash_tb ( + id int, + name varchar(30) DEFAULT NULL, + create_time datetime DEFAULT NULL, + primary key(id) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8 dbpartition by hash (ID); -**Assume that you use field** **ID** **as the sharding key and the hash algorithm to shard databases and tables:** +- Assume that you use field ID as the sharding key and the hash algorithm to shard databases and tables: -.. code-block:: + .. code-block:: - create table mod_hash_tb ( - id int, - name varchar(30) DEFAULT NULL, - create_time datetime DEFAULT NULL, - primary key(id) - ) ENGINE=InnoDB DEFAULT CHARSET=utf8 - dbpartition by hash (ID) - tbpartition by hash (ID) tbpartitions 4; + create table mod_hash_tb ( + id int, + name varchar(30) DEFAULT NULL, + create_time datetime DEFAULT NULL, + primary key(id) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8 + dbpartition by hash (ID) + tbpartition by hash (ID) tbpartitions 4; Precautions ----------- diff --git a/umn/source/sql_syntax/ddl/sharding_algorithms/mm.rst b/umn/source/sql_syntax/ddl/sharding_algorithms/mm.rst index 616a0da..25b9477 100644 --- a/umn/source/sql_syntax/ddl/sharding_algorithms/mm.rst +++ b/umn/source/sql_syntax/ddl/sharding_algorithms/mm.rst @@ -14,7 +14,7 @@ Instructions ------------ - The sharding key must be DATE, DATETIME, or TIMESTAMP. -- This algorithm can be used only for table sharding, instead of database sharding. +- This algorithm can be used only for table sharding. It cannot be used for database sharding. Data Routing ------------ diff --git a/umn/source/sql_syntax/ddl/sharding_algorithms/mmdd.rst b/umn/source/sql_syntax/ddl/sharding_algorithms/mmdd.rst index 2c544b7..1c65c4c 100644 --- a/umn/source/sql_syntax/ddl/sharding_algorithms/mmdd.rst +++ b/umn/source/sql_syntax/ddl/sharding_algorithms/mmdd.rst @@ -8,13 +8,13 @@ MMDD Application Scenarios --------------------- -This algorithm applies when you want to shard data by day in a year. One table shard for one day is recommended. +This algorithm applies when you want to shard data by day in a year. One table shard for one day (at most 366 days in a year) is recommended. Instructions ------------ - The sharding key must be DATE, DATETIME, or TIMESTAMP. -- This algorithm can be used only for table sharding, instead of database sharding. +- This algorithm can be used only for table sharding. 
It cannot be used for database sharding. Data Routing ------------ diff --git a/umn/source/sql_syntax/ddl/sharding_algorithms/mod_hash.rst b/umn/source/sql_syntax/ddl/sharding_algorithms/mod_hash.rst index 8ea97cf..6aebb7f 100644 --- a/umn/source/sql_syntax/ddl/sharding_algorithms/mod_hash.rst +++ b/umn/source/sql_syntax/ddl/sharding_algorithms/mod_hash.rst @@ -20,7 +20,7 @@ Data Routing The data route depends on the remainder of the sharding key value divided by database or table shards. If the value is a string, convert the string into a hashed value and calculate the data route based on the value. -For example, MOD_HASH('8') is equivalent to 8 % D. D is the number of database or table shards. +For example, if the sharding key value is **8**, MOD_HASH('8') is equivalent to 8 % D. D is the number of database or table shards. Calculation Method ------------------ @@ -38,7 +38,11 @@ Calculation Method +--------------------------------------------+------------------------------------------------------------------------------+--------------------------------+ | Database sharding key = Table sharding key | Table routing result = Sharding key value % (Database shards x Table shards) | Table shard: 16 % (8 x 3) = 16 | | | | | - | | Database routing result = Table routing result / Table shards | Database shard: 16 / 3 = 5 | + | | Database routing result = Table routing result/Table shards | Database shard: 16/3 = 5 | + | | | | + | | .. note:: | | + | | | | + | | Database routing result = Table routing result/Table shards | | +--------------------------------------------+------------------------------------------------------------------------------+--------------------------------+ **Method 2: Use a String as the Sharding Key** @@ -50,15 +54,17 @@ Calculation Method +============================================+====================================================================================+======================================+ | Database sharding key ≠ Table sharding key | Database routing result = hash(Database sharding key value) % Database shards | hash('abc') = 'abc'.hashCode()=96354 | | | | | - | | Table routing result = hash(Table sharding key value % Table shards | Database shard: 96354 % 8 = 2; | + | | Table routing result = hash(Table sharding key value) % Table shards | Database shard: 96354 % 8 = 2; | | | | | | | | Table shard: 96354 % 3 = 0; | +--------------------------------------------+------------------------------------------------------------------------------------+--------------------------------------+ | Database sharding key = Table sharding key | Table routing result = hash(Sharding key value) % (Database shards x Table shards) | hash('abc') = 'abc'.hashCode()=96354 | | | | | - | | Database routing result = Table routing result / Table shards | Table shard: 96354 % (8 x 3) = 18 | + | | Database routing result = Table routing result/Table shards | Table shard: 96354 % (8 x 3) = 18 | | | | | - | | | Database shard: 18 / 3=6 | + | | .. note:: | Database shard: 18/3 = 6 | + | | | | + | | Database routing result is rounded off to the nearest integer. | | +--------------------------------------------+------------------------------------------------------------------------------------+--------------------------------------+ Syntax for Creating Tables @@ -90,4 +96,4 @@ Syntax for Creating Tables Precautions ----------- -- The MOD_HASH algorithm is a simple way to find the remainder of the sharding key value divided by shards. 
This algorithm features even distribution of sharding key values to ensure even results. +The MOD_HASH algorithm is a simple way to find the remainder of the sharding key value divided by shards. This algorithm features even distribution of sharding key values to ensure even results. diff --git a/umn/source/sql_syntax/ddl/sharding_algorithms/mod_hash_ci.rst b/umn/source/sql_syntax/ddl/sharding_algorithms/mod_hash_ci.rst index d975fe1..f0f0ed3 100644 --- a/umn/source/sql_syntax/ddl/sharding_algorithms/mod_hash_ci.rst +++ b/umn/source/sql_syntax/ddl/sharding_algorithms/mod_hash_ci.rst @@ -36,7 +36,11 @@ Calculation Method +--------------------------------------------+------------------------------------------------------------------------------+--------------------------------+ | Database sharding key = Table sharding key | Table routing result = Sharding key value % (Database shards x Table shards) | Table shard: 16 % (8 x 3) = 16 | | | | | - | | Database routing result = Table routing result / Table shards | Database shard: 16 / 3 = 5 | + | | Database routing result = Table routing result/Table shards | Database shard: 16/3 = 5 | + | | | | + | | .. note:: | | + | | | | + | | Database routing result is rounded off to the nearest integer. | | +--------------------------------------------+------------------------------------------------------------------------------+--------------------------------+ **Method 2: Use a String as the Sharding Key** @@ -48,15 +52,17 @@ Calculation Method +============================================+====================================================================================+====================================================+ | Database sharding key ≠ Table sharding key | Database routing result = hash(Database sharding key value) % Database shards | hash('abc') = 'abc'.toUpperCase().hashCode()=64578 | | | | | - | | Table routing result = hash(Table sharding key value % Table shards | Database shard: 64578 % 8 = 2; | + | | Table routing result = hash(Table sharding key value) % Table shards | Database shard: 64578 % 8 = 2; | | | | | | | | Table shard: 64578 % 3 = 0; | +--------------------------------------------+------------------------------------------------------------------------------------+----------------------------------------------------+ | Database sharding key = Table sharding key | Table routing result = hash(Sharding key value) % (Database shards x Table shards) | hash('abc') = 'abc'.toUpperCase().hashCode()=64578 | | | | | - | | Database routing result = Table routing result / Table shards | Table shard: 64578% (8 x 3) = 18 | + | | Database routing result = Table routing result/Table shards | Table shard: 64578 % (8 x 3) = 18 | | | | | - | | | Database shard: 18 / 3 = 6 | + | | .. note:: | Database shard: 18/3 = 6 | + | | | | + | | Database routing result is rounded off to the nearest integer. 
| | +--------------------------------------------+------------------------------------------------------------------------------------+----------------------------------------------------+ Syntax for Creating Tables @@ -71,7 +77,7 @@ Syntax for Creating Tables name varchar(30) DEFAULT NULL, create_time datetime DEFAULT NULL, primary key(id) - ) ENGINE = InnoDB DEFAULT CHARSET = utf8 dbpartition by mod_hash_ci(ID); + ) ENGINE = InnoDB DEFAULT CHARSET = utf8 dbpartition by mod_hash_ci(id); - Assume that you use field **ID** as the sharding key to shard databases and tables based on MOD_HASH_CI: @@ -83,10 +89,10 @@ Syntax for Creating Tables create_time datetime DEFAULT NULL, primary key(id) ) ENGINE = InnoDB DEFAULT CHARSET = utf8 - dbpartition by mod_hash_ci(ID) - tbpartition by mod_hash_ci(ID) tbpartitions 4; + dbpartition by mod_hash_ci(id) + tbpartition by mod_hash_ci(id) tbpartitions 4; Precautions ----------- -- The MOD_HASH_CI algorithm is a simple way to find the remainder of the sharding key value divided by shards. This algorithm features even distribution of sharding key values to ensure even results. +The MOD_HASH_CI algorithm is a simple way to find the remainder of the sharding key value divided by shards. This algorithm features even distribution of sharding key values to ensure even results. diff --git a/umn/source/sql_syntax/ddl/sharding_algorithms/range.rst b/umn/source/sql_syntax/ddl/sharding_algorithms/range.rst index 35b1555..06d51fc 100644 --- a/umn/source/sql_syntax/ddl/sharding_algorithms/range.rst +++ b/umn/source/sql_syntax/ddl/sharding_algorithms/range.rst @@ -20,7 +20,7 @@ Data Routing Data is routed to different shards by the sharding key value based on algorithm metadata rules. -Metadata needs to be set when a table is created. For example, if there are eight shards in one schema, the metadata range can be 1-2=0, 3-4=1, 5-6=2, 7-8=3, 9-10=4, 11-12=5, 13-14=6, and default=7. Data is routed to shards by the sharding key value based on the range. +Metadata needs to be set when a table is created. For example, if there are eight shards in one schema, the metadata range can be [1-2]=0, [3-4]=1, [5-6]=2, [7-8]=3, [9-10]=4, [11-12]=5, [13-14]=6, and default=7. Data is routed to shards by the sharding key value based on the range. Calculation Method ------------------ @@ -29,35 +29,39 @@ Calculation Method .. table:: **Table 1** Required calculation methods when the sharding key is the integer data type - +-----------------------+----------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------+ - | Condition | Calculation Method | Example | - +=======================+======================================================================================================================+===============================================================================================+ - | Integer sharding keys | Database routing result: Data is routed to different shards based on the sharding key and the preset metadata range. | Data is routed to shard1 if the sharding key value is 3 and the preset metadata range is 3-4. 
| - +-----------------------+----------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------+ + +-----------------------+----------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------+ + | Condition | Calculation Method | Example | + +=======================+======================================================================================================================+=================================================================================================+ + | Integer sharding keys | Database routing result: Data is routed to different shards based on the sharding key and the preset metadata range. | Data is routed to shard1 if the sharding key value is 3 and the preset metadata range is [3-4]. | + +-----------------------+----------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------+ **Method 2: Use a Date as the Sharding Key** .. table:: **Table 2** Supported date functions - +---------------+--------------------------------------------------------+------------------------------+ - | Date Function | Calculation Method | Example | - +===============+========================================================+==============================+ - | year() | year(yyyy-MM-dd)=yyyy | year('2019-10-11')=2019 | - +---------------+--------------------------------------------------------+------------------------------+ - | month() | month(yyyy-MM-dd)=MM | month('2019-10-11')=10 | - +---------------+--------------------------------------------------------+------------------------------+ - | weekofyear() | weekofyear(yyyy-MM-dd)=Week number of the current year | weekofyear ('2019-10-11')=41 | - +---------------+--------------------------------------------------------+------------------------------+ - | day() | day(yyyy-MM-dd)=Day number of the current month | day ('2019-10-11')=11 | - +---------------+--------------------------------------------------------+------------------------------+ + +-----------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------------+ + | Date Function | Calculation Method | Example | + +=======================+================================================================================================================================================================================================================================================+==============================+ + | year() | year(yyyy-MM-dd)=yyyy | year('2019-10-11')=2019 | + +-----------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------------+ + | month() | month(yyyy-MM-dd)=MM | month('2019-10-11')=10 | + 
+-----------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------------+ + | weekofyear() | weekofyear(yyyy-MM-dd)=Week number of the current year | weekofyear ('2019-10-11')=41 | + | | | | + | | .. note:: | | + | | | | + | | The Weekofyear() function is used to return the week number of a specific date represented by the date parameter in a year. For details, see `WEEK `__. | | + +-----------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------------+ + | day() | day(yyyy-MM-dd)=dd | day ('2019-10-11')=11 | + +-----------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------------+ .. table:: **Table 3** Calculation methods - +-------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ - | Condition | Calculation Method | Example | - +===================+=======================================================================================================================================================+=======================================================================================================================================+ - | Date sharding key | Database routing: Data is routed to different database shards based on the date function (database sharding key value) and the preset metadata range. | Data is routed to shard 4 based on the metadata range 9-10 when the sharding key value is 10: month(2019-10-11)=10 belongs to 9-10=4. | - +-------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ + +-------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------+ + | Condition | Calculation Method | Example | + +===================+=======================================================================================================================================================+=========================================================================================================================================+ + | Date sharding key | Database routing: Data is routed to different database shards based on the date function (database sharding key value) and the preset metadata range. 
| Data is routed to shard 4 based on the metadata range 9-10 when the sharding key value is 10: month(2019-10-11)=10 belongs to [9-10]=4. | + +-------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------+ Syntax for Creating Tables -------------------------- diff --git a/umn/source/sql_syntax/ddl/sharding_algorithms/week.rst b/umn/source/sql_syntax/ddl/sharding_algorithms/week.rst index 5aaea4a..88223c3 100644 --- a/umn/source/sql_syntax/ddl/sharding_algorithms/week.rst +++ b/umn/source/sql_syntax/ddl/sharding_algorithms/week.rst @@ -8,13 +8,13 @@ WEEK Application Scenarios --------------------- -This algorithm applies when you want to shard data by day in a week. One table shard for one day is recommended. +This algorithm applies when you want to shard data by day in a week. One table shard for one weekday is recommended. Instructions ------------ - The sharding key must be DATE, DATETIME, or TIMESTAMP. -- This algorithm can be used only for table sharding, instead of database sharding. +- This algorithm can be used only for table sharding. It cannot be used for database sharding. Data Routing ------------ diff --git a/umn/source/sql_syntax/ddl/sharding_algorithms/yyyydd.rst b/umn/source/sql_syntax/ddl/sharding_algorithms/yyyydd.rst index 7c76ab4..660d3be 100644 --- a/umn/source/sql_syntax/ddl/sharding_algorithms/yyyydd.rst +++ b/umn/source/sql_syntax/ddl/sharding_algorithms/yyyydd.rst @@ -24,7 +24,7 @@ For example, YYYYDD('2012-12-31 12:12:12') is equivalent to (2012 x 366 + 366) % .. note:: - "2012-12-31" is the 366th day of 2012, so the calculation is "2012 x 366". + 2012-12-31 is the 366th day of 2012, so the calculation is 2012 x 366 + 366. Calculation Method ------------------ @@ -44,7 +44,11 @@ Calculation Method | | | | | | Table routing result = (yyyy x 366 + Day of the current year) % (Database shards x Table shards) | Database shard: (2012 x 366 + 366) % (8 x 3) = 6 | | | | | - | | Database routing result = Table routing result / Table shards | Database shard: 6 / 3 = 2 | + | | Database routing result = Table routing result/Table shards | Database shard: 6/3 = 2 | + | | | | + | | .. note:: | | + | | | | + | | Database routing result is rounded off to the nearest integer. | | +--------------------------------------------+--------------------------------------------------------------------------------------------------+--------------------------------------------------+ Syntax for Creating Tables @@ -52,7 +56,7 @@ Syntax for Creating Tables Assume that there are already 8 physical databases in your database instance. Now you want to shard data by year and day and require that data of the same day be stored in one table and each day within two years should correspond to an independent table, so that you can query data from a physical table in a physical database by the sharding key. -In this scenario, you can select the YYYYDD algorithm. Then create at least 732 physical tables for 732 days of the two years (366 days for one year), each day corresponding to one table. Since you already have 8 physical databases, 92 (732 / 8 = 91.5, rounded up to 92) physical tables should be created in each of them. The number of tables should be an integral multiple of databases. 
The following is an example SQL statement for creating a table: +In this scenario, you can select the YYYYDD algorithm. Then create at least 732 physical tables for 732 days of the two years (366 days for one year), each day corresponding to one table. Since you already have 8 physical databases, 92 (732/8 = 91.5, rounded up to 92) physical tables should be created in each of them. The number of tables should be an integral multiple of databases. The following is an example SQL statement for creating a table: .. code-block:: @@ -77,8 +81,6 @@ Syntax for creating tables when only database sharding is required: ) ENGINE = InnoDB DEFAULT CHARSET = utf8 dbpartition by YYYYDD(create_time); -.. code-block:: text - Precautions ----------- diff --git a/umn/source/sql_syntax/ddl/sharding_algorithms/yyyymm.rst b/umn/source/sql_syntax/ddl/sharding_algorithms/yyyymm.rst index c941ea6..63988e9 100644 --- a/umn/source/sql_syntax/ddl/sharding_algorithms/yyyymm.rst +++ b/umn/source/sql_syntax/ddl/sharding_algorithms/yyyymm.rst @@ -40,7 +40,11 @@ Calculation Method | | | | | | Table routing result = (yyyy x 12 + MM) % (Database shards x Table shards) | Table shard: (2012 x 12 + 11) % (8 x 3) = 11 | | | | | - | | Database routing result = Table routing result / Table shards | Database shard: 11 % 3 = 3 | + | | Database routing result = Table routing result/Table shards | Database shard: 11 % 3 = 3 | + | | | | + | | .. note:: | | + | | | | + | | Database routing result is rounded off to the nearest integer. | | +--------------------------------------------+----------------------------------------------------------------------------+----------------------------------------------+ Syntax for Creating Tables diff --git a/umn/source/sql_syntax/ddl/sharding_algorithms/yyyyweek.rst b/umn/source/sql_syntax/ddl/sharding_algorithms/yyyyweek.rst index ca4c19c..e525ef9 100644 --- a/umn/source/sql_syntax/ddl/sharding_algorithms/yyyyweek.rst +++ b/umn/source/sql_syntax/ddl/sharding_algorithms/yyyyweek.rst @@ -24,7 +24,8 @@ For example, YYYYWEEK('2012-12-31 12:12:12') is equivalent to (2013 x 54 + 1) % .. note:: - 2012-12-31 is the first week of 2013, so the calculation is 2013 x 54 + 1. + - 2012-12-31 is the first week of 2013, so the calculation is 2013 x 54 + 1. + - For details on how to use YYYYWEEK, see `YEARWEEK Function `__. Calculation Method ------------------ @@ -44,7 +45,11 @@ Calculation Method | | | | | | Table routing result = (yyyy x54 + Week of the current year) % (Database shards x Table shards) | Database shard: (2013 x 54 + 1) % (8 x 3) = 7 | | | | | - | | Database routing result = Table routing result / Table shards | Database shard: 7 / 3 = 2 | + | | Database routing result = Table routing result/Table shards | Database shard: 7/3 = 2 | + | | | | + | | .. note:: | | + | | | | + | | Database routing result is rounded off to the nearest integer. | | +--------------------------------------------+-------------------------------------------------------------------------------------------------+-----------------------------------------------+ Syntax for Creating Tables diff --git a/umn/source/sql_syntax/dml/delete.rst b/umn/source/sql_syntax/dml/delete.rst index 9436eed..4cd086b 100644 --- a/umn/source/sql_syntax/dml/delete.rst +++ b/umn/source/sql_syntax/dml/delete.rst @@ -20,3 +20,4 @@ Syntax Restrictions - The WHERE clause does not support subqueries, including correlated and non-correlated subqueries. - Data in reference tables cannot be deleted when multiple tables are deleted at a time. 
+- PARTITION clauses are not supported. diff --git a/umn/source/sql_syntax/dml/index.rst b/umn/source/sql_syntax/dml/index.rst index f6f21a1..50bec52 100644 --- a/umn/source/sql_syntax/dml/index.rst +++ b/umn/source/sql_syntax/dml/index.rst @@ -13,7 +13,6 @@ DML - :ref:`SELECT JOIN Syntax ` - :ref:`SELECT UNION Syntax ` - :ref:`SELECT Subquery Syntax ` -- :ref:`Unsupported DML Statements ` - :ref:`Supported System Schema Queries ` .. toctree:: @@ -28,5 +27,4 @@ DML select_join_syntax select_union_syntax select_subquery_syntax - unsupported_dml_statements supported_system_schema_queries diff --git a/umn/source/sql_syntax/dml/select.rst b/umn/source/sql_syntax/dml/select.rst index 004a1fa..3e694cc 100644 --- a/umn/source/sql_syntax/dml/select.rst +++ b/umn/source/sql_syntax/dml/select.rst @@ -53,3 +53,5 @@ Syntax Description - Each SELECT statement in UNION does not support multiple columns with the same name, for example, SELECT id, id, name FROM t1 UNION SELECT pk, pk, name FROM t2 is not supported because this statement has duplicate column names. + +- User-defined sequencing similar to **ORDER BY FIELD(id,1,2,3)** is not supported. diff --git a/umn/source/sql_syntax/dml/supported_system_schema_queries.rst b/umn/source/sql_syntax/dml/supported_system_schema_queries.rst index 1da5234..e3365ec 100644 --- a/umn/source/sql_syntax/dml/supported_system_schema_queries.rst +++ b/umn/source/sql_syntax/dml/supported_system_schema_queries.rst @@ -5,7 +5,7 @@ Supported System Schema Queries =============================== -.. table:: **Table 1** Supported System Schema Queries +.. table:: **Table 1** Supported system schema queries +-----------------------------------+-----------------------------------------------------------------------------------------------------------------+ | DML Syntax | Restriction | diff --git a/umn/source/sql_syntax/dml/unsupported_dml_statements.rst b/umn/source/sql_syntax/dml/unsupported_dml_statements.rst deleted file mode 100644 index 832f410..0000000 --- a/umn/source/sql_syntax/dml/unsupported_dml_statements.rst +++ /dev/null @@ -1,22 +0,0 @@ -:original_name: ddm_12_0008.html - -.. _ddm_12_0008: - -Unsupported DML Statements -========================== - - -Unsupported DML Statements --------------------------- - -.. table:: **Table 1** Syntax restrictions on DML - - +------------------+-----------------------------------------------------------------------------------+ - | DML Syntax | Restriction | - +==================+===================================================================================+ - | DELETE statement | PARTITION clauses are not supported. | - +------------------+-----------------------------------------------------------------------------------+ - | UPDATE statement | Cross-shard subquery is not supported. | - +------------------+-----------------------------------------------------------------------------------+ - | SELECT statement | User-defined sequencing similar to **ORDER BY FIELD(id,1,2,3)** is not supported. | - +------------------+-----------------------------------------------------------------------------------+ diff --git a/umn/source/sql_syntax/dml/update.rst b/umn/source/sql_syntax/dml/update.rst index 5683a49..c3daebb 100644 --- a/umn/source/sql_syntax/dml/update.rst +++ b/umn/source/sql_syntax/dml/update.rst @@ -19,9 +19,11 @@ Syntax Restrictions - Subqueries are not supported, including correlated and non-correlated subqueries. +- Cross-shard subquery is not supported. 
+ - The WHERE condition in the UPDATE statement does not support arithmetic expressions and their subqueries. -- Modifying reference tables is not supported during an update of multiple tables. +- Modifying broadcast tables is not supported during an update of multiple tables. (Data in columns of a broadcast table cannot be on the left of SET assignment statements). - Updating the sharding key field of a logical table is not supported because this operation may cause data redistribution. diff --git a/umn/source/sql_syntax/functions.rst b/umn/source/sql_syntax/functions.rst index b5c571a..aa9fcdc 100644 --- a/umn/source/sql_syntax/functions.rst +++ b/umn/source/sql_syntax/functions.rst @@ -10,96 +10,91 @@ Supported Functions .. table:: **Table 1** Operator functions - +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Expression | Example | - +===================================+=======================================================================================================================================================================================================================================================================================+ - | IN | SELECT \* FROM Products WHERE vendor_id IN ( 'V000001', 'V000010' ) ORDER BY product_price | - +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | NOT IN | SELECT product_id, product_name FROM Products WHERE NOT vendor_id IN ('V000001', 'V000002') ORDER BY product_id | - +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | BETWEEN | SELECT id, product_id, product_name, product_price FROM Products WHERE id BETWEEN 000005 AND 000034 ORDER BY id | - +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | NOT...BETWEEN | SELECT product_id, product_name FROM Products WHERE NOT vendor_id BETWEEN 'V000002' and 'V000005' ORDER BY product_id | - +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | IS NULL | SELECT product_name FROM Products WHERE product_price IS NULL | - 
+-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | IS NOT NULL | SELECT id, product_name FROM Products WHERE product_price IS NOT NULL ORDER BY id | - +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | AND | SELECT \* FROM Products WHERE vendor_id = 'V000001' AND product_price <= 4000 ORDER BY product_price | - +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | OR | SELECT \* FROM Products WHERE vendor_id = 'V000001' OR vendor_id = 'V000009' | - +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | NOT | SELECT product_id, product_name FROM Products WHERE NOT vendor_id = 'V000002' | - +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | LIKE | SELECT \* FROM Products WHERE product_name LIKE 'NAME%' ORDER BY product_name | - +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | NOT LIKE | SELECT \* FROM Products WHERE product_name NOT LIKE 'NAME%' ORDER BY product_name | - +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | CONCAT | SELECT product_id, product_name, Concat( product_id , '(', product_name ,')' ) AS product_test FROM Products ORDER BY product_id | - +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | + | SELECT 3 \* 2+5-100/50 | - 
+-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | ``-`` | SELECT 3 \* 2+5-100/50 | - +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | \* | SELECT order_num, product_id, quantity, item_price, quantity*item_price AS expanded_price FROM OrderItems WHERE order_num BETWEEN 000009 AND 000028 ORDER BY order_num | - +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | / | SELECT 3 \* 2+5-100/50 | - +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | UPPER | SELECT id, product_id, UPPER(product_name) FROM Products WHERE id > 10 ORDER BY product_id | - +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | LOWER | SELECT id, product_id, LOWER(product_name) FROM Products WHERE id <= 10 ORDER BY product_id | - +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | SOUNDEX | SELECT \* FROM Vendors WHERE SOUNDEX(vendor_name) = SOUNDEX('test') ORDER BY vendor_name | - +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | IFNULL | SELECT IFNULL(product_id, 0) FROM Products; | - | | | - | | .. note:: | - | | | - | | - For DDM instances created before March 20, sharded tables do not support the calling of functions nested in the IFNULL and aggregation functions. For example, if you execute function **select IFNULL(sum(yan),0) from shenhai**, the result differs from the expected result. | - | | - For DDM instances created after March 20, sharded tables support only the calling of functions nested in the IFNULL and aggregation functions. 
| - +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Expression | Example | + +===============+========================================================================================================================================================================+ + | IN | SELECT \* FROM Products WHERE vendor_id IN ( 'V000001', 'V000010' ) ORDER BY product_price | + +---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | NOT IN | SELECT product_id, product_name FROM Products WHERE vendor_id NOT IN ('V000001', 'V000002') ORDER BY product_id | + +---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | BETWEEN | SELECT id, product_id, product_name, product_price FROM Products WHERE id BETWEEN 000005 AND 000034 ORDER BY id | + +---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | NOT...BETWEEN | SELECT product_id, product_name FROM Products WHERE NOT vendor_id BETWEEN 'V000002' and 'V000005' ORDER BY product_id | + +---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | IS NULL | SELECT product_name FROM Products WHERE product_price IS NULL | + +---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | IS NOT NULL | SELECT id, product_name FROM Products WHERE product_price IS NOT NULL ORDER BY id | + +---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | AND | SELECT \* FROM Products WHERE vendor_id = 'V000001' AND product_price <= 4000 ORDER BY product_price | + +---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | OR | SELECT \* FROM Products WHERE vendor_id = 'V000001' OR vendor_id = 'V000009' | + +---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | NOT | SELECT product_id, product_name FROM Products WHERE NOT vendor_id = 'V000002' | + +---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | LIKE | SELECT \* FROM Products WHERE product_name LIKE 'NAME%' ORDER BY product_name | + 
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | NOT LIKE | SELECT \* FROM Products WHERE product_name NOT LIKE 'NAME%' ORDER BY product_name | + +---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | CONCAT | SELECT product_id, product_name, CONCAT( product_id , '(', product_name ,')' ) AS product_test FROM Products ORDER BY product_id | + +---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | + | SELECT 3 \* 2+5-100/50 | + +---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | ``-`` | SELECT 3 \* 2+5-100/50 | + +---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | \* | SELECT order_num, product_id, quantity, item_price, quantity*item_price AS expanded_price FROM OrderItems WHERE order_num BETWEEN 000009 AND 000028 ORDER BY order_num | + +---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | / | SELECT 3 \* 2+5-100/50 | + +---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | UPPER | SELECT id, product_id, UPPER(product_name) FROM Products WHERE id > 10 ORDER BY product_id | + +---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | LOWER | SELECT id, product_id, LOWER(product_name) FROM Products WHERE id <= 10 ORDER BY product_id | + +---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | SOUNDEX | SELECT \* FROM Vendors WHERE SOUNDEX(vendor_name) = SOUNDEX('test') ORDER BY vendor_name | + +---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | IFNULL | SELECT IFNULL(product_id, 0) FROM Products; | + +---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ .. 
table:: **Table 2** Time and date functions - +-----------------------+------------------------------------------------------+-----------------------+ - | Expression | Example | Application Scope | - +=======================+======================================================+=======================+ - | DAY() | SELECT \* FROM TAB_DATE WHERE DAY(date)=21 | ``-`` | - | | | | - | | SELECT \* FROM TAB_DATE WHERE date='2018-12-21' | | - | | | | - | | INSERT INTO TAB_DATE(id,date) VALUES(1,'2018-05-22') | | - +-----------------------+------------------------------------------------------+-----------------------+ - | MONTH() | SELECT \* FROM TAB_DATE WHERE MONTH(date)=12 | ``-`` | - | | | | - | | SELECT \* FROM TAB_DATE WHERE date='2018-12-21' | | - | | | | - | | INSERT INTO TAB_DATE(id,date) VALUES(1,'2018-05-22') | | - +-----------------------+------------------------------------------------------+-----------------------+ - | YEAR() | SELECT \* FROM TAB_DATE WHERE YEAR(date)=2018 | ``-`` | - | | | | - | | SELECT \* FROM TAB_DATE WHERE date='2018-12-21' | | - | | | | - | | INSERT INTO TAB_DATE(id,date) VALUES(1,'2018-05-22') | | - +-----------------------+------------------------------------------------------+-----------------------+ + +-----------------------------------+------------------------------------------------------+ + | Expression | Example | + +===================================+======================================================+ + | DAY() | SELECT \* FROM TAB_DATE WHERE DAY(date)=21 | + | | | + | | SELECT \* FROM TAB_DATE WHERE date='2018-12-21' | + | | | + | | INSERT INTO TAB_DATE(id,date) VALUES(1,'2018-05-22') | + +-----------------------------------+------------------------------------------------------+ + | MONTH() | SELECT \* FROM TAB_DATE WHERE MONTH(date)=12 | + | | | + | | SELECT \* FROM TAB_DATE WHERE date='2018-12-21' | + | | | + | | INSERT INTO TAB_DATE(id,date) VALUES(1,'2018-05-22') | + +-----------------------------------+------------------------------------------------------+ + | YEAR() | SELECT \* FROM TAB_DATE WHERE YEAR(date)=2018 | + | | | + | | SELECT \* FROM TAB_DATE WHERE date='2018-12-21' | + | | | + | | INSERT INTO TAB_DATE(id,date) VALUES(1,'2018-05-22') | + +-----------------------------------+------------------------------------------------------+ .. 
table:: **Table 3** Mathematical functions - +------------+-----------------------------------------------------------------------------------------------------------------------------+-------------------+ - | Expression | Example | Application Scope | - +============+=============================================================================================================================+===================+ - | SQRT() | SELECT id, product_price, SQRT(product_price) AS price_sqrt FROM Products WHERE product_price < 4000 ORDER BY product_price | ``-`` | - +------------+-----------------------------------------------------------------------------------------------------------------------------+-------------------+ - | AVG() | SELECT AVG(product_price) AS avg_product FROM Products | ``-`` | - +------------+-----------------------------------------------------------------------------------------------------------------------------+-------------------+ - | COUNT() | SELECT COUNT(``*``) AS num_product FROM Products | ``-`` | - +------------+-----------------------------------------------------------------------------------------------------------------------------+-------------------+ - | MAX() | SELECT id, product_id, product_name, MAX(product_price) AS max_price FROM Products ORDER BY id | ``-`` | - +------------+-----------------------------------------------------------------------------------------------------------------------------+-------------------+ - | MIN() | SELECT id, product_id, product_name, MIN(product_price) AS min_price FROM Products ORDER BY id | ``-`` | - +------------+-----------------------------------------------------------------------------------------------------------------------------+-------------------+ - | SUM() | SELECT SUM(product_price) AS sum_product FROM Products | ``-`` | - +------------+-----------------------------------------------------------------------------------------------------------------------------+-------------------+ + +------------+-----------------------------------------------------------------------------------------------------------------------------+ + | Expression | Example | + +============+=============================================================================================================================+ + | SQRT() | SELECT id, product_price, SQRT(product_price) AS price_sqrt FROM Products WHERE product_price < 4000 ORDER BY product_price | + +------------+-----------------------------------------------------------------------------------------------------------------------------+ + | AVG() | SELECT AVG(product_price) AS avg_product FROM Products | + +------------+-----------------------------------------------------------------------------------------------------------------------------+ + | COUNT() | SELECT COUNT(``*``) AS num_product FROM Products | + +------------+-----------------------------------------------------------------------------------------------------------------------------+ + | MAX() | SELECT id, product_id, product_name, MAX(product_price) AS max_price FROM Products ORDER BY id | + +------------+-----------------------------------------------------------------------------------------------------------------------------+ + | MIN() | SELECT id, product_id, product_name, MIN(product_price) AS min_price FROM Products ORDER BY id | + +------------+-----------------------------------------------------------------------------------------------------------------------------+ + | SUM() | SELECT 
SUM(product_price) AS sum_product FROM Products | + +------------+-----------------------------------------------------------------------------------------------------------------------------+ Unsupported Functions --------------------- diff --git a/umn/source/sql_syntax/global_sequence/overview.rst b/umn/source/sql_syntax/global_sequence/overview.rst index 45e92f3..a45601c 100644 --- a/umn/source/sql_syntax/global_sequence/overview.rst +++ b/umn/source/sql_syntax/global_sequence/overview.rst @@ -29,13 +29,13 @@ Creating an Auto-Increment Sequence #. Run the following command to create an auto-increment sequence: - **create sequence** *xxxxx* ; + **create sequence** ; .. note:: - - *xxxxx* indicates the sequence name. + - ** indicates the sequence name. - The auto-increment key should be a BIGINT value. To avoid duplicate values, do not use TINYINT, SMALLINT, MEDIUMINT, INTEGER, or INT as the auto-increment key. - - Run **show sequences** to view the usage of the auto-increment sequence. If the usage reaches 100%, do not insert data any more and contact DDM technical support. + - Run **show sequences** to view the usage of the auto-increment sequence. If the usage reaches 100%, do not insert data anymore. Dropping an Auto-Increment Sequence ----------------------------------- @@ -48,9 +48,9 @@ Dropping an Auto-Increment Sequence #. Run the following command to drop an auto-increment sequence: - **drop sequence** *xxxxx* ; + **drop sequence** ; - **drop sequence** *DB.xxx*; + **drop sequence** *DB.*\ ; .. note:: @@ -68,12 +68,12 @@ Modifying the Start Value of an Auto-Increment Sequence #. Run the command to change the start value: - **alter sequence** *xxxxx* **START WITH** *yyyyy;* + **alter sequence** **START WITH** \ *;* .. note:: - - *xxxxx* indicates the sequence name. - - *yyyyy* indicates the start value of the target sequence. + - ** indicates the sequence name. + - ** indicates the start value of the target sequence. Querying an Auto-Increment Sequence ----------------------------------- diff --git a/umn/source/sql_syntax/global_sequence/using_global_sequences_in_insert_or_replace_statements.rst b/umn/source/sql_syntax/global_sequence/using_global_sequences_in_insert_or_replace_statements.rst index c99b0a0..8a9d21b 100644 --- a/umn/source/sql_syntax/global_sequence/using_global_sequences_in_insert_or_replace_statements.rst +++ b/umn/source/sql_syntax/global_sequence/using_global_sequences_in_insert_or_replace_statements.rst @@ -9,24 +9,21 @@ You can use global sequences in INSERT or REPLACE statements to provide unique g Concurrently executing schema.seq.nextval in multiple sessions is supported to obtain unique global sequence numbers. -Example -------- +Prerequisites +------------- -There are two schemas **dml_test_1** and **dml_test_2**, and both of them have table **test_seq**. +- There are two schemas **dml_test_1** and **dml_test_2**. -Table Definition ----------------- +- Both of them have table **test_seq**. -CREATE TABLE test_seq(col1 BIGINT,col2 BIGINT) DBPARTITION BY HASH(col1) + Run the following command to create a table: CREATE TABLE test_seq(col1 BIGINT,col2 BIGINT) DBPARTITION BY HASH(col1) -Procedure ---------- +How to Use Global Sequence +-------------------------- #. Log in to the required DDM instance using a client. -#. Open the required schema. - -#. Run the following command to create a global sequence for a schema: +#. 
Click the **dml_test_1** schema and run the following command to create a global sequence: **use dml_test_1**; @@ -36,13 +33,13 @@ Procedure #. Run the following statement to use the global sequence in an INSERT or REPLACE statement: - - **use dml_test_1**; - **insert into test_seq(col1,col2)values(seq_test.nextval,seq_test.currval)**; |image2| - - **use dml_test_2**; +#. Click the **dml_test_2** schema, run the following statement to use the global sequence in an INSERT or REPLACE statement: + + **use dml_test_2**; **insert into test_seq(col1,col2)values(dml_test_1.seq_test.nextval,dml_test_1.seq_test.currval)**; diff --git a/umn/source/sql_syntax/global_sequence/using_nextval_or_currval_to_query_global_sequence_numbers.rst b/umn/source/sql_syntax/global_sequence/using_nextval_or_currval_to_query_global_sequence_numbers.rst index 0fcbef1..e62208b 100644 --- a/umn/source/sql_syntax/global_sequence/using_nextval_or_currval_to_query_global_sequence_numbers.rst +++ b/umn/source/sql_syntax/global_sequence/using_nextval_or_currval_to_query_global_sequence_numbers.rst @@ -5,9 +5,9 @@ Using NEXTVAL or CURRVAL to Query Global Sequence Numbers ========================================================= -- NEXTVAL returns the next sequence number, and CURRVAL returns the current sequence number. NEXTVAL(N) returns *n* unique sequence numbers. -- NEXTVAL(N) can be used only in **select sequence.nextval(n)** and does not support cross-schema operations. -- CURRVAL(N) is not supported. +- NEXTVAL returns the next sequence number, and CURRVAL returns the current sequence number. nextval(n) returns *n* unique sequence numbers. +- nextval(n) can be used only in **select sequence.nextval(n)** and does not support cross-schema operations. +- currval(n) is not supported. Procedure --------- diff --git a/umn/source/sql_syntax/supported_sql_statements/check_table/checking_ddl_consistency_of_physical_tables_in_all_logical_tables.rst b/umn/source/sql_syntax/supported_sql_statements/check_table/checking_ddl_consistency_of_physical_tables_in_all_logical_tables.rst index 972fb16..bb6f8d1 100644 --- a/umn/source/sql_syntax/supported_sql_statements/check_table/checking_ddl_consistency_of_physical_tables_in_all_logical_tables.rst +++ b/umn/source/sql_syntax/supported_sql_statements/check_table/checking_ddl_consistency_of_physical_tables_in_all_logical_tables.rst @@ -40,5 +40,5 @@ Each row contains the check result of a logical table. - **INCONSISTENT_COUNT**: indicates the number of physical tables with inconsistent DDL results. - **DETAILS**: indicates names of the physical tables with inconsistent DDL check results. -.. |image1| image:: /_static/images/en-us_image_0000001474893253.png +.. |image1| image:: /_static/images/en-us_image_0000001583192212.png .. |image2| image:: /_static/images/en-us_image_0000001425254228.png diff --git a/umn/source/sql_syntax/supported_sql_statements/customized_hints_for_read_write_splitting.rst b/umn/source/sql_syntax/supported_sql_statements/customized_hints_for_read_write_splitting.rst index 49daabf..0afe4cc 100644 --- a/umn/source/sql_syntax/supported_sql_statements/customized_hints_for_read_write_splitting.rst +++ b/umn/source/sql_syntax/supported_sql_statements/customized_hints_for_read_write_splitting.rst @@ -9,8 +9,17 @@ DDM allows you to customize a hint to specify whether SQL statements are execute The following hint formats are supported: -- /*!mycat:db_type=\ **hos**\ t*/ -- /``*+`` db_type=\ **host** \*/ +Format 1 + +.. 
code-block:: text + + /*!mycat:db_type=host*/ + +Format 2 + +.. code-block:: text + + /*+ db_type=host */ **host** can be **master** or **slave**. **master** indicates a primary instance, and **slave** indicates a read replica. @@ -18,4 +27,4 @@ Currently, this function only applies to SELECT statements. .. note:: - After read/write splitting is enabled, write operations are performed only on the primary DB instance, and read operations are performed only on its read replicas. To read from the primary instance, you can customize a hint to forcibly perform read operations on the primary instance. This method is only suitable for queries. + After read/write splitting is enabled, write operations are performed only on the primary instance, and read operations are performed only on its read replicas. To read from the primary instance, you can customize a hint to forcibly perform read operations on the primary instance. This method is only suitable for queries. diff --git a/umn/source/sql_syntax/supported_sql_statements/hint-_allow_alter_rerun.rst b/umn/source/sql_syntax/supported_sql_statements/hint-_allow_alter_rerun.rst index ade7c73..7f64466 100644 --- a/umn/source/sql_syntax/supported_sql_statements/hint-_allow_alter_rerun.rst +++ b/umn/source/sql_syntax/supported_sql_statements/hint-_allow_alter_rerun.rst @@ -7,10 +7,12 @@ HINT- ALLOW_ALTER_RERUN **Command Format:** -.. code-block:: text - - /*+ allow_alter_rerun=true*/ALTER TABLE aaa_tb ADD schoolroll varchar(128) not null comment 'Enrollment data' +**/*+ allow_alter_rerun=true*/**\ ** **Description:** Using this hint ensures that commands can be repeatedly executed, and no error is reported. This hint supports the following ALTER TABLE statements: ADD COLUMN, MODIFY COLUMN, DROP COLUMN, ADD INDEX, DROP INDEX, CHANGE COLUMN, ADD PARTITION, and DROP PARTITION. + +Example: + +**/*+ allow_alter_rerun=true*/ALTER TABLE aaa_tb ADD schoolroll varchar(128) not null comment 'Enrollment data'** diff --git a/umn/source/sql_syntax/supported_sql_statements/load_data.rst b/umn/source/sql_syntax/supported_sql_statements/load_data.rst index 5615734..1c1a70b 100644 --- a/umn/source/sql_syntax/supported_sql_statements/load_data.rst +++ b/umn/source/sql_syntax/supported_sql_statements/load_data.rst @@ -12,9 +12,15 @@ LOAD DATA LOCAL INFILE '/data/data.txt' IGNORE INTO TABLE test CHARACTER SET 'ut .. note:: - If the data contains special characters, such as separators or escape characters, enclose the characters with quotation marks ("") and specify them using OPTIONALLY ENCLOSED BY '"'. + If a data field contains special characters like separators and escapes, execute OPTIONALLY ENCLOSED BY '"' to enclose the field with double quotation marks (""). - If the preceding method does not work, replace quotation marks ("") with special characters (\\) and marks ("). + Example: + + The following data field contains separators (,) and is enclosed with quotation marks: + + **"aab,,,bba,ddd"** + + If a data field contains quotation marks, the preceding method may not work. You can add a backslash (\\) before each quotation mark (") in the field, for example, **"aab,,,bba,ddd\\"ddd\\"bb,ae"**. - If keyword **LOCAL** is specified, the file is read from the client host. If keyword **LOCAL** is not specified, this function is not supported for security purposes. - You can use **FIELDS TERMINATED BY** to specify a separator between characters. The default value is **\\t**. 
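The LOAD DATA note above describes the quoting and separator options individually. As a minimal sketch, the following statement combines them for a comma-separated file whose fields may be enclosed in double quotation marks. The file path, table name, and character set are taken from the example shown above; the column list (id, name) is only an assumption and must be adapted to your own schema.

.. code-block:: text

   -- Sketch only: assumes a logical table named test with columns (id, name).
   LOAD DATA LOCAL INFILE '/data/data.txt'
   IGNORE INTO TABLE test
   CHARACTER SET 'utf8'
   FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"'
   (id, name);

With OPTIONALLY ENCLOSED BY '"', a field such as **"aab,,,bba,ddd"** is loaded as a single value even though it contains the separator character.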
@@ -49,6 +55,8 @@ LOAD DATA LOCAL INFILE '/data/data.txt' IGNORE INTO TABLE test CHARACTER SET 'ut Use Constraints --------------- +There are the following constraints on LOAD DATA syntax. + - LOW_PRIORITY is not supported. - CONCURRENT is not supported. - PARTITION (partition_name [, partition_name] ...) is not supported. diff --git a/umn/source/sql_syntax/supported_sql_statements/show_data_node.rst b/umn/source/sql_syntax/supported_sql_statements/show_data_node.rst index 4b076df..d59ec55 100644 --- a/umn/source/sql_syntax/supported_sql_statements/show_data_node.rst +++ b/umn/source/sql_syntax/supported_sql_statements/show_data_node.rst @@ -7,13 +7,11 @@ SHOW DATA NODE **Command Format:** -.. code-block:: text - - show data node: used to view data about database shards in the RDS instance. +**show data node**: used to view data about database shards in the RDS instance. **Output Details:** -**RDS_INSTANCE_ID**: indicates the ID of the RDS instance. +**RDS_instance_id**: indicates the ID of the RDS instance. **PHYSICAL_NODE**: used to view physical databases in the RDS instance. diff --git a/umn/source/sql_syntax/supported_sql_statements/show_physical_processlist.rst b/umn/source/sql_syntax/supported_sql_statements/show_physical_processlist.rst index 5b9667d..fe18ff0 100644 --- a/umn/source/sql_syntax/supported_sql_statements/show_physical_processlist.rst +++ b/umn/source/sql_syntax/supported_sql_statements/show_physical_processlist.rst @@ -23,19 +23,27 @@ SHOW PHYSICAL PROCESSLIST **Output Details:** -**ip**: indicates the IP address of the associated RDS instance. +**Ip**: indicates the IP address of the associated RDS instance. -**port**: indicates the port number of the associated RDS instance. +**Port**: indicates the port number of the associated RDS instance. -**instance id**: indicates the ID of the associated RDS instance. +**Instance_id**: indicates the ID of the associated RDS instance. -**type:master**: indicates that the associated instance is a primary instance, and **readreplica** indicates that the associated instance is a read replica. +**Type**: **master** indicates that the associated instance is a primary instance, and **readreplica** indicates that the associated instance is a read replica. -Columns after column **type** indicate the information about processes running on the associated RDS instance. Such information is the same as the output of command **show processlist** executed on the associated RDS instance. +Columns after column **Type** indicate the information about processes running on the associated RDS instance. Such information is the same as the output of command **show processlist** executed on the associated RDS instance. **Command Format 3:** -**kill physical physical_thread_id@rds_ip:rds_port**: kills the execution thread on the associated RDS instance. +Run the following statement to kill execution threads on the associated RDS instance: + +kill physical physical_thread_id@rds_ip:rds_port + +**physical_thread_id**: indicates the ID of an execution thread on the associated RDS instance. You can obtain it from the command output in **Command Format 2**. + +**rds_ip**: indicates the IP address of the associated RDS instance. You can obtain it from the command output in **Command Format 2**. + +**rds_port**: indicates the port number of the associated RDS instance. You can obtain it from the command output in **Command Format 2**. .. 
important:: diff --git a/umn/source/sql_syntax/supported_sql_statements/show_topology.rst b/umn/source/sql_syntax/supported_sql_statements/show_topology.rst index e9fe2ac..6ff91a2 100644 --- a/umn/source/sql_syntax/supported_sql_statements/show_topology.rst +++ b/umn/source/sql_syntax/supported_sql_statements/show_topology.rst @@ -7,13 +7,11 @@ SHOW TOPOLOGY **Command Format:** -.. code-block:: text - - show topology from table_name: used to view physical tables corresponding to a specified logical table. +**show topology from** **: used to view physical tables corresponding to a specified logical table. **Output Details:** -**Rds_instance_id**: indicates the ID of the RDS instance. +**RDS_instance_id**: indicates the ID of the RDS instance. **HOST**: indicates the IP address of the RDS instance. diff --git a/umn/source/sql_syntax/supported_sql_statements/truncate_table/additional_information.rst b/umn/source/sql_syntax/supported_sql_statements/truncate_table/additional_information.rst deleted file mode 100644 index 1e7d85a..0000000 --- a/umn/source/sql_syntax/supported_sql_statements/truncate_table/additional_information.rst +++ /dev/null @@ -1,8 +0,0 @@ -:original_name: ddm_12_0009.html - -.. _ddm_12_0009: - -Additional Information -====================== - -Hints are valid only for sharded tables. diff --git a/umn/source/sql_syntax/supported_sql_statements/truncate_table/hint-db.rst b/umn/source/sql_syntax/supported_sql_statements/truncate_table/hint-db.rst index 724d55d..aa41ed5 100644 --- a/umn/source/sql_syntax/supported_sql_statements/truncate_table/hint-db.rst +++ b/umn/source/sql_syntax/supported_sql_statements/truncate_table/hint-db.rst @@ -14,3 +14,7 @@ HINT-DB **Description:** Deleting data in physical tables corresponding to ** in ** does not affect physical tables in other database shards. + +.. note:: + + HINTs are instructions within a SQL statement that tell the data source optimizer to execute the statement in a flexible way. diff --git a/umn/source/sql_syntax/supported_sql_statements/truncate_table/hint-db_table.rst b/umn/source/sql_syntax/supported_sql_statements/truncate_table/hint-db_table.rst index 47bf4bd..7ad47fe 100644 --- a/umn/source/sql_syntax/supported_sql_statements/truncate_table/hint-db_table.rst +++ b/umn/source/sql_syntax/supported_sql_statements/truncate_table/hint-db_table.rst @@ -14,3 +14,7 @@ HINT-DB/TABLE **Description:** Deleting data in physical table ** in database shard ** does not affect other physical tables. + +.. note:: + + Hints are valid only for sharded tables. diff --git a/umn/source/sql_syntax/supported_sql_statements/truncate_table/hint-table.rst b/umn/source/sql_syntax/supported_sql_statements/truncate_table/hint-table.rst index 997b34c..c453998 100644 --- a/umn/source/sql_syntax/supported_sql_statements/truncate_table/hint-table.rst +++ b/umn/source/sql_syntax/supported_sql_statements/truncate_table/hint-table.rst @@ -5,6 +5,8 @@ HINT-TABLE ========== +HINTs are instructions within a SQL statement that tell the data source optimizer to execute the statement in a flexible way. This section describes how to use HINT syntax to delete data in a table. + **Command Format:** .. code-block:: text @@ -23,5 +25,9 @@ Deleting data in physical table ** in the current database |image2| +.. note:: + + Hints are valid only for sharded tables. + .. |image1| image:: /_static/images/en-us_image_0000001474893213.png .. 
|image2| image:: /_static/images/en-us_image_0000001425254188.png diff --git a/umn/source/sql_syntax/supported_sql_statements/truncate_table/index.rst b/umn/source/sql_syntax/supported_sql_statements/truncate_table/index.rst index eda4b1b..690e290 100644 --- a/umn/source/sql_syntax/supported_sql_statements/truncate_table/index.rst +++ b/umn/source/sql_syntax/supported_sql_statements/truncate_table/index.rst @@ -8,7 +8,6 @@ TRUNCATE TABLE - :ref:`HINT-DB ` - :ref:`HINT-TABLE ` - :ref:`HINT-DB/TABLE ` -- :ref:`Additional Information ` .. toctree:: :maxdepth: 1 @@ -17,4 +16,3 @@ TRUNCATE TABLE hint-db hint-table hint-db_table - additional_information diff --git a/umn/source/sql_syntax/use_constraints.rst b/umn/source/sql_syntax/use_constraints.rst index 9c043c3..a6394b2 100644 --- a/umn/source/sql_syntax/use_constraints.rst +++ b/umn/source/sql_syntax/use_constraints.rst @@ -23,7 +23,7 @@ Use Constraints - Most of SHOW statements such as SHOW PROFILES and SHOW ERRORS - Table maintenance statements, including ANALYZE, CHECK, CHECKSUM, OPTIMIZE, and REPAIR TABLE - Statements for assigning a value to or querying variable **session**, for example, set @rowid=0;select @rowid:=@rowid+1,id from user -- SQL statements that use -- or /*...*/ to comment out a single line or multiple lines of code +- SQL statements that use -- or ``/*...*/`` to comment out a single line or multiple lines of code - The result of the REPEAT function contains a maximum of 1,000,000 characters (in version 3.0.9 or later). Permission Levels diff --git a/umn/source/task_center.rst b/umn/source/task_center.rst index 87f5f63..ce41838 100644 --- a/umn/source/task_center.rst +++ b/umn/source/task_center.rst @@ -19,7 +19,7 @@ You can view the progress and results of asynchronous tasks on the **Task Center - Restarting a DDM instance - Binding an EIP to a DDM instance - Unbinding an EIP from a DDM instance - - Restoring data from current instance + - Restoring data from current DDM instance - Importing schema information - Flexible shard configuration @@ -31,7 +31,7 @@ Procedure #. Click |image2| in the upper left corner of the page and choose **Databases** > **Distributed Database Middleware**. #. Choose **Task Center** in the left navigation pane, locate the required task, and view its details. - - You can locate a task by name, order ID, or DB instance name/ID, or search for the required task by entering a task name in the search box in the upper right corner. + - You can locate a task by name, order ID, or instance name/ID, or search for the required task by entering a task name in the search box in the upper right corner. - You can click |image3| in the upper right corner to search for tasks executed within a specific period. The default time range is seven days.
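For quick reference, the global sequence statements described in the sections above can be combined into one short sketch. It reuses the names from those sections (**dml_test_1**, **test_seq**, and **seq_test**), which are illustrative only; replace them with your own schema, table, and sequence names.

.. code-block:: text

   -- Create a sequence in the dml_test_1 schema (names are illustrative).
   use dml_test_1;
   create sequence seq_test;

   -- Use the sequence to generate unique values while inserting data.
   insert into test_seq(col1, col2) values(seq_test.nextval, seq_test.currval);

   -- Check how much of the sequence has been used.
   show sequences;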